max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
Trakttv.bundle/Contents/Libraries/Shared/plugin/preferences/options/pin.py | disrupted/Trakttv.bundle | 1,346 | 12721502 | from plugin.core.environment import translate as _
from plugin.managers.account import TraktAccountManager
from plugin.models import TraktAccount
from plugin.preferences.options.core.base import SimpleOption
import logging
log = logging.getLogger(__name__)
class PinOption(SimpleOption):
type = 'string'
group = (_('Authentication'), )
label = _('Authentication PIN')
preference = 'pin'
def on_database_changed(self, value, account=None):
# Update preference
return self._update_preference(value, account)
def on_plex_changed(self, value, account):
if not value:
# Ignore empty PIN field
return None
# Retrieve administrator account
trakt_account = TraktAccountManager.get(TraktAccount.account == account)
# Update administrator authorization
if not TraktAccountManager.update.from_pin(trakt_account, value):
log.warn('Unable to update account')
return None
return value
|
tensorflow_model_optimization/python/core/clustering/keras/mnist_clusterable_layer_test.py | Pandinosaurus/model-optimization | 1,318 | 12721530 | <filename>tensorflow_model_optimization/python/core/clustering/keras/mnist_clusterable_layer_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for a simple convnet with clusterable layer on the MNIST dataset."""
import tensorflow as tf
from tensorflow_model_optimization.python.core.clustering.keras import cluster
from tensorflow_model_optimization.python.core.clustering.keras import cluster_config
from tensorflow_model_optimization.python.core.clustering.keras import clusterable_layer
from tensorflow_model_optimization.python.core.clustering.keras import clustering_algorithm
tf.random.set_seed(42)
keras = tf.keras
EPOCHS = 7
EPOCHS_FINE_TUNING = 4
NUMBER_OF_CLUSTERS = 8
class MyDenseLayer(keras.layers.Dense, clusterable_layer.ClusterableLayer):
def get_clusterable_weights(self):
# Cluster kernel and bias.
return [('kernel', self.kernel), ('bias', self.bias)]
class ClusterableWeightsCA(clustering_algorithm.ClusteringAlgorithm):
"""This class provides a special lookup function for the the weights 'w'.
It reshapes and tile centroids the same way as the weights. This allows us
to find pulling indices efficiently.
"""
def get_pulling_indices(self, weight):
clst_num = self.cluster_centroids.shape[0]
tiled_weights = tf.tile(tf.expand_dims(weight, axis=2), [1, 1, clst_num])
tiled_cluster_centroids = tf.tile(
tf.reshape(self.cluster_centroids, [1, 1, clst_num]),
[weight.shape[0], weight.shape[1], 1])
# We find the nearest cluster centroids and store them so that ops can build
# their kernels upon it
pulling_indices = tf.argmin(
tf.abs(tiled_weights - tiled_cluster_centroids), axis=2)
return pulling_indices
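# Illustrative shape walkthrough (added for clarity, not part of the original
# test): for a 2-D weight of shape (rows, cols) and clst_num centroids,
# tiled_weights and tiled_cluster_centroids both have shape
# (rows, cols, clst_num), and the argmin over the last axis yields
# pulling_indices of shape (rows, cols), i.e. the index of the nearest
# centroid for each weight element.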
class MyClusterableLayer(keras.layers.Layer,
clusterable_layer.ClusterableLayer):
def __init__(self, units=32, **kwargs):
super(MyClusterableLayer, self).__init__(**kwargs)
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,),
initializer='random_normal',
trainable=False,
)
self.built = True
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
config = super(MyClusterableLayer, self).get_config()
config.update({'units': self.units})
return config
def get_clusterable_weights(self):
# Cluster only weights 'w'
return [('w', self.w)]
def get_clusterable_algorithm(self, weight_name):
"""Returns clustering algorithm for the custom weights 'w'."""
if weight_name == 'w':
return ClusterableWeightsCA
else:
# We don't cluster other weights.
return None
def _build_model():
"""Builds model with MyDenseLayer."""
i = tf.keras.layers.Input(shape=(28, 28), name='input')
x = tf.keras.layers.Reshape((28, 28, 1))(i)
x = tf.keras.layers.Conv2D(
filters=12, kernel_size=(3, 3), activation='relu', name='conv1')(
x)
x = tf.keras.layers.MaxPool2D(2, 2)(x)
x = tf.keras.layers.Flatten()(x)
output = MyDenseLayer(units=10)(x)
model = tf.keras.Model(inputs=[i], outputs=[output])
return model
def _build_model_2():
"""Builds model with MyClusterableLayer layer."""
i = tf.keras.layers.Input(shape=(28, 28), name='input')
x = tf.keras.layers.Reshape((28, 28, 1))(i)
x = tf.keras.layers.Conv2D(
filters=12, kernel_size=(3, 3), activation='relu', name='conv1')(
x)
x = tf.keras.layers.MaxPool2D(2, 2)(x)
x = tf.keras.layers.Flatten()(x)
output = MyClusterableLayer(units=10)(x)
model = tf.keras.Model(inputs=[i], outputs=[output])
return model
def _get_dataset():
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Use a subset of the 60,000 training examples to keep the unit test fast.
x_train = x_train[0:1000]
y_train = y_train[0:1000]
return (x_train, y_train), (x_test, y_test)
def _train_model(model):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])
(x_train, y_train), _ = _get_dataset()
model.fit(x_train, y_train, epochs=EPOCHS)
def _cluster_model(model, number_of_clusters):
(x_train, y_train), _ = _get_dataset()
clustering_params = {
'number_of_clusters':
number_of_clusters,
'cluster_centroids_init':
cluster_config.CentroidInitialization.DENSITY_BASED
}
# Cluster model
clustered_model = cluster.cluster_weights(model, **clustering_params)
# Use smaller learning rate for fine-tuning
# clustered model
opt = tf.keras.optimizers.Adam(learning_rate=1e-5)
clustered_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=opt,
metrics=['accuracy'])
# Fine-tune clustered model
clustered_model.fit(x_train, y_train, epochs=EPOCHS_FINE_TUNING)
stripped_model = cluster.strip_clustering(clustered_model)
stripped_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=opt,
metrics=['accuracy'])
return stripped_model
def _get_number_of_unique_weights(stripped_model, layer_nr, weight_name):
layer = stripped_model.layers[layer_nr]
weight = getattr(layer, weight_name)
weights_as_list = weight.numpy().reshape(-1,).tolist()
nr_of_unique_weights = len(set(weights_as_list))
return nr_of_unique_weights
class FunctionalTest(tf.test.TestCase):
def testMnistMyDenseLayer(self):
"""Test model with a custom clusterable layer derived from Dense.
This clusterable layer (see MyDenseLayer definition above) provides the
function get_clusterable_weights() so that both the 'kernel' weights and
the 'bias' weights are clustered.
"""
model = _build_model()
_train_model(model)
# Checks that number of original weights('kernel') is greater than
# the number of clusters
nr_of_unique_weights = _get_number_of_unique_weights(model, -1, 'kernel')
self.assertGreater(nr_of_unique_weights, NUMBER_OF_CLUSTERS)
# Checks that number of original weights('bias') is greater than
# the number of clusters
nr_of_unique_weights = _get_number_of_unique_weights(model, -1, 'bias')
self.assertGreater(nr_of_unique_weights, NUMBER_OF_CLUSTERS)
_, (x_test, y_test) = _get_dataset()
results_original = model.evaluate(x_test, y_test)
self.assertGreater(results_original[1], 0.8)
clustered_model = _cluster_model(model, NUMBER_OF_CLUSTERS)
results = clustered_model.evaluate(x_test, y_test)
self.assertGreater(results[1], 0.8)
# checks 'kernel' weights of the last layer: MyDenseLayer
nr_of_unique_weights = _get_number_of_unique_weights(
clustered_model, -1, 'kernel')
self.assertLessEqual(nr_of_unique_weights, NUMBER_OF_CLUSTERS)
# checks 'bias' weights of the last layer: MyDenseLayer
nr_of_unique_weights = _get_number_of_unique_weights(
clustered_model, -1, 'bias')
self.assertLessEqual(nr_of_unique_weights, NUMBER_OF_CLUSTERS)
def testMnistClusterableLayer(self):
"""Test keras custom layer.
We test the keras custom layer with the provided clustering algorithm
(see MyClusterableLayer above). We cluster only the 'w' weights, and the class
ClusterableWeightsCA provides the function get_pulling_indices for the
lookup of the 'w' weights.
We skip evaluation in this test as it takes some time.
"""
model = _build_model_2()
_train_model(model)
# Checks that number of original weights 'w' is greater than
# the number of clusters.
nr_of_unique_weights = _get_number_of_unique_weights(model, -1, 'w')
self.assertGreater(nr_of_unique_weights, NUMBER_OF_CLUSTERS)
clustered_model = _cluster_model(model, NUMBER_OF_CLUSTERS)
# Checks clustered weights 'w'.
nr_of_unique_weights = _get_number_of_unique_weights(
clustered_model, -1, 'w')
self.assertLessEqual(nr_of_unique_weights, NUMBER_OF_CLUSTERS)
# Train again normally for sanity check
_train_model(clustered_model)
if __name__ == '__main__':
tf.test.main()
|
esmvaltool/diag_scripts/autoassess/autoassess_radiation_rms.py | yifatdzigan/ESMValTool | 148 | 12721532 | <gh_stars>100-1000
"""
Port to Version 2 with implementation of v2-specific changes.
Uses: ESMValTool v2, Python3.x
<NAME>, UREAD, July 2018
The port replicates the original functionality with minimal discrepancies.
Original Description from Version 1 Diagnostic:
;;###########################################################################
;; AutoAssess_radiation_rms.py
;;###########################################################################
;; Description
;; This script is the RMS error metric script of
;; AutoAssess radiation
;; ###########################################################################
This diagnostic uses CMIP5 data; to switch to CMIP6 change _CMIP_TYPE.
"""
import os
import logging
import iris
from esmvaltool.diag_scripts.autoassess._rms_radiation import (start, end,
calc_all)
from esmvaltool.diag_scripts.autoassess._valmod_radiation import (
perform_equation)
from esmvaltool.diag_scripts.shared import (
group_metadata, run_diagnostic, get_control_exper_obs, apply_supermeans)
logger = logging.getLogger(os.path.basename(__file__))
_CMIP_TYPE = 'CMIP5'
def apply_rms(data_1, data_2, cfg, component_dict, var_name):
"""Compute RMS for any data1-2 combination."""
data_names = [model['dataset'] for model in component_dict.values()]
plot_title = var_name + ': ' + data_names[0] + ' vs ' + data_names[1]
rms_list = start(data_names[0], data_names[1])
analysis_type = cfg['analysis_type']
landsea_mask_file = os.path.join(
os.path.dirname(__file__), 'autoassess_source', cfg['landsea_mask'])
landsea_mask_cube = iris.load_cube(landsea_mask_file)
data1_vs_data2 = perform_equation(data_1, data_2, analysis_type)
# call to rms.calc_all() to compute rms; rms.end() to write results
calc_all(rms_list, data1_vs_data2, landsea_mask_cube, plot_title)
end(rms_list, cfg['work_dir'])
def do_preamble(cfg):
"""Execute some preamble functionality."""
# get data
input_data = cfg['input_data'].values()
grouped_input_data = group_metadata(
input_data, 'short_name', sort='dataset')
return input_data, grouped_input_data
def main(cfg):
"""Execute the radiation rms diag."""
logger.setLevel(cfg['log_level'].upper())
input_data, grouped_input_data = do_preamble(cfg)
# select variables and their corresponding
# obs files
for short_name in grouped_input_data:
logger.info("Processing variable %s", short_name)
# control, experiment and obs's
ctrl, exper, obslist = get_control_exper_obs(short_name, input_data,
cfg, _CMIP_TYPE)
# apply the supermeans
ctrl_sm, exper_sm, obs_sm_list = apply_supermeans(ctrl, exper, obslist)
# assemble a dict that contains various params depending
# on the data combinations for RMS computations
# control-experiment
data_component_dict = {'ct-ex': {'ctrl': ctrl, 'exper': exper}}
logger.info("Computing CONTROL-EXPERIMENT RMS...")
apply_rms(ctrl_sm, exper_sm, cfg, data_component_dict['ct-ex'],
short_name)
if obs_sm_list:
for obs, obsfile in zip(obs_sm_list, obslist):
data_component_dict = {
'ct-obs': {
'ctrl': ctrl,
'obs': obsfile
},
'ex-obs': {
'exper': exper,
'obs': obsfile
}
}
# ctrl-obs
logger.info("Computing CONTROL-OBS RMS...")
apply_rms(ctrl_sm, obs, cfg, data_component_dict['ct-obs'],
short_name)
# exper-obs
logger.info("Computing EXPERIMENT-OBS RMS...")
apply_rms(exper_sm, obs, cfg, data_component_dict['ex-obs'],
short_name)
else:
# only ctrl-exper
data_component_dict = {'ct-ex': {'ctrl': ctrl, 'exper': exper}}
logger.info("Computing CONTROL-EXPERIMENT RMS...")
apply_rms(ctrl_sm, exper_sm, cfg, data_component_dict['ct-ex'],
short_name)
if __name__ == '__main__':
with run_diagnostic() as config:
main(config)
|
VMEncryption/test/console_logger.py | shridpant/azure-linux-extensions | 266 | 12721540 | #!/usr/bin/env python
#
# *********************************************************
# Copyright (c) Microsoft. All rights reserved.
#
# Apache 2.0 License
#
# You may obtain a copy of the License at
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# *********************************************************
import os
import string
import json
class HandlerContext:
def __init__(self, name):
self._name = name
self._version = '0.0'
return
class ConsoleLogger(object):
def __init__(self):
self.current_process_id = os.getpid()
self._context = HandlerContext("test")
self._context._config = json.loads('{"runtimeSettings": [{"handlerSettings": {"publicSettings": {"EncryptionOperation": "EnableEncryptionFormatAll"}}}]}')
def log(self, msg, level='Info'):
""" simple logging mechanism to print to stdout """
log_msg = "{0}: [{1}] {2}".format(self.current_process_id, level, msg)
print(log_msg)
def error(self, msg):
self.log(msg, 'Error')
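# Minimal usage sketch (illustrative only, not part of the extension code):
#   logger = ConsoleLogger()
#   logger.log('starting encryption test')   # -> "<pid>: [Info] starting encryption test"
#   logger.error('device mapper not found')  # -> "<pid>: [Error] device mapper not found"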
|
test/unit/shed_unit/__init__.py | rikeshi/galaxy | 1,085 | 12721542 | <filename>test/unit/shed_unit/__init__.py<gh_stars>1000+
"""
Module cannot be called tool_shed, because this conflicts with lib/tool_shed
also at top level of path.
"""
|
ggplot/scales/scale_x_discrete.py | themiwi/ggplot | 1,133 | 12721574 | <filename>ggplot/scales/scale_x_discrete.py
from .scale_x_continuous import scale_x_continuous as scale_x_discrete
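# Note: this ggplot port treats the discrete x scale as a plain alias for
# scale_x_continuous, so scale_x_discrete accepts exactly the same arguments.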
|
image_transform.py | ShuaiW/kaggle-heart | 182 | 12721576 | <filename>image_transform.py
"""Library implementing the data augmentations.
"""
import numpy as np
import skimage.io
import skimage.transform
from custom_warnings import deprecated
tform_identity = skimage.transform.AffineTransform()
NO_AUGMENT_PARAMS = {
"zoom_x": 1.0,
"zoom_y": 1.0,
"rotate": 0.0,
"shear": 0.0,
"skew_x": 0.0,
"skew_y": 0.0,
"translate_x": 0.0,
"translate_y": 0.0,
"flip_vert": 0.0,
"roll_time": 0.0,
"flip_time": 0.0,
"change_brightness": 0.0,
}
def resize_to_make_it_fit(images, output_shape=(50, 50)):
"""Resizes the images to a given shape.
"""
max_time = max(images[i].shape[0] for i in xrange(len(images)))
final_shape = (len(images),max_time) + output_shape
result = np.zeros(final_shape, dtype="float32")
volume_change = []
#result.reshape((final_shape[0],-1) + output_shape)
for i, mri_slice in enumerate(images):
mri_slice = mri_slice.reshape((-1,)+mri_slice.shape[-2:])
scaling = max(mri_slice[0].shape[-2]/output_shape[-2], mri_slice[0].shape[-1]/output_shape[-1])
tform = build_rescale_transform(scaling, mri_slice[0].shape[-2:], target_shape=output_shape)
for j, frame in enumerate(mri_slice):
# TODO: can't this be done better?
result[i,j] = fast_warp(frame, tform, output_shape=output_shape)
A = tform.params[:2, :2]
volume_change.append(np.linalg.norm(A[:,0]) * np.linalg.norm(A[:,1]))
assert tform.params[2,2] == 1, (tform.params[2,2],)
#result.reshape(final_shape)
return result, volume_change
@deprecated
def normscale_resize_and_augment(slices, output_shape=(50, 50), augment=None,
pixel_spacing=(1,1), shift_center=(.4, .5),
normalised_patch_size=(200,200)):
"""Normalizes the scale, augments, and crops the image.
WARNING: This function contains bugs. We kept it around to ensure older
models would still behave in the same way. Use normscale_resize_and_augment_2
instead.
"""
if not pixel_spacing[0] == pixel_spacing[1]:
raise NotImplementedError("Only supports square pixels")
# No augmentation:
if augment is None:
augment = NO_AUGMENT_PARAMS
current_shape = slices[0].shape[-2:]
normalised_shape = tuple(int(float(d)*ps) for d,ps in zip(current_shape, pixel_spacing))
max_time = max(slices[i].shape[0] for i in xrange(len(slices)))
final_shape = (len(slices),max_time) + output_shape
result = np.zeros(final_shape, dtype="float32")
for i, mri_slice in enumerate(slices):
# For each slice, build a transformation that extracts the right patch,
# and augments the data.
# First, we scale the images such that they all have the same scale
norm_rescaling = 1./pixel_spacing[0]
tform_normscale = build_rescale_transform(
norm_rescaling, mri_slice[0].shape[-2:], target_shape=normalised_shape)
# Next, we shift the center of the image to the left (assumes upside_up normalisation)
tform_shift_center, tform_shift_uncenter = (
build_shift_center_transform(
normalised_shape, shift_center, normalised_patch_size))
# zooming is OK
augment_tform = build_augmentation_transform(**augment)
patch_scale = max(
normalised_patch_size[0]/output_shape[0],
normalised_patch_size[1]/output_shape[1])
tform_patch_scale = build_rescale_transform(
patch_scale, normalised_patch_size, target_shape=output_shape)
# x and y axis transform
total_tform = tform_patch_scale + tform_shift_uncenter + augment_tform + tform_shift_center + tform_normscale
# Time axis transform
t_map = range(mri_slice.shape[0])
if "roll_time" in augment:
t_map = np.roll(t_map, int(np.floor(augment["roll_time"])))
if "flip_time" in augment and augment["flip_time"] > 0.5:
t_map = t_map[::-1]
for j, frame in enumerate(mri_slice):
j_shifted = t_map[j]
result[i,j_shifted] = fast_warp(frame, total_tform, output_shape=output_shape)
return result
NRMSC_DEFAULT_SHIFT_CENTER = (.4, .5)
def normscale_resize_and_augment_2(slices, output_shape=(50, 50), augment=None,
pixel_spacing=(1,1), shift_center=(None, None),
normalised_patch_size=(200,200)):
"""Normalizes the scale, augments, and crops the image.
"""
if not pixel_spacing[0] == pixel_spacing[1]:
raise NotImplementedError("Only supports square pixels")
if shift_center == (None, None):
shift_center = NRMSC_DEFAULT_SHIFT_CENTER
# No augmentation:
if augment is None:
augment = NO_AUGMENT_PARAMS
current_shape = slices[0].shape[-2:]
normalised_shape = tuple(int(float(d)*ps) for d,ps in zip(current_shape, pixel_spacing))
max_time = max(slices[i].shape[0] for i in xrange(len(slices)))
final_shape = (len(slices),max_time) + output_shape
result = np.zeros(final_shape, dtype="float32")
for i, mri_slice in enumerate(slices):
# For each slice, build a transformation that extracts the right patch,
# and augments the data.
# First, we scale the images such that they all have the same scale
norm_rescaling = 1./pixel_spacing[0]
tform_normscale = build_rescale_transform(
norm_rescaling, mri_slice[0].shape[-2:], target_shape=normalised_shape)
# Next, we shift the center of the image to the left (assumes upside_up normalisation)
tform_shift_center, tform_shift_uncenter = (
build_shift_center_transform(
normalised_shape, shift_center, normalised_patch_size))
augment_tform = build_augmentation_transform(**augment)
patch_scale = max(
float(normalised_patch_size[0])/output_shape[0],
float(normalised_patch_size[1])/output_shape[1])
tform_patch_scale = build_rescale_transform(
patch_scale, normalised_patch_size, target_shape=output_shape)
# x and y axis transform
total_tform = tform_patch_scale + tform_shift_uncenter + augment_tform + tform_shift_center + tform_normscale
# Time axis transform
t_map = range(mri_slice.shape[0])
if "roll_time" in augment:
t_map = np.roll(t_map, int(np.floor(augment["roll_time"])))
if "flip_time" in augment and augment["flip_time"] > 0.5:
t_map = t_map[::-1]
for j, frame in enumerate(mri_slice):
j_shifted = t_map[j]
result[i,j_shifted] = fast_warp(frame, total_tform, output_shape=output_shape)
return result
def resize_and_augment(images, output_shape=(50, 50), augment=None):
if augment is None:
return resize_to_make_it_fit(images, output_shape=output_shape)
max_time = max(images[i].shape[0] for i in xrange(len(images)))
final_shape = (len(images),max_time) + output_shape
result = np.zeros(final_shape, dtype="float32")
volume_change = []
#result.reshape((final_shape[0],-1) + output_shape)
for i, mri_slice in enumerate(images):
mri_slice = mri_slice.reshape((-1,)+mri_slice.shape[-2:])
scaling = max(1.0*mri_slice[0].shape[-2]/output_shape[-2], 1.0*mri_slice[0].shape[-1]/output_shape[-1])
tform = build_rescale_transform(scaling, mri_slice[0].shape[-2:], target_shape=output_shape)
# add rotation
# add skew
# add translation
tform_center, tform_uncenter = build_center_uncenter_transforms(mri_slice[0].shape[-2:])
augment_tform = build_augmentation_transform((1.0, 1.0), augment["rotation"], augment["shear"], augment["translation"], flip=False)
total_tform = tform + tform_uncenter + augment_tform + tform_center
for j, frame in enumerate(mri_slice):
result[i,j] = fast_warp(frame, total_tform, output_shape=output_shape)
A = total_tform.params[:2, :2]
volume_change.append(np.linalg.norm(A[:,0]) * np.linalg.norm(A[:,1]))
assert total_tform.params[2,2] == 1, (total_tform.params[2,2],)
#result.reshape(final_shape)
return result, volume_change
def resize_to_make_sunny_fit(image, output_shape=(50, 50)):
scaling = max(image.shape[-2]/output_shape[-2], image.shape[-1]/output_shape[-1])
tform = build_rescale_transform(scaling, image.shape[-2:], target_shape=output_shape)
return fast_warp(image, tform, output_shape=output_shape)
def resize_and_augment_sunny(image, output_shape=(50, 50), augment=None):
if augment is None:
return resize_to_make_sunny_fit(image, output_shape=(50, 50))
final_shape = image.shape[:-2] + output_shape
result = np.zeros(final_shape, dtype="float32")
#result.reshape((final_shape[0],-1) + output_shape)
scaling = max(image.shape[-2]/output_shape[-2], image.shape[-1]/output_shape[-1])
tform = build_rescale_transform(scaling, image.shape[-2:], target_shape=output_shape)
# add rotation
# add skew
# add translation
tform_center, tform_uncenter = build_center_uncenter_transforms(image.shape[-2:])
augment_tform = build_augmentation_transform((1.0, 1.0), augment["rotation"], augment["shear"], augment["translation"], flip=False)
total_tform = tform + tform_uncenter + augment_tform + tform_center
#result.reshape(final_shape)
return fast_warp(image, total_tform, output_shape=output_shape, mode='constant')
def fast_warp(img, tf, output_shape=(50, 50), mode='constant', order=1):
"""
This wrapper function is faster than skimage.transform.warp
"""
m = tf.params # tf._matrix is
return skimage.transform._warps_cy._warp_fast(img, m, output_shape=output_shape, mode=mode, order=order)
def build_centering_transform(image_shape, target_shape=(50, 50)):
rows, cols = image_shape
trows, tcols = target_shape
shift_x = (cols - tcols) / 2.0
shift_y = (rows - trows) / 2.0
return skimage.transform.SimilarityTransform(translation=(shift_x, shift_y))
def build_rescale_transform(downscale_factor, image_shape, target_shape):
"""
estimating the correct rescaling transform is slow, so just use the
downscale_factor to define a transform directly. This probably isn't
100% correct, but it shouldn't matter much in practice.
"""
rows, cols = image_shape
trows, tcols = target_shape
tform_ds = skimage.transform.AffineTransform(scale=(downscale_factor, downscale_factor))
# centering
shift_x = cols / (2.0 * downscale_factor) - tcols / 2.0
shift_y = rows / (2.0 * downscale_factor) - trows / 2.0
tform_shift_ds = skimage.transform.SimilarityTransform(translation=(shift_x, shift_y))
return tform_shift_ds + tform_ds
def build_center_uncenter_transforms(image_shape):
"""
These are used to ensure that zooming and rotation happen around the center of the image.
Use these transforms to center the image before such a transform and to uncenter it afterwards.
"""
center_shift = np.array([image_shape[1], image_shape[0]]) / 2.0 - 0.5 # need to swap rows and cols here apparently! confusing!
tform_uncenter = skimage.transform.SimilarityTransform(translation=-center_shift)
tform_center = skimage.transform.SimilarityTransform(translation=center_shift)
return tform_center, tform_uncenter
def build_shift_center_transform(image_shape, center_location, patch_size):
"""Shifts the center of the image to a given location.
This function tries to include as much as possible of the image in the patch
centered around the new center. If the patch around the ideal center
location doesn't fit within the image, we shift the center to the right so
that it does.
"""
center_absolute_location = [
center_location[0]*image_shape[1], center_location[1]*image_shape[0]]
# Check for overlap at the edges
center_absolute_location[0] = max(
center_absolute_location[0], patch_size[1]/2.0)
center_absolute_location[1] = max(
center_absolute_location[1], patch_size[0]/2.0)
center_absolute_location[0] = min(
center_absolute_location[0], image_shape[1] - patch_size[1]/2.0)
center_absolute_location[1] = min(
center_absolute_location[1], image_shape[0] - patch_size[0]/2.0)
# Check for overlap at both edges
if patch_size[0] > image_shape[0]:
center_absolute_location[1] = image_shape[0] / 2.0
if patch_size[1] > image_shape[1]:
center_absolute_location[0] = image_shape[1] / 2.0
# Build transform
new_center = np.array(center_absolute_location)
translation_center = new_center - 0.5
translation_uncenter = -np.array((patch_size[1]/2.0, patch_size[0]/2.0)) - 0.5
return (
skimage.transform.SimilarityTransform(translation=translation_center),
skimage.transform.SimilarityTransform(translation=translation_uncenter))
def build_augmentation_transform(zoom_x=1.0,
zoom_y=1.0,
skew_x=0,
skew_y=0,
rotate=0,
shear=0,
translate_x=0,
translate_y=0,
flip=False,
flip_vert=False,
**kwargs):
#print "Not performed transformations:", kwargs.keys()
if flip > 0.5:
shear += 180
rotate += 180
# shear by 180 degrees is equivalent to rotation by 180 degrees + flip.
# So after that we rotate it another 180 degrees to get just the flip.
if flip_vert > 0.5:
shear += 180
tform_augment = skimage.transform.AffineTransform(scale=(1/zoom_x, 1/zoom_y), rotation=np.deg2rad(rotate), shear=np.deg2rad(shear), translation=(translate_x, translate_y))
skew_x = np.deg2rad(skew_x)
skew_y = np.deg2rad(skew_y)
tform_skew = skimage.transform.ProjectiveTransform(matrix=np.array([[np.tan(skew_x)*np.tan(skew_y) + 1, np.tan(skew_x), 0],
[np.tan(skew_y), 1, 0],
[0, 0, 1]]))
return tform_skew + tform_augment
@deprecated
def random_perturbation_transform(zoom_range=[1.0, 1.0], rotation_range=[0.0, 0.0], skew_x_range=[0.0, 0.0], skew_y_range=[0.0, 0.0], shear_range=[0.0, 0.0], translation_range=[0.0, 0.0], do_flip=True, allow_stretch=False, rng=np.random):
shift_x = rng.uniform(*translation_range)
shift_y = rng.uniform(*translation_range)
translate = (shift_x, shift_y)
rotate = rng.uniform(*rotation_range)
shear = rng.uniform(*shear_range)
skew_x = rng.uniform(*skew_x_range)
skew_y = rng.uniform(*skew_y_range)
if do_flip:
flip = (rng.randint(2) > 0) # flip half of the time
else:
flip = False
# random zoom
log_zoom_range = [np.log(z) for z in zoom_range]
if isinstance(allow_stretch, float):
log_stretch_range = [-np.log(allow_stretch), np.log(allow_stretch)]
zoom = np.exp(rng.uniform(*log_zoom_range))
stretch_x = np.exp(rng.uniform(*log_stretch_range))
stretch_y = np.exp(rng.uniform(*log_stretch_range))
zoom_x = zoom * stretch_x
zoom_y = zoom * stretch_y
elif allow_stretch is True: # avoid bugs, f.e. when it is an integer
zoom_x = np.exp(rng.uniform(*log_zoom_range))
zoom_y = np.exp(rng.uniform(*log_zoom_range))
else:
zoom_x = zoom_y = np.exp(rng.uniform(*log_zoom_range))
# the range should be multiplicatively symmetric, so [1/1.1, 1.1] instead of [0.9, 1.1] makes more sense.
return build_augmentation_transform(zoom_x=zoom_x,
zoom_y=zoom_y,
skew_x=skew_x,
skew_y=skew_y,
rotate=rotate,
shear=shear,
translate_x=translate[0],
translate_y=translate[1],
flip=flip
)
@deprecated
def perturb(img, augmentation_params, target_shape=(50, 50), rng=np.random):
# # DEBUG: draw a border to see where the image ends up
# img[0, :] = 0.5
# img[-1, :] = 0.5
# img[:, 0] = 0.5
# img[:, -1] = 0.5
tform_centering = build_centering_transform(img.shape, target_shape)
tform_center, tform_uncenter = build_center_uncenter_transforms(img.shape)
tform_augment = random_perturbation_transform(rng=rng, **augmentation_params)
tform_augment = tform_uncenter + tform_augment + tform_center # shift to center, augment, shift back (for the rotation/shearing)
return fast_warp(img, tform_centering + tform_augment, output_shape=target_shape, mode='constant').astype('float32')
## RESCALING
@deprecated
def perturb_rescaled(img, scale, augmentation_params, target_shape=(50, 50), rng=np.random):
"""
scale is a DOWNSCALING factor.
"""
tform_rescale = build_rescale_transform(scale, img.shape, target_shape) # also does centering
tform_center, tform_uncenter = build_center_uncenter_transforms(img.shape)
tform_augment = random_perturbation_transform(rng=rng, **augmentation_params)
tform_augment = tform_uncenter + tform_augment + tform_center # shift to center, augment, shift back (for the rotation/shearing)
return fast_warp(img, tform_rescale + tform_augment, output_shape=target_shape, mode='constant').astype('float32')
# for test-time augmentation
@deprecated
def perturb_rescaled_fixed(img, scale, tform_augment, target_shape=(50, 50)):
"""
scale is a DOWNSCALING factor.
"""
tform_rescale = build_rescale_transform(scale, img.shape, target_shape) # also does centering
tform_center, tform_uncenter = build_center_uncenter_transforms(img.shape)
tform_augment = tform_uncenter + tform_augment + tform_center # shift to center, augment, shift back (for the rotation/shearing)
return fast_warp(img, tform_rescale + tform_augment, output_shape=target_shape, mode='constant').astype('float32')
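if __name__ == '__main__':
    # Small smoke test (illustrative only, not part of the original library):
    # rescale a random 100x120 frame down to a 50x50 patch with the helpers above.
    demo_img = np.random.rand(100, 120).astype('float32')
    demo_scale = max(demo_img.shape[0] / 50.0, demo_img.shape[1] / 50.0)
    demo_tform = build_rescale_transform(demo_scale, demo_img.shape, target_shape=(50, 50))
    demo_patch = fast_warp(demo_img, demo_tform, output_shape=(50, 50))
    print(demo_patch.shape)  # expected: (50, 50)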
|
tests/test_doctests.py | brainix/pottery | 625 | 12721580 | <filename>tests/test_doctests.py<gh_stars>100-1000
# --------------------------------------------------------------------------- #
# test_doctests.py #
# #
# Copyright © 2015-2021, <NAME>, original author. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at: #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# --------------------------------------------------------------------------- #
import doctest
import importlib
import os
import pathlib
import unittest
from tests.base import TestCase # type: ignore
class DoctestTests(TestCase): # pragma: no cover
@staticmethod
def _modules():
test_dir = pathlib.Path(__file__).parent
root_dir = test_dir.parent
source_dir = root_dir / 'pottery'
source_files = source_dir.glob('**/*.py')
for source_file in source_files:
relative_path = source_file.relative_to(root_dir)
parts = list(relative_path.parts)
parts[-1] = source_file.stem
module_name = '.'.join(parts)
module = importlib.import_module(module_name)
yield module
@unittest.skipUnless(
'TEST_DOCTESTS' in os.environ,
'our doctests run too slowly',
)
def test_doctests(self):
'Run doctests and confirm that they work and are not science fiction'
for module in self._modules():
with self.subTest(module=module):
results = doctest.testmod(m=module)
assert not results.failed
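# These doctests only run when the TEST_DOCTESTS environment variable is set,
# e.g. (assumed invocation; adjust to your test runner):
#   TEST_DOCTESTS=1 python -m pytest tests/test_doctests.py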
|
etl/parsers/etw/Microsoft_Windows_DCLocator.py | IMULMUL/etl-parser | 104 | 12721581 | <reponame>IMULMUL/etl-parser
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-DCLocator
GUID : cfaa5446-c6c4-4f5c-866f-31c9b55b962d
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d"), event_id=1, version=0)
class Microsoft_Windows_DCLocator_1_0(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d"), event_id=2, version=0)
class Microsoft_Windows_DCLocator_2_0(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d"), event_id=3, version=0)
class Microsoft_Windows_DCLocator_3_0(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d"), event_id=4, version=0)
class Microsoft_Windows_DCLocator_4_0(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d"), event_id=5, version=0)
class Microsoft_Windows_DCLocator_5_0(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d"), event_id=6, version=0)
class Microsoft_Windows_DCLocator_6_0(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d"), event_id=7, version=0)
class Microsoft_Windows_DCLocator_7_0(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d"), event_id=8, version=0)
class Microsoft_Windows_DCLocator_8_0(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("cfaa5446-c6c4-4f5c-866f-31c9b55b962d"), event_id=9, version=0)
class Microsoft_Windows_DCLocator_9_0(Etw):
pattern = Struct(
"Message" / WString
)
|
src/aft_lambda/aft_customizations/aft_customizations_get_pipeline_executions.py | mondelez-ctiso/terraform-aws-control_tower_account_factory | 219 | 12721592 | <gh_stars>100-1000
# Copyright Amazon.com, Inc. or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import inspect
from typing import TYPE_CHECKING, Any, Dict
from aft_common import aft_utils as utils
from aft_common import notifications
from aft_common.customizations import get_running_pipeline_count, list_pipelines
from boto3.session import Session
if TYPE_CHECKING:
from aws_lambda_powertools.utilities.typing import LambdaContext
else:
LambdaContext = object
logger = utils.get_logger()
def lambda_handler(event: Dict[str, Any], context: LambdaContext) -> Dict[str, int]:
session = Session()
try:
pipelines = list_pipelines(session)
running_pipelines = get_running_pipeline_count(session, pipelines)
return {"running_pipelines": running_pipelines}
except Exception as error:
notifications.send_lambda_failure_sns_message(
session=session,
message=str(error),
context=context,
subject="Failed to list all AFT account customization pipelines",
)
message = {
"FILE": __file__.split("/")[-1],
"METHOD": inspect.stack()[0][3],
"EXCEPTION": str(error),
}
logger.exception(message)
raise
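# Illustrative local invocation (assumed; in production this handler is invoked
# by the AFT customizations state machine and needs AWS credentials that can
# read the account customization pipelines):
#   print(lambda_handler({}, None))  # -> {"running_pipelines": <count>}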
|
archivebox/core/middleware.py | sarvex/ArchiveBox | 6,340 | 12721620 | __package__ = 'archivebox.core'
from django.utils import timezone
from ..config import PUBLIC_SNAPSHOTS
def detect_timezone(request, activate: bool=True):
gmt_offset = (request.COOKIES.get('GMT_OFFSET') or '').strip()
tz = None
if gmt_offset.replace('-', '').isdigit():
tz = timezone.get_fixed_timezone(int(gmt_offset))
if activate:
timezone.activate(tz)
# print('GMT_OFFSET', gmt_offset, tz)
return tz
def TimezoneMiddleware(get_response):
def middleware(request):
detect_timezone(request, activate=True)
return get_response(request)
return middleware
def CacheControlMiddleware(get_response):
def middleware(request):
response = get_response(request)
if '/archive/' in request.path or '/static/' in request.path:
policy = 'public' if PUBLIC_SNAPSHOTS else 'private'
response['Cache-Control'] = f'{policy}, max-age=60, stale-while-revalidate=300'
# print('Set Cache-Control header to', response['Cache-Control'])
return response
return middleware
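# Minimal sketch of how these middleware factories would typically be enabled
# in the Django settings module (illustrative; the actual ArchiveBox settings
# may differ):
#   MIDDLEWARE = [
#       ...,
#       'core.middleware.TimezoneMiddleware',
#       'core.middleware.CacheControlMiddleware',
#   ]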
|
sitepackages/djangosecure/decorators.py | bitcpf/djangoage | 167 | 12721638 | from django.utils.functional import wraps
def frame_deny_exempt(view):
@wraps(view)
def inner(*args, **kwargs):
response = view(*args, **kwargs)
response._frame_deny_exempt = True
return response
return inner
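# Example usage (illustrative sketch, assuming a standard Django view):
#   from djangosecure.decorators import frame_deny_exempt
#
#   @frame_deny_exempt
#   def embeddable_view(request):
#       ...
#
# Responses flagged with _frame_deny_exempt are then skipped by the middleware
# that would otherwise add the X-Frame-Options header.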
|
LeetCode/python3/1031.py | ZintrulCre/LeetCode_Archiver | 279 | 12721648 | <gh_stars>100-1000
class Solution:
def maxSumTwoNoOverlap(self, A: List[int], L: int, M: int) -> int:
prefix, n, res, left = [0 for _ in range(len(A) + 1)], len(A) + 1, 0, 0
for i in range(1, n):
prefix[i] = prefix[i - 1] + A[i - 1]
for i in range(L + M, n):
left = max(left, prefix[i - M] - prefix[i - M - L])
res = max(res, left + prefix[i] - prefix[i - M])
left = 0
for i in range(L + M, n):
left = max(left, prefix[i - L] - prefix[i - M - L])
res = max(res, left + prefix[i] - prefix[i - L])
return res
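# Quick check against LeetCode's first example (not part of the submission):
#   Solution().maxSumTwoNoOverlap([0, 6, 5, 2, 2, 5, 1, 9, 4], 1, 2)  # -> 20 ([9] and [6, 5])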
|
recipes/Python/576626_Skype_chat_to_speech/recipe-576626.py | tdiprima/code | 2,023 | 12721656 | # ----------------------------------------------------------------------------------------------------
# Python / Skype4Py example that prints out chat messages
#
# Tested with Skype4Py version 0.9.28.5 and Skype version 192.168.3.11
import sys
import os
import time
import Skype4Py
import random
def ndsSay(ndsWords):
ndsIn = str(ndsWords)
zcmd='espeak "'+ndsIn+'"'
print zcmd
f=os.popen(zcmd)
f.close()
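# Note: ndsSay() shells out to the 'espeak' text-to-speech binary via os.popen,
# so espeak must be installed and on the PATH for spoken output to work.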
ndsWords=""
ndsTalk=""
zcmd=''
# ----------------------------------------------------------------------------------------------------
# Fired on attachment status change. Here used to re-attach this script to Skype in case attachment is lost. Just in
#case.
def OnAttach(status):
print 'API attachment status: ' + skype.Convert.AttachmentStatusToText(status)
if status == Skype4Py.apiAttachAvailable:
skype.Attach()
if status == Skype4Py.apiAttachSuccess:
print('***************************************')
# ----------------------------------------------------------------------------------------------------
# Fired on chat message status change.
# Statuses can be: 'UNKNOWN' 'SENDING' 'SENT' 'RECEIVED' 'READ'
def OnMessageStatus(Message, Status):
if Status == 'RECEIVED':
print(Message.FromDisplayName + ': ' + Message.Body)
ndsSay(Message.FromDisplayName)
ndsSay(Message.Body)
if Status == 'READ':
ndsMonkey = "todo"
print(Message.FromDisplayName + ': ' + Message.Body)
ndsSay(Message.FromDisplayName)
ndsSay(Message.Body)
if Status == 'SENT':
print('Myself ' + Message.Body)
# ----------------------------------------------------------------------------------------------------
# Creating instance of Skype object, assigning handler functions and attaching to Skype.
skype = Skype4Py.Skype()
skype.OnAttachmentStatus = OnAttach
skype.OnMessageStatus = OnMessageStatus
print('***************************************')
print 'Connecting to Skype..'
skype.Attach()
# ----------------------------------------------------------------------------------------------------
# Looping until user types 'exit'
Cmd = ''
while not Cmd == 'exit':
Cmd = raw_input('')
|
others/rpi-tkinter/main.py | sesu089/stackoverflow | 302 | 12721666 | <gh_stars>100-1000
from tkinter import Tk,Label,Button
from tkinter import *
import sys
import time
import os
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(21,GPIO.IN, pull_up_down =GPIO.PUD_DOWN)
top=Tk()
top.minsize(666,666)
top.maxsize(666,666)
######################################################################
class App:
def __init__(self, master):
####################################################################
self.button = Button(top, text='START',command=self.convert0)
self.button.place(x=50,y=50)
self.label=Label(top,text='').grid(row=20, column=5)
self.clock = Label(top, font=('times', 20, 'bold'), bg='green')
self.clock.place(x=200,y=200)
self.isRunning = False
GPIO.add_event_detect(21, GPIO.BOTH, callback=self.callback)
###################################################################
def convert0 ( self,tog=[0]):
tog[0] = not tog[0]
if tog[0]:
#########################################
self.button.config(text='START')
self.button.configure(bg = "blue")
self.button.configure(fg = "white")
self.label=Label(top,text='OFF',bg="blue",fg="white").place(x=150,y=55)
#########################################
else:
self.button.config(text='STOP')
self.button.configure(bg ="red")
self.button.configure(fg ="white")
self.label=Label(top,text='OFF',bg="red",fg="red").place(x=150,y=55)
self.label=Label(top,text='ON',bg="red",fg="white").place(x=150,y=55)
#########################################
def tick(self):
# get the current local time from the PC
time1 = time.strftime('%I:%M:%S')
# if time string has changed, update it
self.clock.config(text=time1)
# calls itself every 200 milliseconds
# to update the time display as needed
# could use >200 ms, but display gets jerky
if self.isRunning:
self.clock.after(200,self.tick)
###################################################################
def start(self):
self.isRunning = True
self.clock.after(200,self.tick)
def stop(self):
self.isRunning = False
def callback(self, channel):
if self.isRunning:
self.stop()
else:
self.start()
app = App(top)
top.mainloop()
|
tests/tensortrade/unit/feed/core/test_generic.py | nicomon24/tensortrade | 3,081 | 12721711 | <filename>tests/tensortrade/unit/feed/core/test_generic.py
from tensortrade.feed import Stream, DataFeed
def test_generic():
s1 = Stream.source(["hello", "my", "name", "is"], dtype="string")
s2 = Stream.source([1, 2, 3, 4, 5, 6])
g1 = s1.apply(lambda x: x[0]).rename("g1")
g2 = s2.lag().rename("g2")
feed = DataFeed([g1, g2])
feed.compile()
feed.next()
assert feed.next() == {"g1": "m", "g2": 1}
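# The first next() above is discarded so that the lag() stream has a
# previous value; on the second step g1 yields the first letter of "my"
# and g2 yields the lagged value of s2, i.e. 1.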
|
openvqa/models/butd/tda.py | AcceptedDoge/openvqa | 274 | 12721713 | # --------------------------------------------------------
# OpenVQA
# Written by <NAME> https://github.com/ParadoxZW
# based on the implementation in https://github.com/hengyuan-hu/bottom-up-attention-vqa
# ELU is chosen as the activation function in non-linear layers due to
# experimental results indicating that ELU outperforms ReLU in the BUTD model.
# --------------------------------------------------------
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.weight_norm import weight_norm
import torch
import math
# ------------------------------
# ----- Weight Normal MLP ------
# ------------------------------
class MLP(nn.Module):
"""
Non-linear fully connected network.
"""
def __init__(self, dims, act='ELU', dropout_r=0.0):
super(MLP, self).__init__()
layers = []
for i in range(len(dims) - 1):
in_dim = dims[i]
out_dim = dims[i + 1]
if dropout_r > 0:
layers.append(nn.Dropout(dropout_r))
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
if act != '':
layers.append(getattr(nn, act)())
self.mlp = nn.Sequential(*layers)
def forward(self, x):
return self.mlp(x)
# ------------------------------
# ---Top Down Attention Map ----
# ------------------------------
class AttnMap(nn.Module):
'''
implementation of top down attention
'''
def __init__(self, __C):
super(AttnMap, self).__init__()
self.__C = __C
self.linear_q = weight_norm(
nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE), dim=None)
self.linear_v = weight_norm(
nn.Linear(__C.IMG_FEAT_SIZE, __C.IMG_FEAT_SIZE), dim=None)
self.nonlinear = MLP(
[__C.IMG_FEAT_SIZE + __C.HIDDEN_SIZE, __C.HIDDEN_SIZE], dropout_r=__C.DROPOUT_R)
self.linear = weight_norm(nn.Linear(__C.HIDDEN_SIZE, 1), dim=None)
def forward(self, q, v):
v = self.linear_v(v)
q = self.linear_q(q)
logits = self.logits(q, v)
w = nn.functional.softmax(logits, 1)
return w
def logits(self, q, v):
num_objs = v.size(1)
q = q.unsqueeze(1).repeat(1, num_objs, 1)
vq = torch.cat((v, q), 2)
joint_repr = self.nonlinear(vq)
logits = self.linear(joint_repr)
return logits
# ------------------------------
# ---- Attended Joint Map ------
# ------------------------------
class TDA(nn.Module):
def __init__(self, __C):
super(TDA, self).__init__()
self.__C = __C
self.v_att = AttnMap(__C)
self.q_net = MLP([__C.HIDDEN_SIZE, __C.HIDDEN_SIZE])
self.v_net = MLP([__C.IMG_FEAT_SIZE, __C.HIDDEN_SIZE])
def forward(self, q, v):
att = self.v_att(q, v)
atted_v = (att * v).sum(1)
q_repr = self.q_net(q)
v_repr = self.v_net(atted_v)
joint_repr = q_repr * v_repr
return joint_repr
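# Shape walkthrough (illustrative, assuming batch size B, N image regions,
# HIDDEN_SIZE H and IMG_FEAT_SIZE D):
#   q: (B, H), v: (B, N, D)
#   att = self.v_att(q, v) has shape (B, N, 1) (softmax over the N regions),
#   atted_v = (att * v).sum(1) has shape (B, D),
#   q_net(q) and v_net(atted_v) are both (B, H), and their elementwise product
#   joint_repr is the fused representation passed on to the answer classifier.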
|
rpython/jit/backend/llsupport/test/test_regalloc.py | nanjekyejoannah/pypy | 333 | 12721723 | <reponame>nanjekyejoannah/pypy<gh_stars>100-1000
import py
import sys
from rpython.jit.metainterp.history import ConstInt, INT, FLOAT
from rpython.jit.metainterp.history import BasicFailDescr, TargetToken
from rpython.jit.metainterp.resoperation import rop
from rpython.jit.metainterp.resoperation import InputArgInt, InputArgRef,\
InputArgFloat
from rpython.jit.backend.detect_cpu import getcpuclass
from rpython.jit.backend.llsupport.regalloc import FrameManager, LinkedList
from rpython.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan,\
Lifetime as RealLifetime, UNDEF_POS, BaseRegalloc, compute_vars_longevity,\
LifetimeManager
from rpython.jit.tool.oparser import parse
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.annlowlevel import llhelper
def newboxes(*values):
return [InputArgInt(v) for v in values]
def newrefboxes(count):
return [InputArgRef() for _ in range(count)]
def Lifetime(definition_pos=UNDEF_POS, last_usage=UNDEF_POS,
real_usages=UNDEF_POS):
if real_usages == UNDEF_POS:
real_usages = last_usage
lifetime = RealLifetime(definition_pos, last_usage)
if isinstance(real_usages, int):
real_usages = [real_usages]
lifetime.real_usages = real_usages
return lifetime
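# Illustrative examples of the helper above (added for clarity): Lifetime(0, 5)
# builds a lifetime defined at position 0 and last used at position 5 with
# real_usages == [5]; Lifetime(0, 5, [2, 4]) keeps last_usage == 5 but records
# real usages at positions 2 and 4 instead.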
def boxes_and_longevity(num):
res = []
longevity = {}
for i in range(num):
box = InputArgInt(0)
res.append(box)
longevity[box] = Lifetime(0, 1)
return res, longevity
class FakeReg(object):
def __init__(self, i):
self.n = i
def _getregkey(self):
return self.n
def is_memory_reference(self):
return False
def __repr__(self):
return 'r%d' % self.n
r0, r1, r2, r3 = [FakeReg(i) for i in range(4)]
r4, r5, r6, r7, r8, r9 = [FakeReg(i) for i in range(4, 10)]
regs = [r0, r1, r2, r3]
class RegisterManager(BaseRegMan):
all_regs = regs
def __init__(self, longevity, frame_manager=None, assembler=None):
if isinstance(longevity, dict):
longevity = LifetimeManager(longevity)
BaseRegMan.__init__(self, longevity, frame_manager, assembler)
def convert_to_imm(self, v):
return v
class FakeFramePos(object):
def __init__(self, pos, box_type):
self.pos = pos
self.value = pos
self.box_type = box_type
def _getregkey(self):
return ~self.value
def is_memory_reference(self):
return True
def __repr__(self):
return 'FramePos<%d,%s>' % (self.pos, self.box_type)
def __eq__(self, other):
return self.pos == other.pos and self.box_type == other.box_type
def __ne__(self, other):
return not self == other
class TFrameManagerEqual(FrameManager):
def frame_pos(self, i, box_type):
return FakeFramePos(i, box_type)
def frame_size(self, box_type):
return 1
def get_loc_index(self, loc):
assert isinstance(loc, FakeFramePos)
return loc.pos
class TFrameManager(FrameManager):
def frame_pos(self, i, box_type):
return FakeFramePos(i, box_type)
def frame_size(self, box_type):
if box_type == FLOAT:
return 2
else:
return 1
def get_loc_index(self, loc):
assert isinstance(loc, FakeFramePos)
return loc.pos
class FakeCPU(object):
def get_baseofs_of_frame_field(self):
return 0
class MockAsm(object):
def __init__(self):
self.moves = []
self.emitted = []
self.cpu = FakeCPU()
# XXX register allocation statistics to be removed later
self.num_moves_calls = 0
self.num_moves_jump = 0
self.num_spills = 0
self.num_spills_to_existing = 0
self.num_reloads = 0
self.preamble_num_moves_calls = 0
self.preamble_num_moves_jump = 0
self.preamble_num_spills = 0
self.preamble_num_spills_to_existing = 0
self.preamble_num_reloads = 0
def regalloc_mov(self, from_loc, to_loc):
self.moves.append((from_loc, to_loc))
self.emitted.append(("move", to_loc, from_loc))
def test_lifetime_next_real_usage():
lt = RealLifetime(0, 1000)
lt.real_usages = [0, 1, 5, 10, 24, 35, 55, 56, 57, 90, 92, 100]
for i in range(100):
next = lt.next_real_usage(i)
assert next in lt.real_usages
assert next > i
assert lt.real_usages[lt.real_usages.index(next) - 1] <= i
assert lt.next_real_usage(100) == -1
assert lt.next_real_usage(101) == -1
def test_fixed_position():
b0, b1, b2 = newboxes(0, 0, 0)
l0 = Lifetime(0, 5)
l1 = Lifetime(2, 9)
l2 = Lifetime(0, 9)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2})
longevity.fixed_register(1, r0, b0)
longevity.fixed_register(4, r2, b0)
longevity.fixed_register(5, r1, b1)
longevity.fixed_register(8, r1, b1)
assert l0.fixed_positions == [(1, r0), (4, r2)]
assert l1.fixed_positions == [(5, r1), (8, r1)]
assert l2.fixed_positions is None
fpr0 = longevity.fixed_register_use[r0]
fpr1 = longevity.fixed_register_use[r1]
fpr2 = longevity.fixed_register_use[r2]
assert r3 not in longevity.fixed_register_use
assert fpr0.index_lifetimes == [(1, 0)]
assert fpr1.index_lifetimes == [(5, 2), (8, 5)]
assert fpr2.index_lifetimes == [(4, 1)]
def test_fixed_position_none():
b0, b1, b2 = newboxes(0, 0, 0)
l0 = Lifetime(0, 5)
l1 = Lifetime(2, 9)
l2 = Lifetime(0, 9)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2})
longevity.fixed_register(1, r0)
longevity.fixed_register(4, r2)
longevity.fixed_register(5, r1)
longevity.fixed_register(8, r1)
fpr0 = longevity.fixed_register_use[r0]
fpr1 = longevity.fixed_register_use[r1]
fpr2 = longevity.fixed_register_use[r2]
assert r3 not in longevity.fixed_register_use
assert fpr0.index_lifetimes == [(1, 1)]
assert fpr1.index_lifetimes == [(5, 5), (8, 8)]
assert fpr2.index_lifetimes == [(4, 4)]
def test_free_until_pos_none():
longevity = LifetimeManager({})
longevity.fixed_register(5, r1, None)
longevity.fixed_register(8, r1, None)
longevity.fixed_register(35, r1, None)
fpr1 = longevity.fixed_register_use[r1]
assert fpr1.free_until_pos(0) == 5
assert fpr1.free_until_pos(1) == 5
assert fpr1.free_until_pos(2) == 5
assert fpr1.free_until_pos(3) == 5
assert fpr1.free_until_pos(4) == 5
assert fpr1.free_until_pos(5) == 5
assert fpr1.free_until_pos(10) == 35
assert fpr1.free_until_pos(20) == 35
assert fpr1.free_until_pos(30) == 35
assert fpr1.free_until_pos(36) == sys.maxint
def test_free_until_pos():
b0, b1, b2 = newboxes(0, 0, 0)
l0 = Lifetime(0, 5)
l1 = Lifetime(2, 9)
l2 = Lifetime(30, 40)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2})
longevity.fixed_register(5, r1, b1)
longevity.fixed_register(8, r1, b1)
longevity.fixed_register(35, r1, b2)
fpr1 = longevity.fixed_register_use[r1]
# simple cases: we are before the beginning of the lifetime of the variable
# in the fixed register, then it's free until the definition of the
# variable
assert fpr1.free_until_pos(0) == 2
assert fpr1.free_until_pos(1) == 2
assert fpr1.free_until_pos(2) == 2
assert fpr1.free_until_pos(10) == 30
assert fpr1.free_until_pos(20) == 30
assert fpr1.free_until_pos(30) == 30
# after the fixed use, we are fine anyway
assert fpr1.free_until_pos(36) == sys.maxint
assert fpr1.free_until_pos(50) == sys.maxint
# asking for a position *after* the definition of the variable in the fixed
# register means the variable didn't make it into the fixed register, but
# at the latest by the use point it will have to go there
assert fpr1.free_until_pos(3) == 5
assert fpr1.free_until_pos(4) == 5
assert fpr1.free_until_pos(5) == 5
assert fpr1.free_until_pos(6) == 8
assert fpr1.free_until_pos(7) == 8
assert fpr1.free_until_pos(8) == 8
assert fpr1.free_until_pos(31) == 35
assert fpr1.free_until_pos(32) == 35
assert fpr1.free_until_pos(33) == 35
assert fpr1.free_until_pos(34) == 35
assert fpr1.free_until_pos(35) == 35
def test_free_until_pos_different_regs():
b0, b1, b2 = newboxes(0, 0, 0)
l0 = Lifetime(0, 5)
l1 = Lifetime(2, 9)
l2 = Lifetime(30, 40)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2})
longevity.fixed_register(1, r0, b0)
longevity.fixed_register(4, r2, b0)
fpr2 = longevity.fixed_register_use[r2]
# the definition of b0 is before the other fixed register use of r0, so the
# earliest b0 can be in r2 is that use point at index 1
assert fpr2.free_until_pos(0) == 1
def test_longest_free_reg():
b0, b1, b2 = newboxes(0, 0, 0)
l0 = Lifetime(0, 5)
l1 = Lifetime(2, 9)
l2 = Lifetime(30, 40)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2})
longevity.fixed_register(1, r0, b0)
longevity.fixed_register(4, r2, b0)
longevity.fixed_register(5, r1, b1)
longevity.fixed_register(8, r1, b1)
longevity.fixed_register(35, r1, b2)
assert longevity.longest_free_reg(0, [r0, r1, r2]) == (r1, 2)
def test_try_pick_free_reg():
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
l0 = Lifetime(0, 4)
l1 = Lifetime(2, 20)
l2 = Lifetime(6, 20)
l3 = Lifetime(8, 20)
l4 = Lifetime(0, 10)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2, b3: l3, b4: l4})
longevity.fixed_register(3, r1, b1)
longevity.fixed_register(7, r2, b2)
longevity.fixed_register(9, r3, b3)
# a best fit
loc = longevity.try_pick_free_reg(0, b0, [r1, r2, r3, r4, r5])
assert loc is r2
# does not fit into any of the fixed regs, use a non-fixed one
loc = longevity.try_pick_free_reg(0, b4, [r5, r2, r3, r4, r1])
assert loc in [r4, r5]
# all available are fixed but var doesn't fit completely into any of these.
# pick the biggest interval
loc = longevity.try_pick_free_reg(0, b4, [r1, r2, r3])
assert loc is r3
def test_try_pick_free_reg_bug():
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
l0 = Lifetime(10, 30)
l1 = Lifetime(0, 15)
longevity = LifetimeManager({b0: l0, b1: l1})
longevity.fixed_register(20, r0, b0)
# does not fit into r0, use r1
loc = longevity.try_pick_free_reg(0, b1, [r0, r1])
assert loc == r1
def test_try_pick_free_reg_bug2():
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
l0 = Lifetime(1, 2)
l1 = Lifetime(2, 4)
longevity = LifetimeManager({b0: l0, b1: l1})
longevity.fixed_register(4, r1, b1)
# does not fit into r0, use r1
loc = longevity.try_pick_free_reg(0, b0, [r0, r1])
assert loc == r0
def test_simple_coalescing():
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
l0 = Lifetime(0, 4)
l1 = Lifetime(4, 20)
l2 = Lifetime(4, 20)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2})
longevity.fixed_register(10, r1, b1)
longevity.fixed_register(10, r2, b2)
longevity.try_use_same_register(b0, b2)
loc = longevity.try_pick_free_reg(0, b0, [r0, r1, r2, r3, r4])
assert loc is r2
def test_coalescing_blocks_regs_correctly():
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
l0 = Lifetime(10, 30)
l1 = Lifetime(30, 40)
l2 = Lifetime(30, 40)
l3 = Lifetime(0, 15)
l4 = Lifetime(0, 5)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2, b3: l3, b4: l4})
longevity.try_use_same_register(b0, b1)
longevity.fixed_register(35, r1, b1)
longevity.fixed_register(35, r2, b2)
loc = longevity.try_pick_free_reg(0, b3, [r1, r2])
# r2 is picked, otherwise b0 can't end up in r1
assert loc is r2
loc = longevity.try_pick_free_reg(0, b4, [r1, r2])
# r1 is picked, because b4 fits before b0
assert loc is r1
def test_coalescing_non_fixed_regs():
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
l0 = Lifetime(0, 10)
l1 = Lifetime(10, 20)
l2 = Lifetime(25, 40)
l3 = Lifetime(15, 40)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2, b3: l3})
longevity.try_use_same_register(b0, b1)
longevity.fixed_register(35, r2, b2)
longevity.fixed_register(35, r3, b3)
loc = longevity.try_pick_free_reg(0, b0, [r1, r2, r3])
# r2 is picked, otherwise b1 can't end up in the same reg as b0
assert loc is r2
def test_chained_coalescing():
# 5 + b4
# |
# 10 + b0 |
# | |
# | 15 +
# |
# +
# 20
# + b1
# |
# |
# |
# +
# 30
# + b2
# |
# r1 *
# |
# +
# 40
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
l0 = Lifetime(10, 20)
l1 = Lifetime(20, 30)
l2 = Lifetime(30, 40)
l4 = Lifetime(5, 15)
longevity = LifetimeManager({b0: l0, b1: l1, b2: l2, b4: l4})
longevity.try_use_same_register(b0, b1)
longevity.try_use_same_register(b1, b2)
longevity.fixed_register(35, r1, b2)
loc = longevity.try_pick_free_reg(5, b4, [r0, r1])
assert loc is r0
class TestRegalloc(object):
def test_freeing_vars(self):
b0, b1, b2 = newboxes(0, 0, 0)
longevity = {b0: Lifetime(0, 1), b1: Lifetime(0, 2), b2: Lifetime(0, 2)}
rm = RegisterManager(longevity)
rm.next_instruction()
for b in b0, b1, b2:
rm.try_allocate_reg(b)
rm._check_invariants()
assert len(rm.free_regs) == 1
assert len(rm.reg_bindings) == 3
rm.possibly_free_vars([b0, b1, b2])
assert len(rm.free_regs) == 1
assert len(rm.reg_bindings) == 3
rm._check_invariants()
rm.next_instruction()
rm.possibly_free_vars([b0, b1, b2])
rm._check_invariants()
assert len(rm.free_regs) == 2
assert len(rm.reg_bindings) == 2
rm._check_invariants()
rm.next_instruction()
rm.possibly_free_vars([b0, b1, b2])
rm._check_invariants()
assert len(rm.free_regs) == 4
assert len(rm.reg_bindings) == 0
def test_register_exhaustion(self):
boxes, longevity = boxes_and_longevity(5)
rm = RegisterManager(longevity)
rm.next_instruction()
for b in boxes[:len(regs)]:
assert rm.try_allocate_reg(b)
assert rm.try_allocate_reg(boxes[-1]) is None
rm._check_invariants()
def test_need_lower_byte(self):
boxes, longevity = boxes_and_longevity(5)
b0, b1, b2, b3, b4 = boxes
class XRegisterManager(RegisterManager):
no_lower_byte_regs = [r2, r3]
rm = XRegisterManager(longevity)
rm.next_instruction()
loc0 = rm.try_allocate_reg(b0, need_lower_byte=True)
assert loc0 not in XRegisterManager.no_lower_byte_regs
loc = rm.try_allocate_reg(b1, need_lower_byte=True)
assert loc not in XRegisterManager.no_lower_byte_regs
loc = rm.try_allocate_reg(b2, need_lower_byte=True)
assert loc is None
loc = rm.try_allocate_reg(b0, need_lower_byte=True)
assert loc is loc0
rm._check_invariants()
def test_specific_register(self):
boxes, longevity = boxes_and_longevity(5)
rm = RegisterManager(longevity)
rm.next_instruction()
loc = rm.try_allocate_reg(boxes[0], selected_reg=r1)
assert loc is r1
loc = rm.try_allocate_reg(boxes[1], selected_reg=r1)
assert loc is None
rm._check_invariants()
loc = rm.try_allocate_reg(boxes[0], selected_reg=r1)
assert loc is r1
loc = rm.try_allocate_reg(boxes[0], selected_reg=r2)
assert loc is r2
rm._check_invariants()
def test_force_allocate_reg(self):
boxes, longevity = boxes_and_longevity(5)
b0, b1, b2, b3, b4 = boxes
fm = TFrameManager()
class XRegisterManager(RegisterManager):
no_lower_byte_regs = [r2, r3]
rm = XRegisterManager(longevity,
frame_manager=fm,
assembler=MockAsm())
rm.next_instruction()
loc = rm.force_allocate_reg(b0)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b1)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b2)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b3)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b4)
assert isinstance(loc, FakeReg)
        # one of those should now be somewhere else
locs = [rm.loc(b) for b in boxes]
used_regs = [loc for loc in locs if isinstance(loc, FakeReg)]
assert len(used_regs) == len(regs)
loc = rm.force_allocate_reg(b0, need_lower_byte=True)
assert isinstance(loc, FakeReg)
assert loc not in [r2, r3]
rm._check_invariants()
def test_make_sure_var_in_reg(self):
boxes, longevity = boxes_and_longevity(5)
fm = TFrameManager()
rm = RegisterManager(longevity, frame_manager=fm,
assembler=MockAsm())
rm.next_instruction()
# allocate a stack position
b0, b1, b2, b3, b4 = boxes
sp = fm.loc(b0)
assert sp.pos == 0
loc = rm.make_sure_var_in_reg(b0)
assert isinstance(loc, FakeReg)
rm._check_invariants()
def test_bogus_make_sure_var_in_reg(self):
b0, = newboxes(0)
longevity = {b0: Lifetime(0, 1)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
# invalid call to make_sure_var_in_reg(): box unknown so far
py.test.raises(KeyError, rm.make_sure_var_in_reg, b0)
def test_return_constant(self):
asm = MockAsm()
boxes, longevity = boxes_and_longevity(5)
fm = TFrameManager()
rm = RegisterManager(longevity, assembler=asm,
frame_manager=fm)
rm.next_instruction()
loc = rm.return_constant(ConstInt(1), selected_reg=r1)
assert loc is r1
loc = rm.return_constant(ConstInt(1), selected_reg=r1)
assert loc is r1
loc = rm.return_constant(ConstInt(1))
assert isinstance(loc, ConstInt)
for box in boxes[:-1]:
rm.force_allocate_reg(box)
assert len(asm.moves) == 2 # Const(1) -> r1, twice
assert len(rm.reg_bindings) == 4
rm._check_invariants()
def test_loc_of_const(self):
rm = RegisterManager({})
rm.next_instruction()
assert isinstance(rm.loc(ConstInt(1)), ConstInt)
def test_call_support(self):
class XRegisterManager(RegisterManager):
save_around_call_regs = [r1, r2]
def call_result_location(self, v):
return r1
fm = TFrameManager()
asm = MockAsm()
boxes, longevity = boxes_and_longevity(5)
rm = XRegisterManager(longevity, frame_manager=fm,
assembler=asm)
for b in boxes[:-1]:
rm.force_allocate_reg(b)
rm.position = 0
rm.before_call()
assert len(rm.reg_bindings) == 2
assert fm.get_frame_depth() == 2
assert len(asm.moves) == 2
rm._check_invariants()
rm.after_call(boxes[-1])
assert len(rm.reg_bindings) == 3
rm._check_invariants()
def test_call_support_save_all_regs(self):
class XRegisterManager(RegisterManager):
save_around_call_regs = [r1, r2]
def call_result_location(self, v):
return r1
fm = TFrameManager()
asm = MockAsm()
boxes, longevity = boxes_and_longevity(5)
rm = XRegisterManager(longevity, frame_manager=fm,
assembler=asm)
for b in boxes[:-1]:
rm.force_allocate_reg(b)
rm.before_call(save_all_regs=True)
assert len(rm.reg_bindings) == 0
assert fm.get_frame_depth() == 4
assert len(asm.moves) == 4
rm._check_invariants()
rm.after_call(boxes[-1])
assert len(rm.reg_bindings) == 1
rm._check_invariants()
def test_different_frame_width(self):
class XRegisterManager(RegisterManager):
pass
fm = TFrameManager()
b0 = InputArgInt()
longevity = {b0: Lifetime(0, 1)}
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
f0 = InputArgFloat()
longevity = {f0: Lifetime(0, 1)}
xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm)
xrm.loc(f0)
rm.loc(b0)
assert fm.get_frame_depth() == 3
def test_spilling(self):
b0, b1, b2, b3, b4, b5 = newboxes(0, 1, 2, 3, 4, 5)
longevity = {b0: Lifetime(0, 3), b1: Lifetime(0, 3),
b3: Lifetime(0, 5), b2: Lifetime(0, 2),
b4: Lifetime(1, 4), b5: Lifetime(1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
for b in b0, b1, b2, b3:
rm.force_allocate_reg(b)
assert len(rm.free_regs) == 0
rm.next_instruction()
loc = rm.loc(b3)
spilled = rm.force_allocate_reg(b4)
assert spilled is loc
spilled2 = rm.force_allocate_reg(b5)
assert spilled2 is loc
rm._check_invariants()
def test_spilling_furthest_next_real_use(self):
b0, b1, b2, b3, b4, b5 = newboxes(0, 1, 2, 3, 4, 5)
longevity = {b0: Lifetime(0, 3, [1, 2, 3]), b1: Lifetime(0, 3, [3]),
b3: Lifetime(0, 4, [1, 2, 3, 4]), b2: Lifetime(0, 2),
b4: Lifetime(1, 4), b5: Lifetime(1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
for b in b0, b1, b2, b3:
rm.force_allocate_reg(b)
assert len(rm.free_regs) == 0
rm.next_instruction()
loc = rm.loc(b1)
spilled = rm.force_allocate_reg(b4)
assert spilled is loc
spilled2 = rm.force_allocate_reg(b5)
assert spilled2 is loc
rm._check_invariants()
def test_spill_useless_vars_first(self):
b0, b1, b2, b3, b4, b5 = newboxes(0, 1, 2, 3, 4, 5)
longevity = {b0: Lifetime(0, 5), b1: Lifetime(0, 10),
# b2 and b3 become useless but b3 lives longer
b3: Lifetime(0, 7, 3), b2: Lifetime(0, 6, 3),
b4: Lifetime(4, 5), b5: Lifetime(4, 7)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
for b in b0, b1, b2, b3:
rm.force_allocate_reg(b)
rm.position = 4
assert len(rm.free_regs) == 0
loc = rm.loc(b3)
spilled = rm.force_allocate_reg(b4)
assert spilled is loc
loc = rm.loc(b2)
spilled2 = rm.force_allocate_reg(b5)
assert spilled2 is loc
rm._check_invariants()
def test_hint_frame_locations_1(self):
for hint_value in range(11):
b0, = newboxes(0)
fm = TFrameManager()
fm.hint_frame_pos[b0] = hint_value
blist = newboxes(*range(10))
for b1 in blist:
fm.loc(b1)
for b1 in blist:
fm.mark_as_free(b1)
assert fm.get_frame_depth() == 10
loc = fm.loc(b0)
if hint_value < 10:
expected = hint_value
else:
expected = 0
assert fm.get_loc_index(loc) == expected
assert fm.get_frame_depth() == 10
def test_linkedlist(self):
class Loc(object):
def __init__(self, pos, size, tp):
self.pos = pos
self.size = size
self.tp = tp
class FrameManager(object):
@staticmethod
def get_loc_index(item):
return item.pos
@staticmethod
def frame_pos(pos, tp):
if tp == 13:
size = 2
else:
size = 1
return Loc(pos, size, tp)
fm = FrameManager()
l = LinkedList(fm)
l.append(1, Loc(1, 1, 0))
l.append(1, Loc(4, 1, 0))
l.append(1, Loc(2, 1, 0))
l.append(1, Loc(0, 1, 0))
assert l.master_node.val == 0
assert l.master_node.next.val == 1
assert l.master_node.next.next.val == 2
assert l.master_node.next.next.next.val == 4
assert l.master_node.next.next.next.next is None
item = l.pop(1, 0)
assert item.pos == 0
item = l.pop(1, 0)
assert item.pos == 1
item = l.pop(1, 0)
assert item.pos == 2
item = l.pop(1, 0)
assert item.pos == 4
assert l.pop(1, 0) is None
l.append(1, Loc(1, 1, 0))
l.append(1, Loc(5, 1, 0))
l.append(1, Loc(2, 1, 0))
l.append(1, Loc(0, 1, 0))
item = l.pop(2, 13)
assert item.tp == 13
assert item.pos == 0
assert item.size == 2
assert l.pop(2, 0) is None # 2 and 4
l.append(1, Loc(4, 1, 0))
item = l.pop(2, 13)
assert item.pos == 4
assert item.size == 2
assert l.pop(1, 0).pos == 2
assert l.pop(1, 0) is None
l.append(2, Loc(1, 2, 0))
# this will not work because the result will be odd
assert l.pop(2, 13) is None
l.append(1, Loc(3, 1, 0))
item = l.pop(2, 13)
assert item.pos == 2
assert item.tp == 13
assert item.size == 2
def test_frame_manager_basic_equal(self):
b0, b1 = newboxes(0, 1)
fm = TFrameManagerEqual()
loc0 = fm.loc(b0)
assert fm.get_loc_index(loc0) == 0
#
assert fm.get(b1) is None
loc1 = fm.loc(b1)
assert fm.get_loc_index(loc1) == 1
assert fm.get(b1) == loc1
#
loc0b = fm.loc(b0)
assert loc0b == loc0
#
fm.loc(InputArgInt())
assert fm.get_frame_depth() == 3
#
f0 = InputArgFloat()
locf0 = fm.loc(f0)
assert fm.get_loc_index(locf0) == 3
assert fm.get_frame_depth() == 4
#
f1 = InputArgFloat()
locf1 = fm.loc(f1)
assert fm.get_loc_index(locf1) == 4
assert fm.get_frame_depth() == 5
fm.mark_as_free(b1)
assert fm.freelist
b2 = InputArgInt()
fm.loc(b2) # should be in the same spot as b1 before
assert fm.get(b1) is None
assert fm.get(b2) == loc1
fm.mark_as_free(b0)
p0 = InputArgRef()
ploc = fm.loc(p0)
assert fm.get_loc_index(ploc) == 0
assert fm.get_frame_depth() == 5
assert ploc != loc1
p1 = InputArgRef()
p1loc = fm.loc(p1)
assert fm.get_loc_index(p1loc) == 5
assert fm.get_frame_depth() == 6
fm.mark_as_free(p0)
p2 = InputArgRef()
p2loc = fm.loc(p2)
assert p2loc == ploc
assert len(fm.freelist) == 0
for box in fm.bindings.keys():
fm.mark_as_free(box)
fm.bind(InputArgRef(), FakeFramePos(3, 'r'))
assert len(fm.freelist) == 6
def test_frame_manager_basic(self):
b0, b1 = newboxes(0, 1)
fm = TFrameManager()
loc0 = fm.loc(b0)
assert fm.get_loc_index(loc0) == 0
#
assert fm.get(b1) is None
loc1 = fm.loc(b1)
assert fm.get_loc_index(loc1) == 1
assert fm.get(b1) == loc1
#
loc0b = fm.loc(b0)
assert loc0b == loc0
#
fm.loc(InputArgInt())
assert fm.get_frame_depth() == 3
#
f0 = InputArgFloat()
locf0 = fm.loc(f0)
# can't be odd
assert fm.get_loc_index(locf0) == 4
assert fm.get_frame_depth() == 6
#
f1 = InputArgFloat()
locf1 = fm.loc(f1)
assert fm.get_loc_index(locf1) == 6
assert fm.get_frame_depth() == 8
fm.mark_as_free(b1)
assert fm.freelist
b2 = InputArgInt()
fm.loc(b2) # should be in the same spot as b1 before
assert fm.get(b1) is None
assert fm.get(b2) == loc1
fm.mark_as_free(b0)
p0 = InputArgRef()
ploc = fm.loc(p0)
assert fm.get_loc_index(ploc) == 0
assert fm.get_frame_depth() == 8
assert ploc != loc1
p1 = InputArgRef()
p1loc = fm.loc(p1)
assert fm.get_loc_index(p1loc) == 3
assert fm.get_frame_depth() == 8
fm.mark_as_free(p0)
p2 = InputArgRef()
p2loc = fm.loc(p2)
assert p2loc == ploc
assert len(fm.freelist) == 0
fm.mark_as_free(b2)
f3 = InputArgFloat()
fm.mark_as_free(p2)
floc = fm.loc(f3)
assert fm.get_loc_index(floc) == 0
for box in fm.bindings.keys():
fm.mark_as_free(box)
class TestForceResultInReg(object):
    # uses its own class since there are so many cases
def test_force_result_in_reg_1(self):
# var in reg, dies
b0, b1 = newboxes(0, 0)
longevity = {b0: Lifetime(0, 1), b1: Lifetime(1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
loc0 = rm.force_allocate_reg(b0)
rm._check_invariants()
rm.next_instruction()
loc = rm.force_result_in_reg(b1, b0)
assert loc is loc0
assert len(asm.moves) == 0
rm._check_invariants()
def test_force_result_in_reg_2(self):
# var in reg, survives
b0, b1 = newboxes(0, 0)
longevity = {b0: Lifetime(0, 2), b1: Lifetime(1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
loc0 = rm.force_allocate_reg(b0)
rm._check_invariants()
rm.next_instruction()
loc = rm.force_result_in_reg(b1, b0)
assert loc is not loc0
assert rm.loc(b0) is loc0
assert len(asm.moves) == 1
rm._check_invariants()
def test_force_result_in_reg_3(self):
# var in reg, survives, no free registers
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
longevity = {b0: Lifetime(0, 2), b1: Lifetime(0, 2),
b3: Lifetime(0, 2), b2: Lifetime(0, 2),
b4: Lifetime(1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
for b in b0, b1, b2, b3:
rm.force_allocate_reg(b)
assert not len(rm.free_regs)
rm._check_invariants()
rm.next_instruction()
rm.force_result_in_reg(b4, b0)
rm._check_invariants()
assert len(asm.moves) == 1
def test_force_result_in_reg_4(self):
b0, b1 = newboxes(0, 0)
longevity = {b0: Lifetime(0, 1), b1: Lifetime(0, 1)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
fm.loc(b0)
rm.force_result_in_reg(b1, b0)
rm._check_invariants()
loc = rm.loc(b1)
assert isinstance(loc, FakeReg)
loc = rm.loc(b0)
assert isinstance(loc, FakeFramePos)
assert len(asm.moves) == 1
def test_force_result_in_reg_const(self):
# const
boxes, longevity = boxes_and_longevity(2)
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm,
assembler=asm)
rm.next_instruction()
c = ConstInt(0)
rm.force_result_in_reg(boxes[0], c)
rm._check_invariants()
# some tests where the result is supposed to go in a fixed register
def test_force_result_in_reg_fixed_reg_1(self):
# var in reg, dies
b0, b1 = newboxes(0, 0)
longevity = LifetimeManager({b0: Lifetime(0, 1), b1: Lifetime(1, 3)})
longevity.try_use_same_register(b0, b1)
longevity.fixed_register(1, r1, b1)
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
loc0 = rm.force_allocate_reg(b0)
rm._check_invariants()
rm.next_instruction()
loc = rm.force_result_in_reg(b1, b0)
assert loc is loc0
assert loc is r1
assert len(asm.moves) == 0
rm._check_invariants()
def test_force_result_in_reg_fixed_reg_2(self):
# var in reg, survives
b0, b1 = newboxes(0, 0)
longevity = LifetimeManager({b0: Lifetime(0, 2), b1: Lifetime(1, 3)})
# has no effect, lifetimes overlap
longevity.try_use_same_register(b0, b1)
longevity.fixed_register(1, r1, b1)
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
loc0 = rm.force_allocate_reg(b0)
rm._check_invariants()
rm.next_instruction()
loc = rm.force_result_in_reg(b1, b0)
assert loc is not loc0
assert rm.loc(b0) is loc0
assert loc is r1
assert len(asm.moves) == 1
rm._check_invariants()
# _____________________________________________________
# tests that assign registers in a mocked way for a fake CPU
class RegisterManager2(BaseRegMan):
all_regs = [r0, r1, r2, r3, r4, r5, r6, r7]
save_around_call_regs = [r0, r1, r2, r3]
frame_reg = r8
# calling conventions: r0 is result
# r1 r2 r3 are arguments and caller-saved registers
# r4 r5 r6 r7 are callee-saved registers
def convert_to_imm(self, v):
return v.value
def call_result_location(self, v):
return r0
class FakeRegalloc(BaseRegalloc):
def __init__(self):
self.assembler = MockAsm()
def fake_prepare_loop(self, inputargs, operations, looptoken, inputarg_locs=None):
operations = self._prepare(inputargs, operations, [])
self.operations = operations
if inputarg_locs is None:
self._set_initial_bindings(inputargs, looptoken)
else:
for v, loc in zip(inputargs, inputarg_locs):
self.rm.reg_bindings[v] = loc
self.rm.free_regs.remove(loc)
self.possibly_free_vars(list(inputargs))
self._add_fixed_registers()
return operations
def _prepare(self, inputargs, operations, allgcrefs):
self.fm = TFrameManager()
# compute longevity of variables
longevity = compute_vars_longevity(inputargs, operations)
self.longevity = longevity
self.rm = RegisterManager2(
longevity, assembler=self.assembler, frame_manager=self.fm)
return operations
def possibly_free_var(self, var):
self.rm.possibly_free_var(var)
def possibly_free_vars(self, vars):
for var in vars:
if var is not None: # xxx kludgy
self.possibly_free_var(var)
def possibly_free_vars_for_op(self, op):
for i in range(op.numargs()):
var = op.getarg(i)
if var is not None: # xxx kludgy
self.possibly_free_var(var)
if op.type != 'v':
self.possibly_free_var(op)
def loc(self, x):
return self.rm.loc(x)
def force_allocate_reg_or_cc(self, var):
assert var.type == INT
if self.next_op_can_accept_cc(self.operations, self.rm.position):
            # hack: return the frame register (r8 here; ebp on x86) to mean
            # "lives in CC". It will not actually be used, and the location
            # will be freed after the next op as usual.
self.rm.force_allocate_frame_reg(var)
return r8
else:
            # else, return a regular register (not the frame register).
return self.rm.force_allocate_reg(var, need_lower_byte=True)
def fake_allocate(self, loop):
from rpython.jit.backend.x86.jump import remap_frame_layout
def emit(*args):
self.assembler.emitted.append(args)
for i, op in enumerate(loop.operations):
self.rm.position = i
opnum = op.getopnum()
opname = op.getopname()
if rop.is_comparison(opnum):
locs = [self.loc(x) for x in op.getarglist()]
loc = self.force_allocate_reg_or_cc(op)
emit(opname, loc, locs)
elif opname.startswith("int_"):
locs = [self.loc(x) for x in op.getarglist()]
loc = self.rm.force_result_in_reg(
op, op.getarg(0), op.getarglist())
emit(opname, loc, locs[1:])
elif op.is_guard():
fail_locs = [self.loc(x) for x in op.getfailargs()]
emit(opname, self.loc(op.getarg(0)), fail_locs)
elif rop.is_call(opnum):
# calling convention!
src_locs = [self.loc(x) for x in op.getarglist()[1:]]
self.rm.before_call()
loc = self.rm.after_call(op)
dst_locs = [r1, r2, r3][:len(src_locs)]
remap_frame_layout(self.assembler, src_locs, dst_locs, r8)
emit(opname, loc, dst_locs)
elif opname == "label":
descr = op.getdescr()
locs = [self.loc(x) for x in op.getarglist()]
emit(opname, locs)
descr._fake_arglocs = locs
lastop = loop.operations[-1]
if lastop.getopname() == "jump" and lastop.getdescr() is descr:
# now we know the places, add hints
for i, r in enumerate(locs):
if isinstance(r, FakeReg):
self.longevity.fixed_register(
len(loop.operations) - 1, r, lastop.getarg(i))
elif opname == "jump":
src_locs = [self.loc(x) for x in op.getarglist()]
dst_locs = op.getdescr()._fake_arglocs
remap_frame_layout(self.assembler, src_locs, dst_locs, r8)
emit("jump", dst_locs)
else:
locs = [self.loc(x) for x in op.getarglist()]
if op.type != "v":
loc = self.rm.force_allocate_reg(op)
emit(opname, loc, locs)
else:
emit(opname, locs)
self.possibly_free_vars_for_op(op)
return self.assembler.emitted
def _add_fixed_registers(self):
for i, op in enumerate(self.operations):
opnum = op.getopnum()
opname = op.getopname()
args = op.getarglist()
if rop.is_call(opnum):
# calling convention!
arglist = op.getarglist()[1:]
for arg, reg in zip(arglist + [None] * (3 - len(arglist)), [r1, r2, r3]):
self.longevity.fixed_register(i, reg, arg)
self.longevity.fixed_register(i, r0, op)
elif opname.startswith("int_"):
if not args[0].is_constant():
self.longevity.try_use_same_register(args[0], op)
CPU = getcpuclass()
class TestFullRegallocFakeCPU(object):
# XXX copy-paste from test_regalloc_integration
cpu = CPU(None, None)
cpu.setup_once()
targettoken = TargetToken()
targettoken2 = TargetToken()
fdescr1 = BasicFailDescr(1)
fdescr2 = BasicFailDescr(2)
fdescr3 = BasicFailDescr(3)
def setup_method(self, meth):
self.targettoken._ll_loop_code = 0
self.targettoken2._ll_loop_code = 0
def f1(x):
return x+1
def f2(x, y):
return x*y
def f10(*args):
assert len(args) == 10
return sum(args)
F1PTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed))
F2PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*2, lltype.Signed))
F10PTR = lltype.Ptr(lltype.FuncType([lltype.Signed]*10, lltype.Signed))
f1ptr = llhelper(F1PTR, f1)
f2ptr = llhelper(F2PTR, f2)
f10ptr = llhelper(F10PTR, f10)
f1_calldescr = cpu.calldescrof(F1PTR.TO, F1PTR.TO.ARGS, F1PTR.TO.RESULT,
EffectInfo.MOST_GENERAL)
f2_calldescr = cpu.calldescrof(F2PTR.TO, F2PTR.TO.ARGS, F2PTR.TO.RESULT,
EffectInfo.MOST_GENERAL)
f10_calldescr = cpu.calldescrof(F10PTR.TO, F10PTR.TO.ARGS, F10PTR.TO.RESULT,
EffectInfo.MOST_GENERAL)
namespace = locals().copy()
def parse(self, s, boxkinds=None, namespace=None):
return parse(s, self.cpu, namespace or self.namespace,
boxkinds=boxkinds)
def allocate(self, s, inputarg_locs=None):
loop = self.parse(s)
self.loop = loop
regalloc = FakeRegalloc()
regalloc.fake_prepare_loop(loop.inputargs, loop.operations,
loop.original_jitcell_token, inputarg_locs)
self.regalloc = regalloc
return regalloc.fake_allocate(loop)
def test_simple(self):
ops = '''
[i0]
label(i0, descr=targettoken)
i1 = int_add(i0, 1)
i2 = int_lt(i1, 20)
guard_true(i2) [i1]
jump(i1, descr=targettoken)
'''
emitted = self.allocate(ops)
fp0 = FakeFramePos(0, INT)
assert emitted == [
("label", [fp0]),
("move", r0, fp0),
("int_add", r0, [1]),
("int_lt", r8, [r0, 20]),
("guard_true", r8, [r0]),
("move", fp0, r0),
("jump", [fp0]),
]
def test_call(self):
ops = '''
[i0]
i1 = int_mul(i0, 2)
i2 = call_i(ConstClass(f1ptr), i1, descr=f1_calldescr)
guard_false(i2) []
'''
emitted = self.allocate(ops)
fp0 = FakeFramePos(0, INT)
assert emitted == [
("move", r1, fp0),
("int_mul", r1, [2]),
("call_i", r0, [r1]),
("guard_false", r0, []),
]
def test_call_2(self):
ops = '''
[i0, i1]
i2 = int_mul(i0, 2)
i3 = int_add(i1, 1)
i4 = call_i(ConstClass(f1ptr), i2, descr=f1_calldescr)
guard_false(i4) [i3]
'''
emitted = self.allocate(ops)
fp0 = FakeFramePos(0, INT)
fp1 = FakeFramePos(1, INT)
assert emitted == [
("move", r1, fp0),
("int_mul", r1, [2]),
("move", r4, fp1), # r4 gets picked since it's callee-saved
("int_add", r4, [1]),
("call_i", r0, [r1]),
("guard_false", r0, [r4]),
]
def test_coalescing(self):
ops = '''
[i0]
i1 = int_mul(i0, 5)
i5 = int_is_true(i1)
guard_true(i5) []
i2 = int_mul(i0, 2)
i3 = int_add(i2, 1) # i2 and i3 need to be coalesced
i4 = call_i(ConstClass(f1ptr), i3, descr=f1_calldescr)
guard_false(i4) []
'''
emitted = self.allocate(ops)
fp0 = FakeFramePos(0, INT)
assert emitted == [
('move', r1, fp0),
('int_mul', r1, [5]),
('int_is_true', r8, [r1]),
('guard_true', r8, []),
('move', r1, fp0),
('int_mul', r1, [2]),
('int_add', r1, [1]),
('call_i', r0, [r1]),
('guard_false', r0, [])
]
def test_specify_inputarg_locs(self):
ops = '''
[i0]
i1 = int_mul(i0, 5)
i5 = int_is_true(i1)
guard_true(i5) []
'''
emitted = self.allocate(ops, [r0])
assert emitted == [
('int_mul', r0, [5]),
('int_is_true', r8, [r0]),
('guard_true', r8, [])
]
def test_coalescing_first_var_already_in_different_reg(self):
ops = '''
[i0]
i2 = int_mul(i0, 2)
i3 = int_add(i2, 1) # i2 and i3 need to be coalesced
i4 = call_i(ConstClass(f1ptr), i3, descr=f1_calldescr)
guard_false(i4) [i0]
'''
emitted = self.allocate(ops, [r5])
assert emitted == [
('move', r1, r5),
('int_mul', r1, [2]),
('int_add', r1, [1]),
('call_i', r0, [r1]),
('guard_false', r0, [r5])
]
def test_call_spill_furthest_use(self):
# here, i2 should be spilled, because its use is farther away
ops = '''
[i0, i1, i2, i3, i4, i5, i6]
i8 = call_i(ConstClass(f2ptr), i0, i1, descr=f2_calldescr)
escape_i(i3)
escape_i(i2)
guard_false(i8) [i2, i3, i4, i5, i6]
'''
emitted = self.allocate(ops, [r1, r2, r0, r3, r4, r5, r6])
fp0 = FakeFramePos(0, INT)
assert emitted == [
('move', fp0, r0),
('move', r7, r3),
('call_i', r0, [r1, r2]),
('escape_i', r1, [r7]),
('escape_i', r1, [fp0]),
('guard_false', r0, [fp0, r7, r4, r5, r6])
]
@py.test.mark.skip("messy - later")
def test_call_spill(self):
# i0 dies, i1 is the argument, the other fight for caller-saved regs
# all_regs = [r0, r1, r2, r3, r4, r5, r6, r7]
# save_around_call_regs = [r0, r1, r2, r3]
ops = '''
[i0, i1, i2, i3, i4, i5, i6]
i8 = call_i(ConstClass(f2ptr), i1, i0, descr=f2_calldescr)
guard_false(i8) [i2, i3, i4, i5, i6]
'''
emitted = self.allocate(ops, [r5, r1, r0, r2, r3, r6, r7])
assert emitted == ["???"]
def test_jump_hinting(self):
ops = '''
[i0, i1]
i2 = escape_i()
i3 = escape_i()
label(i2, i3, descr=targettoken)
i4 = escape_i()
i5 = escape_i()
jump(i4, i5, descr=targettoken)
'''
emitted = self.allocate(ops)
assert emitted == [
('escape_i', r0, []),
('escape_i', r1, []),
('label', [r0, r1]),
('escape_i', r0, []),
('escape_i', r1, []),
('jump', [r0, r1])
]
|
bmtk/utils/sonata/utils.py | tjbanks/bmtk | 216 | 12721725 | # Copyright 2017. <NAME>. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import h5py
import pandas as pd
import numpy as np
MAGIC_ATTR = 'magic'
MAGIC_VAL = 0x0A7A
VERSION_ATTR = 'version'
VERSION_NA = 'NA'
VERSION_CURRENT = '0.1'
try:
ver_split = VERSION_CURRENT.split('.')
VERSION_MAJOR = ver_split[0]
VERSION_MINOR = ver_split[1]
except (IndexError, AttributeError) as err:
VERSION_MAJOR = 0
VERSION_MINOR = 1
def listify(files):
    # TODO: change this to include any iterable data structures (sets, pandas sequences, etc.)
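    # Example: listify('network_nodes.h5') -> ['network_nodes.h5'];
    # an existing list such as ['a.h5', 'b.h5'] is returned unchanged.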
if not isinstance(files, (list, tuple)):
return [files]
else:
return files
def load_h5(h5file, mode='r'):
# TODO: Allow for h5py.Group also
if isinstance(h5file, h5py.File):
return h5file
return h5py.File(h5file, mode)
def load_csv(csvfile):
# TODO: make the separator more flexible
if isinstance(csvfile, pd.DataFrame):
return csvfile
    # TODO: check if it is a csv object and convert it to a pd DataFrame
return pd.read_csv(csvfile, sep=' ', na_values='NONE')
def get_attribute_h5(h5obj, attribut_name, default=None):
val = h5obj.attrs.get(attribut_name, default)
if using_py3 and isinstance(val, bytes):
        # There is a bug with h5py returning unicode/str-based attributes as bytes
val = val.decode()
return val
def check_magic(hdf5_file):
"""Check the magic attribute exists according to the sonata format"""
h5_file_obj = load_h5(hdf5_file)
if MAGIC_ATTR not in h5_file_obj.attrs:
raise Exception('File {} missing top-level \"{}\" attribute.'.format(h5_file_obj.filename, MAGIC_ATTR))
    elif np.uint32(get_attribute_h5(h5_file_obj, MAGIC_ATTR)) != MAGIC_VAL:
raise Exception('File {} has unexpected magic value (expected {})'.format(h5_file_obj.filename, MAGIC_VAL))
return True
def get_version(hdf5_file):
h5_file_obj = load_h5(hdf5_file)
if VERSION_ATTR not in h5_file_obj.attrs:
return VERSION_NA
else:
version_val = get_attribute_h5(h5_file_obj, VERSION_ATTR)
version_str = str(version_val[0])
for ver_sub in version_val[1:]:
version_str += '.{}'.format(ver_sub)
return version_str
def add_hdf5_magic(hdf5_handle):
hdf5_handle['/'].attrs['magic'] = np.uint32(0x0A7A)
def add_hdf5_version(hdf5_handle):
hdf5_handle['/'].attrs['version'] = [np.uint32(VERSION_MAJOR), np.uint32(VERSION_MINOR)]
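# Usage sketch for the magic/version helpers above (hypothetical file name,
# not taken from the original source):
#   with h5py.File('network_nodes.h5', 'w') as h5:
#       add_hdf5_magic(h5)
#       add_hdf5_version(h5)
#   check_magic('network_nodes.h5')   # True when the magic attribute matches MAGIC_VAL
#   get_version('network_nodes.h5')   # '0.1'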
def get_node_ids(nodes_path, population):
# Used by PoissonSpikesGenerator
with h5py.File(nodes_path, 'r') as h5:
node_ids = h5['/nodes'][population]['node_id'][()]
return node_ids
if sys.version_info[0] == 3:
using_py3 = True
range_itr = range
else:
using_py3 = False
range_itr = xrange
|
alipay/aop/api/response/KoubeiTradeOrderAggregateRefundResponse.py | antopen/alipay-sdk-python-all | 213 | 12721736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiTradeOrderAggregateRefundResponse(AlipayResponse):
def __init__(self):
super(KoubeiTradeOrderAggregateRefundResponse, self).__init__()
self._gmt_refund_time = None
self._order_no = None
self._order_status = None
self._out_order_no = None
self._out_refund_no = None
self._refund_amount = None
self._refund_buyer_amount = None
self._refund_discount_amount = None
self._refund_mdiscount_amount = None
self._refund_order_id = None
self._refund_real_amount = None
self._trade_no = None
@property
def gmt_refund_time(self):
return self._gmt_refund_time
@gmt_refund_time.setter
def gmt_refund_time(self, value):
self._gmt_refund_time = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def order_status(self):
return self._order_status
@order_status.setter
def order_status(self, value):
self._order_status = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def out_refund_no(self):
return self._out_refund_no
@out_refund_no.setter
def out_refund_no(self, value):
self._out_refund_no = value
@property
def refund_amount(self):
return self._refund_amount
@refund_amount.setter
def refund_amount(self, value):
self._refund_amount = value
@property
def refund_buyer_amount(self):
return self._refund_buyer_amount
@refund_buyer_amount.setter
def refund_buyer_amount(self, value):
self._refund_buyer_amount = value
@property
def refund_discount_amount(self):
return self._refund_discount_amount
@refund_discount_amount.setter
def refund_discount_amount(self, value):
self._refund_discount_amount = value
@property
def refund_mdiscount_amount(self):
return self._refund_mdiscount_amount
@refund_mdiscount_amount.setter
def refund_mdiscount_amount(self, value):
self._refund_mdiscount_amount = value
@property
def refund_order_id(self):
return self._refund_order_id
@refund_order_id.setter
def refund_order_id(self, value):
self._refund_order_id = value
@property
def refund_real_amount(self):
return self._refund_real_amount
@refund_real_amount.setter
def refund_real_amount(self, value):
self._refund_real_amount = value
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
def parse_response_content(self, response_content):
response = super(KoubeiTradeOrderAggregateRefundResponse, self).parse_response_content(response_content)
if 'gmt_refund_time' in response:
self.gmt_refund_time = response['gmt_refund_time']
if 'order_no' in response:
self.order_no = response['order_no']
if 'order_status' in response:
self.order_status = response['order_status']
if 'out_order_no' in response:
self.out_order_no = response['out_order_no']
if 'out_refund_no' in response:
self.out_refund_no = response['out_refund_no']
if 'refund_amount' in response:
self.refund_amount = response['refund_amount']
if 'refund_buyer_amount' in response:
self.refund_buyer_amount = response['refund_buyer_amount']
if 'refund_discount_amount' in response:
self.refund_discount_amount = response['refund_discount_amount']
if 'refund_mdiscount_amount' in response:
self.refund_mdiscount_amount = response['refund_mdiscount_amount']
if 'refund_order_id' in response:
self.refund_order_id = response['refund_order_id']
if 'refund_real_amount' in response:
self.refund_real_amount = response['refund_real_amount']
if 'trade_no' in response:
self.trade_no = response['trade_no']
|
test/test_text_aug.py | ji3g4m6zo6/JioNLP | 1,063 | 12721755 | <reponame>ji3g4m6zo6/JioNLP
import unittest
import jionlp as jio
class TestTextAug(unittest.TestCase):
""" 测试文本数据增强工具 """
def test_ReplaceEntity(self):
""" test class ReplaceEntity """
        # prepared entity dictionary
entities_dict = {
"Person": {"马成宇": 1},
"Company": {"百度": 4, "国力教育公司": 1},
"Organization": {"延平区人民法院": 1}
}
        # input sample for sequence labeling
text = '腾讯致力于解决冲突,阿里巴巴致力于玩。小马爱玩。'
entities = [{'type': 'Company', 'text': '腾讯', 'offset': (0, 2)},
{'type': 'Company', 'text': '阿里巴巴', 'offset': (10, 14)},
{'type': 'Person', 'text': '小马', 'offset': (19, 21)}]
replace_entity = jio.ReplaceEntity(entities_dict)
texts, entities = replace_entity(text, entities)
        # expected results
standard_texts = ['腾讯致力于解决冲突,国力教育公司致力于玩。小马爱玩。',
'百度致力于解决冲突,阿里巴巴致力于玩。小马爱玩。',
'腾讯致力于解决冲突,阿里巴巴致力于玩。马成宇爱玩。']
standard_entities = [
[{'type': 'Company', 'text': '腾讯', 'offset': (0, 2)},
{'text': '国力教育公司', 'type': 'Company', 'offset': [10, 16]},
{'text': '小马', 'type': 'Person', 'offset': (21, 23)}],
[{'text': '百度', 'type': 'Company', 'offset': [0, 2]},
{'text': '阿里巴巴', 'type': 'Company', 'offset': (10, 14)},
{'text': '小马', 'type': 'Person', 'offset': (19, 21)}],
[{'type': 'Company', 'text': '腾讯', 'offset': (0, 2)},
{'type': 'Company', 'text': '阿里巴巴', 'offset': (10, 14)},
{'text': '马成宇', 'type': 'Person', 'offset': [19, 22]}]]
self.assertEqual(texts, standard_texts)
self.assertEqual(entities, standard_entities)
# def test_
|
06_prepare/archive/preprocess-spark-text-to-bert.py | ichen20/oreilly_book | 2,327 | 12721757 | from __future__ import print_function
from __future__ import unicode_literals
import time
import sys
import os
import shutil
import csv
import collections
import subprocess
import argparse
import json
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'pip', '--upgrade'])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'wrapt', '--upgrade', '--ignore-installed'])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'tensorflow==2.1.0', '--ignore-installed'])
import tensorflow as tf
print(tf.__version__)
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'transformers==2.8.0'])
from transformers import DistilBertTokenizer
import pyspark
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.sql.functions import *
from pyspark.ml.linalg import DenseVector
from pyspark.sql.functions import split
from pyspark.sql.functions import udf, col
from pyspark.sql.types import *
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
# We cap sequences at MAX_SEQ_LENGTH = 64 tokens.
MAX_SEQ_LENGTH = 64
DATA_COLUMN = 'review_body'
LABEL_COLUMN = 'star_rating'
LABEL_VALUES = [1, 2, 3, 4, 5]
label_map = {}
for (i, label) in enumerate(LABEL_VALUES):
label_map[label] = i
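# i.e. label_map == {1: 0, 2: 1, 3: 2, 4: 3, 5: 4}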
class InputFeatures(object):
"""BERT feature vectors."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class Input(object):
"""A single training/test input for sequence classification."""
def __init__(self, text, label=None):
"""Constructs an Input.
Args:
text: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.text = text
self.label = label
def convert_input(label, text):
# First, we need to preprocess our data so that it matches the data BERT was trained on:
#
# 1. Lowercase our text (if we're using a BERT lowercase model)
# 2. Tokenize it (i.e. "sally says hi" -> ["sally", "says", "hi"])
# 3. Break words into WordPieces (i.e. "calling" -> ["call", "##ing"])
#
# Fortunately, the Transformers tokenizer does this for us!
#
# tokens = tokenizer.tokenize(text_input.text)
# Next, we need to do the following:
#
# 4. Map our words to indexes using a vocab file that BERT provides
# 5. Add special "CLS" and "SEP" tokens (see the [readme](https://github.com/google-research/bert))
# 6. Append "index" and "segment" tokens to each input (see the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf))
#
# Again, the Transformers tokenizer does this for us!
#
encode_plus_tokens = tokenizer.encode_plus(text,
pad_to_max_length=True,
max_length=MAX_SEQ_LENGTH)
# Convert the text-based tokens to ids from the pre-trained BERT vocabulary
input_ids = encode_plus_tokens['input_ids']
# Specifies which tokens BERT should pay attention to (0 or 1)
input_mask = encode_plus_tokens['attention_mask']
# Segment Ids are always 0 for single-sequence tasks (or 1 if two-sequence tasks)
segment_ids = [0] * MAX_SEQ_LENGTH
# Label for our training data (star_rating 1 through 5)
label_id = label_map[label]
return {'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids, 'label_ids': [label_id]}
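# Illustrative output of convert_input() (the token ids below are made up; real
# values depend on the pretrained DistilBERT vocabulary):
#   convert_input(5, 'great book') ->
#     {'input_ids':   [101, 2307, 2338, 102, 0, ...],  # padded to MAX_SEQ_LENGTH
#      'input_mask':  [1, 1, 1, 1, 0, ...],
#      'segment_ids': [0] * MAX_SEQ_LENGTH,
#      'label_ids':   [4]}                              # star_rating 5 -> index 4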
def list_arg(raw_value):
"""argparse type for a list of strings"""
return str(raw_value).split(',')
def parse_args():
    # Unlike SageMaker training jobs (which have `SM_HOSTS` and `SM_CURRENT_HOST` env vars), processing jobs need to parse the resource config file directly
resconfig = {}
try:
with open('/opt/ml/config/resourceconfig.json', 'r') as cfgfile:
resconfig = json.load(cfgfile)
except FileNotFoundError:
print('/opt/ml/config/resourceconfig.json not found. current_host is unknown.')
pass # Ignore
# Local testing with CLI args
parser = argparse.ArgumentParser(description='Process')
parser.add_argument('--hosts', type=list_arg,
default=resconfig.get('hosts', ['unknown']),
help='Comma-separated list of host names running the job'
)
parser.add_argument('--current-host', type=str,
default=resconfig.get('current_host', 'unknown'),
help='Name of this host running the job'
)
parser.add_argument('--input-data', type=str,
default='/opt/ml/processing/input/data',
)
parser.add_argument('--output-data', type=str,
default='/opt/ml/processing/output',
)
return parser.parse_args()
def transform(spark, s3_input_data, s3_output_train_data, s3_output_validation_data, s3_output_test_data):
    print('Processing {} => train: {}, validation: {}, test: {}'.format(s3_input_data, s3_output_train_data, s3_output_validation_data, s3_output_test_data))
schema = StructType([
StructField('marketplace', StringType(), True),
StructField('customer_id', StringType(), True),
StructField('review_id', StringType(), True),
StructField('product_id', StringType(), True),
StructField('product_parent', StringType(), True),
StructField('product_title', StringType(), True),
StructField('product_category', StringType(), True),
StructField('star_rating', IntegerType(), True),
StructField('helpful_votes', IntegerType(), True),
StructField('total_votes', IntegerType(), True),
StructField('vine', StringType(), True),
StructField('verified_purchase', StringType(), True),
StructField('review_headline', StringType(), True),
StructField('review_body', StringType(), True),
StructField('review_date', StringType(), True)
])
df_csv = spark.read.csv(path=s3_input_data,
sep='\t',
schema=schema,
header=True,
quote=None)
df_csv.show()
# This dataset should already be clean, but always good to double-check
print('Showing null review_body rows...')
df_csv.where(col('review_body').isNull()).show()
print('Showing cleaned csv')
df_csv_dropped = df_csv.na.drop(subset=['review_body'])
df_csv_dropped.show()
# TODO: Balance
features_df = df_csv_dropped.select(['star_rating', 'review_body'])
features_df.show()
tfrecord_schema = StructType([
StructField("input_ids", ArrayType(IntegerType(), False)),
StructField("input_mask", ArrayType(IntegerType(), False)),
StructField("segment_ids", ArrayType(IntegerType(), False)),
StructField("label_ids", ArrayType(IntegerType(), False))
])
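    # Note on argument order: the columns are passed below as (star_rating, review_body),
    # so the lambda's first parameter receives star_rating and the second receives
    # review_body; convert_input(label, text) therefore gets (star_rating, review_body)
    # as intended, despite the lambda's parameter names.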
bert_transformer = udf(lambda text, label: convert_input(text, label), tfrecord_schema)
spark.udf.register('bert_transformer', bert_transformer)
transformed_df = features_df.select(bert_transformer('star_rating', 'review_body').alias('tfrecords'))
transformed_df.show(truncate=False)
flattened_df = transformed_df.select('tfrecords.*')
flattened_df.show()
# Split 90-5-5%
train_df, validation_df, test_df = flattened_df.randomSplit([0.9, 0.05, 0.05])
train_df.write.format('tfrecords').option('recordType', 'Example').save(path=s3_output_train_data)
print('Wrote to output file: {}'.format(s3_output_train_data))
validation_df.write.format('tfrecords').option('recordType', 'Example').save(path=s3_output_validation_data)
print('Wrote to output file: {}'.format(s3_output_validation_data))
test_df.write.format('tfrecords').option('recordType', 'Example').save(path=s3_output_test_data)
print('Wrote to output file: {}'.format(s3_output_test_data))
restored_test_df = spark.read.format('tfrecords').option('recordType', 'Example').load(path=s3_output_test_data)
restored_test_df.show()
def main():
spark = SparkSession.builder.appName('AmazonReviewsSparkProcessor').getOrCreate()
# Convert command line args into a map of args
args_iter = iter(sys.argv[1:])
args = dict(zip(args_iter, args_iter))
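    # zipping an iterator with itself pairs consecutive argv items, e.g.
    # ['s3_input_data', 's3://bucket/in', ...] -> {'s3_input_data': 's3://bucket/in', ...}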
# Retrieve the args and replace 's3://' with 's3a://' (used by Spark)
s3_input_data = args['s3_input_data'].replace('s3://', 's3a://')
print(s3_input_data)
s3_output_train_data = args['s3_output_train_data'].replace('s3://', 's3a://')
print(s3_output_train_data)
s3_output_validation_data = args['s3_output_validation_data'].replace('s3://', 's3a://')
print(s3_output_validation_data)
s3_output_test_data = args['s3_output_test_data'].replace('s3://', 's3a://')
print(s3_output_test_data)
transform(spark,
s3_input_data,
'/opt/ml/processing/output/bert/train',
'/opt/ml/processing/output/bert/validation',
'/opt/ml/processing/output/bert/test',
# s3_output_train_data, s3_output_validation_data, s3_output_test_data
)
if __name__ == "__main__":
main()
|
examples/plot_text.py | mewbak/hypertools | 1,681 | 12721767 | <filename>examples/plot_text.py
# -*- coding: utf-8 -*-
"""
=============================
Plotting text
=============================
To plot text, simply pass the text data to the plot function. By default, the
text samples will be transformed into a vector of word counts and then modeled
with Latent Dirichlet Allocation (# of topics = 100) using a model fit to a
large sample of Wikipedia pages. If you specify semantic=None, the word
count vectors will be plotted. To convert the text to a matrix (or list of
matrices), we also expose the format_data function.
"""
# Code source: <NAME>
# License: MIT
# load hypertools
import hypertools as hyp
# load the data
data = [['i like cats alot', 'cats r pretty cool', 'cats are better than dogs'],
['dogs rule the haus', 'dogs are my jam', 'dogs are a mans best friend'],
'i haz a cheezeburger?']
# plot it
hyp.plot(data, 'o')
# convert text to matrix without plotting
# mtx = hyp.tools.format_data(data, vectorizer='TfidfVectorizer', semantic='NMF')
|
packages/network/speedtest.py | madstk1/leon | 9,211 | 12721788 | <reponame>madstk1/leon
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# The SpeedTest package will give you information about your network speed
# Author: <NAME>
# Date: 2019-03-09
# Based on the package https://github.com/sivel/speedtest-cli
import utils
import os
import sys
import subprocess
import re
def run(string, entities):
"""The SpeedTest package will give you information about your network speed """
utils.output('inter', 'testing', utils.translate('testing'))
realpath = os.path.dirname(os.path.realpath(__file__))
process = subprocess.Popen(
[sys.executable, realpath + '/speedtest.lib.py', '--simple'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
(output, err) = process.communicate()
p_status = process.wait()
if err:
return utils.output('end', 'error', utils.translate('error'))
rawoutput = output.decode('utf-8')
data = {
'ping': re.search('Ping:(.+?)\n', rawoutput).group(1).strip(),
'download': re.search('Download:(.+?)\n', rawoutput).group(1).strip(),
'upload': re.search('Upload:(.+?)\n', rawoutput).group(1).strip()
}
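    # Illustrative values only (actual numbers depend on the connection), e.g.
    #   {'ping': '24.85 ms', 'download': '92.41 Mbit/s', 'upload': '38.70 Mbit/s'}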
return utils.output('end', 'done', utils.translate('done', data))
|
esphome/components/xiaomi_rtcgq02lm/binary_sensor.py | OttoWinter/esphomeyaml | 249 | 12721808 | <filename>esphome/components/xiaomi_rtcgq02lm/binary_sensor.py
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import binary_sensor
from esphome.const import (
CONF_LIGHT,
CONF_MOTION,
CONF_TIMEOUT,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_MOTION,
CONF_ID,
)
from esphome.core import TimePeriod
from . import XiaomiRTCGQ02LM
DEPENDENCIES = ["xiaomi_rtcgq02lm"]
CONF_BUTTON = "button"
CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.use_id(XiaomiRTCGQ02LM),
cv.Optional(CONF_MOTION): binary_sensor.binary_sensor_schema(
device_class=DEVICE_CLASS_MOTION
).extend(
{
cv.Optional(CONF_TIMEOUT, default="5s"): cv.All(
cv.positive_time_period_milliseconds,
cv.Range(max=TimePeriod(milliseconds=65535)),
),
}
),
cv.Optional(CONF_LIGHT): binary_sensor.binary_sensor_schema(
device_class=DEVICE_CLASS_LIGHT
),
cv.Optional(CONF_BUTTON): binary_sensor.binary_sensor_schema().extend(
{
cv.Optional(CONF_TIMEOUT, default="200ms"): cv.All(
cv.positive_time_period_milliseconds,
cv.Range(max=TimePeriod(milliseconds=65535)),
),
}
),
}
)
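# Rough YAML sketch of how these options could be wired up; the hub id and sensor
# names are assumptions, not taken from the original source:
#
#   binary_sensor:
#     - platform: xiaomi_rtcgq02lm
#       id: my_rtcgq02lm_hub
#       motion:
#         name: "RTCGQ02LM Motion"
#         timeout: 5s
#       light:
#         name: "RTCGQ02LM Light"
#       button:
#         name: "RTCGQ02LM Button"
#         timeout: 200ms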
async def to_code(config):
parent = await cg.get_variable(config[CONF_ID])
if CONF_MOTION in config:
sens = await binary_sensor.new_binary_sensor(config[CONF_MOTION])
cg.add(parent.set_motion(sens))
cg.add(parent.set_motion_timeout(config[CONF_MOTION][CONF_TIMEOUT]))
if CONF_LIGHT in config:
sens = await binary_sensor.new_binary_sensor(config[CONF_LIGHT])
cg.add(parent.set_light(sens))
if CONF_BUTTON in config:
sens = await binary_sensor.new_binary_sensor(config[CONF_BUTTON])
cg.add(parent.set_button(sens))
cg.add(parent.set_button_timeout(config[CONF_BUTTON][CONF_TIMEOUT]))
|
test/test_uri.py | gap892003/mastermind | 389 | 12721829 | import mastermind.uri as uri
def test_is_template():
assert uri.is_template("http://localhost:8000") is False
assert uri.is_template("http://localhost:8000/{a}/") is True
def test_eq():
assert uri.eq("http://localhost:8000", "http://localhost:8000")
assert uri.eq("https://localhost", "https://localhost")
assert not uri.eq("https://localhost", "http://localhost:443")
assert not uri.eq("https://localhost:9443", "http://localhost:9443")
assert not uri.eq("http://localhost/foo", "http://localhost/foo?q=1")
assert not uri.eq("http://localhost/{var}", "http://localhost/{var}")
assert uri.eq("http://localhost/{var}", "http://localhost/value")
assert uri.eq("http://localhost/{?q,p}", "http://localhost/?p=1")
def test_expand_template1():
expected = "http://example.org/value"
assert uri.expand_template("http://example.org/{var}",
"http://example.org/value") == expected
def test_expand_template2():
expected = "http://example.org/value?q=1"
assert uri.expand_template("http://example.org/{var}{?q}",
"http://example.org/value?q=1") == expected
def test_expand_template3():
expected = "http://example.org/?q=1"
assert uri.expand_template("http://example.org/{?q,p}",
"http://example.org?q=1") == expected
def test_query_pairs():
assert uri.query_pairs("") == []
assert uri.query_pairs("q=1") == [("q", "1")]
assert uri.query_pairs("q=1&p=2") == [("q", "1"), ("p", "2")]
def test_path_segments():
assert uri.path_segments("") == []
assert uri.path_segments("/") == []
assert uri.path_segments("/foo") == ["foo"]
assert uri.path_segments("/foo/bar") == ["foo", "bar"]
assert uri.path_segments("/foo/bar/baz") == ["foo", "bar", "baz"]
|
tests/lego/test_hybrid.py | nrupatunga/pynetbuilder | 381 | 12721850 | <reponame>nrupatunga/pynetbuilder
from caffe.proto import caffe_pb2
import google.protobuf as pb
from caffe import layers as L
from caffe import params as P
import caffe
import sys
sys.path.append('../netbuilder')
def test_conv_bn_relu_lego():
from lego.hybrid import ConvBNReLULego
n = caffe.NetSpec()
n.data, n.label = L.ImageData(image_data_param=dict(source='tmp' , batch_size=100),
ntop=2, transform_param=dict(mean_file='tmp'))
params = dict(name='1', kernel_size=5, num_output=16, pad=2,
stride=1, use_global_stats=False)
lego = ConvBNReLULego(params)
lego.attach(n, [n.data])
assert n['conv_' + params['name']] is not None
print >> sys.stderr, n.to_proto()
def test_conv_bn_lego():
from lego.hybrid import ConvBNLego
n = caffe.NetSpec()
n.data, n.label = L.ImageData(image_data_param=dict(source='tmp' , batch_size=100),
ntop=2, transform_param=dict(mean_file='tmp'))
params = dict(name='1', kernel_size=5, num_output=16, pad=2,
stride=1, use_global_stats=True)
lego = ConvBNLego(params)
lego.attach(n, [n.data])
assert n['conv_' + params['name']] is not None
# print >> sys.stderr, n.to_proto()
def test_eltwise_relu_lego():
from lego.hybrid import EltwiseReLULego
n = caffe.NetSpec()
n.data, n.label = L.ImageData(image_data_param=dict(source='tmp' , batch_size=100),
ntop=2, transform_param=dict(mean_file='tmp'))
EltwiseReLULego(dict(name='1')).attach(n, [n.data, n.label])
assert n['eltwise_1'] is not None
# print >> sys.stderr, n.to_proto()
def test_fire_lego():
from lego.hybrid import FireLego
n = caffe.NetSpec()
n.data, n.label = L.ImageData(image_data_param=dict(source='tmp' , batch_size=100),
ntop=2, transform_param=dict(mean_file='tmp'))
params = dict(name='fire1', squeeze_num_output=16, use_global_stats=True)
FireLego(params).attach(n, [n.data])
# print >> sys.stderr, n.to_proto()
def test_inception_v1_lego():
from lego.hybrid import InceptionV1Lego
n = caffe.NetSpec()
n.data, n.label = L.ImageData(image_data_param=dict(source='tmp' , batch_size=100),
ntop=2, transform_param=dict(mean_file='tmp'))
params = dict(name='inception1', num_outputs=[16, 96, 128, 16, 32, 32], use_global_stats=True)
InceptionV1Lego(params).attach(n, [n.data])
# print >> sys.stderr, n.to_proto()
def test_shortcut_lego():
from lego.hybrid import ShortcutLego
n = caffe.NetSpec()
n.data, n.label = L.ImageData(image_data_param=dict(source='tmp' , batch_size=100),
ntop=2, transform_param=dict(mean_file='tmp'))
params = dict(name='block1', shortcut='projection', num_output=64, main_branch='inception', stride=1, use_global_stats=True)
ShortcutLego(params).attach(n, [n.data])
# print >> sys.stderr, n.to_proto()
|
packages/core/minos-microservice-networks/tests/test_networks/test_brokers/test_publishers/test_queued/test_impl.py | sorasful/minos-python | 247 | 12721863 | <filename>packages/core/minos-microservice-networks/tests/test_networks/test_brokers/test_publishers/test_queued/test_impl.py
import unittest
from asyncio import (
sleep,
)
from unittest.mock import (
AsyncMock,
call,
)
from minos.networks import (
BrokerMessageV1,
BrokerMessageV1Payload,
BrokerPublisher,
InMemoryBrokerPublisher,
InMemoryBrokerPublisherQueue,
QueuedBrokerPublisher,
)
class TestQueuedBrokerPublisher(unittest.IsolatedAsyncioTestCase):
def setUp(self) -> None:
self.impl = InMemoryBrokerPublisher()
self.queue = InMemoryBrokerPublisherQueue()
def test_is_subclass(self):
self.assertTrue(issubclass(QueuedBrokerPublisher, BrokerPublisher))
def test_impl(self):
publisher = QueuedBrokerPublisher(self.impl, self.queue)
self.assertEqual(self.impl, publisher.impl)
def test_queue(self):
publisher = QueuedBrokerPublisher(self.impl, self.queue)
self.assertEqual(self.queue, publisher.queue)
async def test_setup_destroy(self):
impl_setup_mock = AsyncMock()
impl_destroy_mock = AsyncMock()
queue_setup_mock = AsyncMock()
queue_destroy_mock = AsyncMock()
self.impl.setup = impl_setup_mock
self.impl.destroy = impl_destroy_mock
self.queue.setup = queue_setup_mock
self.queue.destroy = queue_destroy_mock
async with QueuedBrokerPublisher(self.impl, self.queue):
self.assertEqual(1, impl_setup_mock.call_count)
self.assertEqual(0, impl_destroy_mock.call_count)
self.assertEqual(1, queue_setup_mock.call_count)
self.assertEqual(0, queue_destroy_mock.call_count)
impl_setup_mock.reset_mock()
impl_destroy_mock.reset_mock()
queue_setup_mock.reset_mock()
queue_destroy_mock.reset_mock()
self.assertEqual(0, impl_setup_mock.call_count)
self.assertEqual(1, impl_destroy_mock.call_count)
self.assertEqual(0, queue_setup_mock.call_count)
self.assertEqual(1, queue_destroy_mock.call_count)
async def test_send(self):
queue_enqueue_mock = AsyncMock()
self.queue.enqueue = queue_enqueue_mock
publisher = QueuedBrokerPublisher(self.impl, self.queue)
message = BrokerMessageV1("foo", BrokerMessageV1Payload("bar"))
await publisher.send(message)
self.assertEqual([call(message)], queue_enqueue_mock.call_args_list)
async def test_run(self):
messages = [
BrokerMessageV1("foo", BrokerMessageV1Payload("bar")),
BrokerMessageV1("bar", BrokerMessageV1Payload("foo")),
]
impl_send_mock = AsyncMock()
self.impl.send = impl_send_mock
async with QueuedBrokerPublisher(self.impl, self.queue) as publisher:
await publisher.send(messages[0])
await publisher.send(messages[1])
await sleep(0.5) # To give time to consume the message
self.assertEqual([call(messages[0]), call(messages[1])], impl_send_mock.call_args_list)
if __name__ == "__main__":
unittest.main()
|
academicstoday_project/teacher/tests/test_lecture.py | LeeDoona/EasyGrading | 146 | 12721872 | # Django & Python
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf.urls.static import static, settings
import json
# Model
from registrar.models import Course
from registrar.models import Teacher
from registrar.models import Lecture
# View
from teacher.views import lecture
# Constants
TEST_USER_EMAIL = "<EMAIL>"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "<PASSWORD>"
TEST_USER_EMAIL2 = "<EMAIL>"
TEST_USER_USERNAME2 = "whalesquid"
TEST_USER_PASSWORD2 = "<PASSWORD>"
# Notes:
# https://docs.djangoproject.com/en/1.7/topics/testing/tools/#assertions
# Create your tests here.
class LectureTestCase(TestCase):
def tearDown(self):
courses = Course.objects.all()
for course in courses:
course.delete()
User.objects.all().delete()
def setUp(self):
# Create our Trudy user.
User.objects.create_user(
email=TEST_USER_EMAIL2,
username=TEST_USER_USERNAME2,
password=<PASSWORD>
)
user = User.objects.get(email=TEST_USER_EMAIL2)
teacher = Teacher.objects.create(user=user)
# Create our Student.
User.objects.create_user(
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=<PASSWORD>
).save()
user = User.objects.get(email=TEST_USER_EMAIL)
teacher = Teacher.objects.create(user=user)
# Create a test course.
Course.objects.create(
id=1,
title="Comics Book Course",
sub_title="The definitive course on comics!",
category="",
teacher=teacher,
)
course = Course.objects.get(id=1)
Lecture.objects.create(
lecture_id=1,
lecture_num=1,
week_num=1,
title="Blade vs Evil",
description="Fighting for the destiny of the Earth.",
course=course,
)
def get_logged_in_client(self):
client = Client()
client.login(
username=TEST_USER_USERNAME,
password=<PASSWORD>
)
return client
def get_logged_in_trudy_client(self):
client = Client()
client.login(
username=TEST_USER_USERNAME2,
password=<PASSWORD>
)
return client
def test_url_resolves_to_lectures_page_view(self):
found = resolve('/teacher/course/1/lectures')
self.assertEqual(found.func, lecture.lectures_page)
def test_lectures_page_with_submission(self):
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/lectures')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course',response.content)
self.assertIn(b'Lectures',response.content)
def test_url_resolves_to_lecture_modal(self):
found = resolve('/teacher/course/1/lecture_modal')
self.assertEqual(found.func, lecture.lecture_modal)
def test_lecture_modal_with_submission(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/lecture_modal',{
'lecture_id': 1,
},**kwargs)
self.assertEqual(response.status_code, 200)
self.assertIn(b'lecture_modal',response.content)
self.assertIn(b'Blade vs Evil',response.content)
def test_lecture_modal_without_submission(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/lecture_modal',{
'lecture_id': 0,
},**kwargs)
self.assertEqual(response.status_code, 200)
self.assertIn(b'lecture_modal',response.content)
def test_save_lecture_with_insert(self):
try:
Lecture.objects.get(lecture_id=1).delete()
except Lecture.DoesNotExist:
pass
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/save_lecture',{
'lecture_id': 0,
'title': 'Blade vs Evil',
'week_num': 1,
'lecture_num': 1,
'description': 'Video of a fight',
'youtube_url': '',
'preferred_service': settings.YOUTUBE_VIDEO_PLAYER,
},**kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
def test_save_lecture_with_update(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/save_lecture',{
'lecture_id': 1,
'title': 'Blade vs Evil',
'week_num': 1,
'lecture_num': 1,
'description': 'Video of a fight',
'youtube_url': '',
'preferred_service': settings.YOUTUBE_VIDEO_PLAYER,
},**kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
def test_delete_lecture_with_empty_records(self):
try:
Lecture.objects.get(lecture_id=1).delete()
except Lecture.DoesNotExist:
pass
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/delete_lecture',{
'lecture_id': 666,
},**kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'record not found')
self.assertEqual(array['status'], 'failed')
def test_delete_lecture_with_correct_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/delete_lecture',{
'lecture_id': 1,
},**kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'deleted')
self.assertEqual(array['status'], 'success')
def test_delete_lecture_with_incorrect_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_trudy_client()
response = client.post('/teacher/course/1/delete_lecture',{
'lecture_id': 1,
},**kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'unauthorized deletion')
self.assertEqual(array['status'], 'failed')
|
fengshen/models/megatron_t5/tokenization_megatron_t5.py | dumpmemory/Fengshenbang-LM | 265 | 12721903 | # coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" T5Tokenizer """
from transformers import BertTokenizer
class T5Tokenizer():
def __init__(self, extra_id_num=118):
self.extra_id_num = extra_id_num
@classmethod
def from_pretrained(self, vocab_path):
self.extra_id_num = 118
self.T5_special_tokens = ['[BOS]', '[EOS]']
for i in range(self.extra_id_num):
self.T5_special_tokens.append(f'<extra_id_{str(i)}>')
tokenizer = BertTokenizer.from_pretrained(vocab_path, additional_special_tokens=self.T5_special_tokens)
return tokenizer
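
# Illustrative usage sketch (added for clarity; not part of the original file). The vocab
# directory path is a placeholder assumption: in practice you would pass the BERT-style
# vocab the model was trained with.
if __name__ == '__main__':
    tokenizer = T5Tokenizer.from_pretrained('/path/to/bert_vocab')  # hypothetical path
    print(tokenizer.tokenize('<extra_id_0> [BOS] hello [EOS]'))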
|
test/setup.py | sanjaymsh/fast-entry_points | 121 | 12721918 | from setuptools import setup
import fastentrypoints
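# Note (added for clarity): importing fastentrypoints patches setuptools so the
# console_scripts entry point declared below is installed as a plain launcher script,
# avoiding the slow pkg_resources import at startup.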
setup(
name='dummypkg',
version='0.0.0',
py_modules=['dummy'],
description='dummy package for the test',
entry_points={'console_scripts': ['hello=dummy:main']},
)
|
rotateMatrix180Deg.py | deepak5998/Py | 726 | 12721983 | <filename>rotateMatrix180Deg.py
def rotateMatrixby90(ipMat, size):
opMat = [[0 for i in range(size)] for j in range(size)]
for i in range(size):
for j in range(size):
opMat[j][i] = ipMat[i][j]
return opMat
def reverseMatrix(ipMat, size):
opMat = [[0 for i in range(size)] for j in range(size)]
for i in range(size):
for j in range(size):
opMat[abs(i-(size-1))][j] = ipMat[i][j]
return opMat
def rotateMatrixby180(ipMat, size):
mat_1 = rotateMatrixby90(ipMat, size)
mat_2 = reverseMatrix(mat_1, len(mat_1))
mat_3 = rotateMatrixby90(mat_2, len(mat_2))
mat_4 = reverseMatrix(mat_3, len(mat_3))
return mat_4
def printMatrix(ipMat, size):
for i in range(size):
for j in range(size):
print(ipMat[i][j], end=" ")
print('\n')
matA = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
print("Original-Matrix" + '\n')
printMatrix(matA, len(matA))
print("Rotated-Matrix" + '\n')
rotatedMat = rotateMatrixby90(matA, len(matA))
printMatrix(rotatedMat, len(rotatedMat))
matB = [[1, 5, 9, 13], [2, 6, 10, 14], [3, 7, 11, 15], [4, 8, 12, 16]]
reverseMat = reverseMatrix(matB, len(matB))
print("Reverse-Matrix" + '\n')
printMatrix(reverseMat, len(reverseMat))
print("Rotated-180-Matrix" + '\n')
rotatedMat180 = rotateMatrixby180(matA, len(matA))
printMatrix(rotatedMat180, len(rotatedMat180))
|
auctioning_platform/auctions/auctions/tests/factories.py | nhdinh/smp-modulith | 299 | 12721992 | <filename>auctioning_platform/auctions/auctions/tests/factories.py
import factory
from foundation.value_objects.factories import get_dollars
from auctions.domain.entities import Auction
class AuctionFactory(factory.Factory):
class Meta:
model = Auction
id = factory.Sequence(lambda n: n)
bids = factory.List([])
title = factory.Faker("name")
starting_price = get_dollars("10.00")
ends_at = factory.Faker("future_datetime", end_date="+7d")
ended = False
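
# Illustrative usage sketch (added for clarity; not part of the original file). The field
# override shown here is an arbitrary assumption; everything else uses the defaults above.
def _example_usage():
    auction = AuctionFactory()
    pricier = AuctionFactory(starting_price=get_dollars("25.00"))
    return auction, pricier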
|
src/test/test_cmd_version.py | arshul/popper | 179 | 12721997 | <reponame>arshul/popper
from click.testing import CliRunner
from popper import __version__
from popper import _version_file
from popper.commands import cmd_version
from .test_common import PopperTest
class TestCommandVersion(PopperTest):
def test_version(self):
self.assertIsNot("0.0.0", __version__)
with self.assertLogs("popper") as test:
result = CliRunner().invoke(cmd_version.cli)
self.assertTrue(__version__ in test.output[0])
self.assertEqual(0, result.exit_code)
with open(_version_file) as f:
self.assertEqual(f"__popper_version__ = '{__version__}'\n", f.read())
|
live_demo/realsense/camera.py | AriaPs/cleargrasp | 191 | 12722037 | #!/usr/bin/env python3
import socket
import numpy as np
import cv2
import os
import time
import struct
class Camera(object):
def __init__(self):
# Data options (change me)
self.im_height = 720 # 848x480, 1280x720
self.im_width = 1280
# self.resize_height = 720
# self.resize_width = 1280
self.tcp_host_ip = '127.0.0.1'
self.tcp_port = 50010
self.buffer_size = 10*4 + self.im_height*self.im_width*5 # in bytes
# Connect to server
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
self.intrinsics = None
self.get_data()
def get_data(self):
# Ping the server with anything
self.tcp_socket.send(b'asdf')
# Fetch TCP data:
# color camera intrinsics, 9 floats, number of bytes: 9 x 4
# depth scale for converting depth from uint16 to float, 1 float, number of bytes: 4
# depth image, self.im_width x self.im_height uint16, number of bytes: self.im_width x self.im_height x 2
# color image, self.im_width x self.im_height x 3 uint8, number of bytes: self.im_width x self.im_height x 3
data = b''
while len(data) < ((9*4 + 9*4 + 16*4 + 4 + 8)+(self.im_height*self.im_width*5)):
data += self.tcp_socket.recv(self.buffer_size)
# while len(data) < (10*4 + self.im_height*self.im_width*5):
# data += self.tcp_socket.recv(self.buffer_size)
# Reorganize TCP data into color and depth frame
self.color_intr = np.fromstring(data[0:(9*4)],np.float32).reshape(3,3)
self.depth_intr = np.fromstring(data[(9*4):(9*4+9*4)],np.float32).reshape(3,3)
self.depth2color_extr = np.fromstring(data[(9*4+9*4):(9*4+9*4+16*4)],np.float32).reshape(4,4)
depth_scale = np.fromstring(data[(9*4+9*4+16*4):(9*4+9*4+16*4+4)],np.float32)[0]
self.timestamp = np.fromstring(data[(9*4+9*4+16*4+4):(9*4+9*4+16*4+4+8)],np.long)[0]
depth_im = np.fromstring(data[(9*4+9*4+16*4+4+8):((9*4+9*4+16*4+4+8)+self.im_width*self.im_height*2)],np.uint16).reshape(self.im_height,self.im_width)
self.color_im = np.fromstring(data[((9*4+9*4+16*4+4+8)+self.im_width*self.im_height*2):],np.uint8).reshape(self.im_height,self.im_width,3)
        # TODO: Get depth scale from data and use that here
depth_im = depth_im.astype(float) * depth_scale # default: 0.001
# Set invalid depth pixels to zero
self.depth_im = depth_im
self.depth_im[np.isnan(self.depth_im)] = 0.0
self.depth_im[np.isinf(self.depth_im)] = 0.0
# self.color_im = cv2.resize(self.color_im, (self.resize_width, self.resize_height), interpolation=cv2.INTER_CUBIC)
# self.depth_im = cv2.resize(self.depth_im, (self.resize_width, self.resize_height), interpolation=cv2.INTER_NEAREST)
return self.color_im, self.depth_im
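
# Illustrative usage sketch (added for clarity; not part of the original file). It assumes
# the matching RealSense TCP server from this repo is already running on 127.0.0.1:50010.
if __name__ == '__main__':
    cam = Camera()
    color, depth = cam.get_data()
    print('color:', color.shape, 'depth:', depth.shape, 'timestamp:', cam.timestamp)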
|
benchmarks/python_e2e/cugraph_funcs.py | kaatish/cugraph | 991 | 12722073 | # Copyright (c) 2021, <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np

import cudf  # needed by read_csv() below
import cugraph
from cugraph.generators import rmat
def generate_edgelist(scale,
edgefactor,
seed=None,
unweighted=False,
):
"""
Returns a cudf DataFrame created using the R-MAT graph generator.
The resulting graph is weighted with random values of a uniform distribution
from the interval [0, 1)
scale is used to determine the number of vertices to be generated (num_verts
= 2^scale), which is also used to determine the data type for the vertex ID
values in the DataFrame.
    edgefactor determines the number of edges (num_edges = num_verts*edgefactor)
seed, if specified, will be used as the seed to the RNG.
unweighted determines if the resulting edgelist will have randomly-generated
    weights ranging in value between [0, 1). If True, an edgelist with only 2
columns is returned.
"""
df = rmat(
scale,
(2**scale)*edgefactor,
0.1,
0.2,
0.3,
seed or 42,
clip_and_flip=False,
scramble_vertex_ids=True,
create_using=None, # return edgelist instead of Graph instance
mg=False
)
if not unweighted:
rng = np.random.default_rng(seed)
df["weight"] = rng.random(size=len(df))
return df
def read_csv(input_csv_file, scale):
"""
Returns a cudf DataFrame from reading input_csv_file.
All input CSV files should be weighted with random values of a uniform
distribution from the interval [0, 1) in order to best simulate the output
of a Graph500-compliant graph generator.
    scale is used to determine the data type for the vertex ID values in the
    DataFrame (num_verts = 2^scale).
"""
vertex_t = "int32" if scale <= 32 else "int64"
dtypes = [vertex_t, vertex_t, "float32"]
    names = ["src", "dst", "weight"]
chunksize = cugraph.dask.get_chunksize(input_csv_file)
return cudf.read_csv(input_csv_file,
chunksize=chunksize,
delimiter=" ",
#names=names,
dtype=dtypes,
header=None,
)
################################################################################
# Benchmarked functions
#
# The "benchmark_name" attr is used by the benchmark infra for reporting and is
# set to assign more meaningful names to be displayed in reports.
def construct_graph(dataframe, symmetric=False):
"""
dataframe contains weighted and undirected edges with self loops. Multiple
edges will likely be present as well. The returned Graph object must be
symmetrized and have self loops removed.
"""
if symmetric:
G = cugraph.Graph()
else:
G = cugraph.DiGraph()
if len(dataframe.columns) > 2:
G.from_cudf_edgelist(
dataframe, source="src", destination="dst", edge_attr="weight")
#G.from_cudf_edgelist(
# dataframe, source="0", destination="1", edge_attr="2")
else:
G.from_cudf_edgelist(
dataframe, source="src", destination="dst")
#G.from_cudf_edgelist(
# dataframe, source="0", destination="1")
return G
construct_graph.benchmark_name = "from_cudf_edgelist"
def bfs(G, start):
return cugraph.bfs(G, start=start)
def sssp(G, start):
return cugraph.sssp(G, source=start)
def wcc(G):
return cugraph.weakly_connected_components(G)
def louvain(G):
return cugraph.louvain(G)
def pagerank(G):
return cugraph.pagerank(G)
def katz(G, alpha=None):
return cugraph.katz_centrality(G, alpha)
################################################################################
# Session-wide setup and teardown
def setup(*args, **kwargs):
return tuple()
def teardown(*args, **kwargs):
pass
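
# Illustrative end-to-end sketch (added for clarity; not part of the original file). The
# scale/edgefactor values are arbitrary assumptions; it simply chains the helpers above.
def _example_run():
    df = generate_edgelist(scale=10, edgefactor=16, seed=42)
    G = construct_graph(df, symmetric=True)
    return pagerank(G)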
|
test/testtree.py | aslisabanci/datastructures | 159 | 12722074 | import unittest
from ds2.tree import Tree
class TestTree(unittest.TestCase):
def testinit(self):
Tree(['root'])
Tree([1, [2, [3], [4]], [5, [6], [7], [8]]])
def teststr(self):
self.assertEqual(str(Tree([1, [2], [3]])), "1\n 2\n 3")
self.assertEqual(str(Tree([1, [2, [3]]])), "1\n 2\n 3")
def testcontains(self):
T = Tree([1, [2, [3]]])
self.assertTrue(1 in T)
self.assertTrue(2 in T)
self.assertTrue(3 in T)
self.assertFalse(4 in T)
def testeq(self):
A = Tree([1, [2], [3]])
B = Tree([1, [2], [3]])
C = Tree([1, [3], [2]])
D = Tree([1, [2, [3]]])
E = Tree([1, [2, [3]]])
self.assertTrue(A == B)
self.assertTrue(D == E)
self.assertFalse(A == C)
self.assertFalse(B == C)
self.assertFalse(A == D)
def testheight(self):
A = Tree([1, [2, [3]]])
B = Tree([1, [2], [3], [4]])
C = Tree([1,[1,[1,[1,[1,[1]]]]]])
self.assertEqual(A.height(), 2)
self.assertEqual(B.height(), 1)
self.assertEqual(C.height(), 5)
self.assertEqual(Tree([1]).height(), 0)
def testpreorder(self):
A = Tree([1, [2], [3]])
B = Tree([1, [3], [2]])
C = Tree([1, [2, [3]]])
self.assertEqual(list(A.preorder()), [1, 2, 3])
self.assertEqual(list(B.preorder()), [1, 3, 2])
self.assertEqual(list(C.preorder()), [1, 2, 3])
def testiter(self):
A = Tree([4, [5], [6]])
B = Tree([1, [3], [2]])
C = Tree([1, [2, [3]]])
self.assertEqual(list(A), [4, 5, 6])
self.assertEqual(list(B), [1, 3, 2])
self.assertEqual(list(C), [1, 2, 3])
def testpostorder(self):
A = Tree([1, [2], [3]])
B = Tree([1, [3], [2]])
C = Tree([1, [2, [3]]])
self.assertEqual(list(A.postorder()), [2, 3, 1])
self.assertEqual(list(B.postorder()), [3, 2, 1])
self.assertEqual(list(C.postorder()), [3, 2, 1])
def testlayerorder(self):
A = Tree([1, [2], [3]])
B = Tree([1, [2, [3]], [4]])
C = Tree([1, [2, [3], [4]]])
self.assertEqual(list(A.layerorder()), [1, 2, 3])
self.assertEqual(list(B.layerorder()), [1, 2, 4, 3])
self.assertEqual(list(C.layerorder()), [1, 2, 3, 4])
if __name__ == '__main__':
unittest.main()
|
samples/python/53.teams-messaging-extensions-action-preview/example_data.py | Aliacf21/BotBuilder-Samples | 1,998 | 12722079 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class ExampleData(object):
def __init__(
self,
question: str = None,
is_multi_select: bool = False,
option1: str = None,
option2: str = None,
option3: str = None,
):
self.question = question
self.is_multi_select = is_multi_select
self.option1 = option1
self.option2 = option2
self.option3 = option3
|
examples/slider.py | salt-die/nurses_2 | 171 | 12722097 | """
Example slider widget.
"""
import asyncio
from nurses_2.app import App
from nurses_2.colors import BLUE, GREEN, BLACK, RED, ColorPair
from nurses_2.widgets.text_widget import TextWidget
from nurses_2.widgets.slider import Slider
GREEN_ON_BLACK = ColorPair.from_colors(GREEN, BLACK)
class MyApp(App):
async def on_start(self):
display = TextWidget(size=(2, 30))
display.add_text("Slider 1 Value:", row=0)
display.add_text("Slider 2 Value:", row=1)
slider_1 = Slider(
width=20,
pos=(2, 0),
min=0,
max=100,
handle_color=BLUE,
callback=lambda value: display.add_text(f"{round(value, 3):<10}", row=0, column=16),
fill_color=RED,
default_color_pair=GREEN_ON_BLACK,
)
slider_2 = Slider(
width=15,
pos=(3, 0),
min=-20,
max=50,
handle_color=BLUE,
callback=lambda value: display.add_text(f"{round(value, 3):<10}", row=1, column=16),
fill_color=RED,
default_color_pair=GREEN_ON_BLACK,
)
self.add_widgets(display, slider_1, slider_2)
MyApp().run()
|
python-sorting-algorithms/Selection-sort.py | vatsla1601/Hacktoberfest2020-1 | 266 | 12722111 | def selection_sort(nums):
# This value of i corresponds to how many values were sorted
for i in range(len(nums)):
# We assume that the first item of the unsorted segment is the smallest
lowest_value_index = i
# This loop iterates over the unsorted items
for j in range(i + 1, len(nums)):
if nums[j] < nums[lowest_value_index]:
lowest_value_index = j
# Swap values of the lowest unsorted element with the first unsorted
# element
nums[i], nums[lowest_value_index] = nums[lowest_value_index], nums[i]
# Verify it works
random_list_of_nums = [12, 8, 3, 20, 11]
selection_sort(random_list_of_nums)
print(random_list_of_nums) |
test_python_toolbox/test_nifty_collections/test_various_ordered_sets.py | hboshnak/python_toolbox | 119 | 12722130 | # Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
import operator
from python_toolbox import cute_testing
from python_toolbox import logic_tools
from python_toolbox import emitting
from python_toolbox.nifty_collections import (OrderedSet, FrozenOrderedSet,
EmittingOrderedSet)
class BaseOrderedSetTestCase(cute_testing.TestCase):
__test__ = False
def test_operations(self):
ordered_set = self.ordered_set_type([5, 61, 2, 7, 2])
assert type(ordered_set | ordered_set) == \
type(ordered_set & ordered_set) == type(ordered_set)
def test_bool(self):
assert bool(self.ordered_set_type({})) is False
assert bool(self.ordered_set_type({0})) is True
assert bool(self.ordered_set_type(range(5))) is True
class BaseMutableOrderedSetTestCase(BaseOrderedSetTestCase):
__test__ = False
def test_sort(self):
ordered_set = self.ordered_set_type([5, 61, 2, 7, 2])
assert ordered_set != {5, 61, 2, 7}
ordered_set.move_to_end(61)
assert list(ordered_set) == [5, 2, 7, 61]
ordered_set.sort()
assert list(ordered_set) == [2, 5, 7, 61]
ordered_set.sort(key=lambda x: -x, reverse=True)
assert list(ordered_set) == [2, 5, 7, 61]
def test_mutable(self):
ordered_set = self.ordered_set_type(range(4))
assert list(ordered_set) == list(range(4))
assert len(ordered_set) == 4
assert 1 in ordered_set
assert 3 in ordered_set
assert 7 not in ordered_set
ordered_set.add(8)
assert list(ordered_set)[-1] == 8
ordered_set.discard(2)
assert 2 not in ordered_set
assert list(reversed(ordered_set)) == [8, 3, 1, 0]
assert ordered_set.pop() == 8
assert ordered_set.pop(last=False) == 0
ordered_set.add(7, last=False)
assert tuple(ordered_set) == (7, 1, 3)
with cute_testing.RaiseAssertor(KeyError):
ordered_set.remove('meow')
ordered_set.discard('meow')
ordered_set.discard('meow')
ordered_set.discard('meow')
assert ordered_set | ordered_set == ordered_set
assert ordered_set & ordered_set == ordered_set
class OrderedSetTestCase(BaseMutableOrderedSetTestCase):
__test__ = True
ordered_set_type = OrderedSet
class FrozenOrderedSetTestCase(BaseOrderedSetTestCase):
__test__ = True
ordered_set_type = FrozenOrderedSet
def test_frozen(self):
frozen_ordered_set = self.ordered_set_type(range(4))
assert list(frozen_ordered_set) == list(range(4))
assert len(frozen_ordered_set) == 4
assert 1 in frozen_ordered_set
assert 3 in frozen_ordered_set
assert 7 not in frozen_ordered_set
with cute_testing.RaiseAssertor(AttributeError):
frozen_ordered_set.add(8)
with cute_testing.RaiseAssertor(AttributeError):
frozen_ordered_set.discard(2)
with cute_testing.RaiseAssertor(AttributeError):
frozen_ordered_set.remove(2)
with cute_testing.RaiseAssertor(AttributeError):
frozen_ordered_set.clear()
with cute_testing.RaiseAssertor(AttributeError):
frozen_ordered_set.sort()
with cute_testing.RaiseAssertor(AttributeError):
frozen_ordered_set.move_to_end(2)
with cute_testing.RaiseAssertor(AttributeError):
frozen_ordered_set.pop(2)
assert list(frozen_ordered_set) == list(range(4))
def test_hashable(self):
d = {
FrozenOrderedSet(range(1)): 1,
FrozenOrderedSet(range(2)): 2,
FrozenOrderedSet(range(3)): 3,
}
assert len(d) == 3
assert set(d.values()) == {1, 2, 3}
assert d[FrozenOrderedSet(range(2))] == 2
d[FrozenOrderedSet(range(2))] = 20
assert set(d.values()) == {1, 20, 3}
class EmittingOrderedSetTestCase(BaseMutableOrderedSetTestCase):
__test__ = True
ordered_set_type = EmittingOrderedSet
def test_emitting(self):
times_emitted = [0]
def increment_times_emitted():
times_emitted[0] += 1
emitter = emitting.Emitter(outputs=increment_times_emitted)
emitting_ordered_set = self.ordered_set_type(range(7), emitter=emitter)
assert times_emitted == [0]
emitting_ordered_set.add(7)
assert times_emitted == [1]
emitting_ordered_set.add(7)
assert times_emitted == [1]
emitting_ordered_set.discard(17)
assert times_emitted == [1]
assert emitting_ordered_set.get_without_emitter() == \
OrderedSet(range(8))
emitting_ordered_set |= (8, 9, 10)
assert times_emitted == [4]
emitting_ordered_set |= (8, 9, 10)
assert times_emitted == [4]
assert emitting_ordered_set.get_without_emitter() == \
OrderedSet(range(11))
emitting_ordered_set.move_to_end(4)
assert times_emitted == [5]
assert tuple(emitting_ordered_set) == \
(0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 4)
def test_operations_on_different_types():
x1 = OrderedSet(range(0, 4)) | FrozenOrderedSet(range(2, 6))
x2 = OrderedSet(range(0, 4)) & FrozenOrderedSet(range(2, 6))
x3 = FrozenOrderedSet(range(0, 4)) | OrderedSet(range(2, 6))
x4 = FrozenOrderedSet(range(0, 4)) & OrderedSet(range(2, 6))
assert type(x1) == OrderedSet
assert type(x2) == OrderedSet
assert type(x3) == FrozenOrderedSet
assert type(x4) == FrozenOrderedSet
assert x1 == OrderedSet(range(0, 6))
assert x2 == OrderedSet(range(2, 4))
assert x3 == FrozenOrderedSet(range(0, 6))
assert x4 == FrozenOrderedSet(range(2, 4))
assert logic_tools.all_equivalent((x1, x2, x3, x4),
relation=operator.ne)
|
blend_predict.py | wubinbai/argus-freesound | 314 | 12722150 | <gh_stars>100-1000
import numpy as np
import pandas as pd
from src.predictor import Predictor
from src.transforms import get_transforms
from src.utils import get_best_model_path, gmean_preds_blend
from src.datasets import get_test_data
from src import config
EXPERIMENTS = [
'auxiliary_009',
'auxiliary_010',
'auxiliary_011'
]
DEVICE = 'cuda'
CROP_SIZE = 256
BATCH_SIZE = 16
def pred_test(predictor, test_data):
fname_lst, images_lst = test_data
pred_lst = []
for image in images_lst:
pred = predictor.predict(image)
pred = pred.mean(axis=0)
pred_lst.append(pred)
preds = np.stack(pred_lst, axis=0)
pred_df = pd.DataFrame(data=preds,
index=fname_lst,
columns=config.classes)
pred_df.index.name = 'fname'
return pred_df
def experiment_pred(experiment_dir, test_data):
print(f"Start predict: {experiment_dir}")
transforms = get_transforms(False, CROP_SIZE)
pred_df_lst = []
for fold in config.folds:
print("Predict fold", fold)
fold_dir = experiment_dir / f'fold_{fold}'
model_path = get_best_model_path(fold_dir)
print("Model path", model_path)
predictor = Predictor(model_path, transforms,
BATCH_SIZE,
(config.audio.n_mels, CROP_SIZE),
(config.audio.n_mels, CROP_SIZE//4),
device=DEVICE)
pred_df = pred_test(predictor, test_data)
pred_df_lst.append(pred_df)
pred_df = gmean_preds_blend(pred_df_lst)
return pred_df
if __name__ == "__main__":
print("Experiments", EXPERIMENTS)
test_data = get_test_data()
exp_pred_df_lst = []
for experiment in EXPERIMENTS:
experiment_dir = config.experiments_dir / experiment
exp_pred_df = experiment_pred(experiment_dir, test_data)
exp_pred_df_lst.append(exp_pred_df)
blend_pred_df = gmean_preds_blend(exp_pred_df_lst)
blend_pred_df.to_csv('submission.csv')
|
Chapter07/friedman.py | eduard-sukharev/Hands-On-Genetic-Algorithms-with-Python | 156 | 12722200 | import numpy as np
from sklearn import model_selection
from sklearn import datasets
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import seaborn as sns
class Friedman1Test:
"""This class encapsulates the Friedman1 regression test for feature selection
"""
VALIDATION_SIZE = 0.20
NOISE = 1.0
def __init__(self, numFeatures, numSamples, randomSeed):
"""
:param numFeatures: total number of features to be used (at least 5)
:param numSamples: number of samples in dataset
:param randomSeed: random seed value used for reproducible results
"""
self.numFeatures = numFeatures
self.numSamples = numSamples
self.randomSeed = randomSeed
# generate test data:
self.X, self.y = datasets.make_friedman1(n_samples=self.numSamples, n_features=self.numFeatures,
noise=self.NOISE, random_state=self.randomSeed)
# divide the data to a training set and a validation set:
self.X_train, self.X_validation, self.y_train, self.y_validation = \
model_selection.train_test_split(self.X, self.y, test_size=self.VALIDATION_SIZE, random_state=self.randomSeed)
self.regressor = GradientBoostingRegressor(random_state=self.randomSeed)
def __len__(self):
"""
:return: the total number of features
"""
return self.numFeatures
def getMSE(self, zeroOneList):
"""
returns the mean squared error of the regressor, calculated for the validation set, after training
using the features selected by the zeroOneList
:param zeroOneList: a list of binary values corresponding the features in the dataset. A value of '1'
represents selecting the corresponding feature, while a value of '0' means that the feature is dropped.
:return: the mean squared error of the regressor when using the features selected by the zeroOneList
"""
# drop the columns of the training and validation sets that correspond to the
# unselected features:
zeroIndices = [i for i, n in enumerate(zeroOneList) if n == 0]
currentX_train = np.delete(self.X_train, zeroIndices, 1)
currentX_validation = np.delete(self.X_validation, zeroIndices, 1)
        # train the regression model using the training set:
self.regressor.fit(currentX_train, self.y_train)
# calculate the regressor's output for the validation set:
prediction = self.regressor.predict(currentX_validation)
        # return the mean squared error of prediction vs. actual data:
return mean_squared_error(self.y_validation, prediction)
# testing the class:
def main():
# create a test instance:
test = Friedman1Test(numFeatures=15, numSamples=60, randomSeed=42)
scores = []
# calculate MSE for 'n' first features:
for n in range(1, len(test) + 1):
nFirstFeatures = [1] * n + [0] * (len(test) - n)
score = test.getMSE(nFirstFeatures)
print("%d first features: score = %f" % (n, score))
scores.append(score)
# plot graph:
sns.set_style("whitegrid")
plt.plot([i + 1 for i in range(len(test))], scores, color='red')
plt.xticks(np.arange(1, len(test) + 1, 1.0))
plt.xlabel('n First Features')
plt.ylabel('MSE')
plt.title('MSE over Features Selected')
plt.show()
if __name__ == "__main__":
main()
|
LSTM.py | harrys17451/CryptocurrencyPrediction | 669 | 12722217 | import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten,Reshape
from keras.layers import Conv1D, MaxPooling1D
from keras.utils import np_utils
from keras.layers import LSTM, LeakyReLU, CuDNNLSTM
from keras.callbacks import CSVLogger, ModelCheckpoint
import h5py
import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras import regularizers
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
with h5py.File(''.join(['data/bitcoin2015to2017_close.h5']), 'r') as hf:
datas = hf['inputs'].value
labels = hf['outputs'].value
step_size = datas.shape[1]
units= 50
second_units = 30
batch_size = 8
nb_features = datas.shape[2]
epochs = 50
output_size=16
reg = 1
output_file_name='bitcoin2015to2017_close_LSTM_1_tanh_leaky_areg_l1_'+ str(reg)
#split training validation
training_size = int(0.8* datas.shape[0])
training_datas = datas[:training_size,:]
training_labels = labels[:training_size,:,0]
validation_datas = datas[training_size:,:]
validation_labels = labels[training_size:,:,0]
#build model
model = Sequential()
model.add(CuDNNLSTM(units=units, activity_regularizer=regularizers.l1(reg), input_shape=(step_size,nb_features),return_sequences=False))
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(output_size))
model.add(LeakyReLU())
model.compile(loss='mse', optimizer='adam')
model.fit(training_datas, training_labels, batch_size=batch_size,validation_data=(validation_datas,validation_labels), epochs = epochs, callbacks=[CSVLogger(output_file_name+'.csv', append=True)])
# model.fit(datas,labels)
#model.save(output_file_name+'.h5')
|
PFPLD/data/add_type_label.py | ICpachong/nniefacelib | 505 | 12722234 |
ori_file = '/home/unaguo/hanson/data/landmark/WFLW191104/train_data/300W_LP.txt'
save_file = '/home/unaguo/hanson/data/landmark/WFLW191104/train_data/300W_LP1.txt'
lable = '0'
ori_lines = []
with open(ori_file, 'r')as f:
ori_lines = f.readlines()
with open(save_file, 'w')as f:
for line in ori_lines:
line = line.strip()
new_line = '{} {}\n'.format(line, lable)
f.write(new_line)
|
__manifest__.py | wdw139130/wechat-mall | 108 | 12722302 | <reponame>wdw139130/wechat-mall
# -*- coding: utf-8 -*-
{
'name': "wechat_mall",
'application': True,
'summary': u"""
微信小程序商城管理后台""",
'description': u"""
微信小程序商城管理后台
""",
'author': "Gzp",
'website': "http://wechat.elfgzp.cn",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/odoo/addons/base/module/module_data.xml
# for the full list
'category': 'Website',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'mail', 'website'],
# always loaded
'data': [
'security/wechat_mall_security.xml',
'security/ir.model.access.csv',
'views/parent_menus.xml',
# logistics views
'views/logistics/wechat_mall_city_views.xml',
'views/logistics/wechat_mall_district_views.xml',
'views/logistics/wechat_mall_logistics_views.xml',
'views/logistics/wechat_mall_province_views.xml',
'views/logistics/wechat_mall_shipper_views.xml',
'views/logistics/menu_logistics.xml',
# order views
'views/order/wechat_mall_order_views.xml',
'views/order/menu_order.xml',
# product views
'views/product/wechat_mall_category_views.xml',
'views/product/wechat_mall_goods_views.xml',
'views/product/wechat_mall_subshop_views.xml',
'views/product/menu_product.xml',
# setting views
'views/setting/wechat_mall_banner_views.xml',
'views/setting/wechat_mall_config_settings_views.xml',
'views/setting/wechat_mall_user_views.xml',
'views/setting/wechat_mall_address_views.xml',
'views/setting/menu_setting.xml',
# other
'views/ir_attachment_view.xml',
'views/wechat_mall_modify_price_wizard_views.xml',
'views/wechat_mall_deliver_wizard_views.xml',
'views/webclient_templates.xml',
'data/order_num_sequence.xml',
'data/payment_num_sequence.xml',
'data/mail_template.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
}
|
tests/extension/thread_/to_thread_pool/test_thread_to_thread_pool.py | jesseclin/veriloggen | 232 | 12722321 | from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import thread_to_thread_pool
expected_verilog = """
module test;
reg CLK;
reg RST;
blinkled
uut
(
.CLK(CLK),
.RST(RST)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#10000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST
);
reg [8-1:0] _th_myfunc_a_0_start;
reg [32-1:0] th_blink;
localparam th_blink_init = 0;
reg signed [32-1:0] _th_blink_times_0;
reg signed [32-1:0] _th_blink_tid_1;
reg [32-1:0] th_myfunc_a_0;
localparam th_myfunc_a_0_init = 0;
reg [32-1:0] th_myfunc_a_1;
localparam th_myfunc_a_1_init = 0;
reg [32-1:0] th_myfunc_a_2;
localparam th_myfunc_a_2_init = 0;
reg [32-1:0] th_myfunc_a_3;
localparam th_myfunc_a_3_init = 0;
reg [32-1:0] th_myfunc_b_0;
localparam th_myfunc_b_0_init = 0;
reg [32-1:0] th_myfunc_b_1;
localparam th_myfunc_b_1_init = 0;
reg [32-1:0] th_myfunc_b_2;
localparam th_myfunc_b_2_init = 0;
reg [32-1:0] th_myfunc_b_3;
localparam th_myfunc_b_3_init = 0;
reg _th_myfunc_a_0_called;
reg signed [32-1:0] _th_myfunc_a_0_tid_2;
reg signed [32-1:0] _th_myfunc_a_0_tid_3;
reg signed [32-1:0] _th_myfunc_a_0_i_4;
reg signed [32-1:0] _th_myfunc_a_0_tmp_5_6;
reg _th_myfunc_a_1_called;
reg signed [32-1:0] _th_myfunc_a_1_tid_7;
reg signed [32-1:0] _th_myfunc_a_1_tid_8;
reg signed [32-1:0] _th_myfunc_a_1_i_9;
reg signed [32-1:0] _th_myfunc_a_1_tmp_10_11;
reg _th_myfunc_a_2_called;
reg signed [32-1:0] _th_myfunc_a_2_tid_12;
reg signed [32-1:0] _th_myfunc_a_2_tid_13;
reg signed [32-1:0] _th_myfunc_a_2_i_14;
reg signed [32-1:0] _th_myfunc_a_2_tmp_15_16;
reg _th_myfunc_a_3_called;
reg signed [32-1:0] _th_myfunc_a_3_tid_17;
reg signed [32-1:0] _th_myfunc_a_3_tid_18;
reg signed [32-1:0] _th_myfunc_a_3_i_19;
reg signed [32-1:0] _th_myfunc_a_3_tmp_20_21;
reg _th_myfunc_b_0_called;
reg signed [32-1:0] _th_myfunc_b_0_tid_22;
reg signed [32-1:0] _th_myfunc_b_0_tid_23;
reg signed [32-1:0] _th_myfunc_b_0_i_24;
reg signed [32-1:0] _th_myfunc_b_0_tmp_25_26;
reg _th_myfunc_b_1_called;
reg signed [32-1:0] _th_myfunc_b_1_tid_27;
reg signed [32-1:0] _th_myfunc_b_1_tid_28;
reg signed [32-1:0] _th_myfunc_b_1_i_29;
reg signed [32-1:0] _th_myfunc_b_1_tmp_30_31;
reg _th_myfunc_b_2_called;
reg signed [32-1:0] _th_myfunc_b_2_tid_32;
reg signed [32-1:0] _th_myfunc_b_2_tid_33;
reg signed [32-1:0] _th_myfunc_b_2_i_34;
reg signed [32-1:0] _th_myfunc_b_2_tmp_35_36;
reg _th_myfunc_b_3_called;
reg signed [32-1:0] _th_myfunc_b_3_tid_37;
reg signed [32-1:0] _th_myfunc_b_3_tid_38;
reg signed [32-1:0] _th_myfunc_b_3_i_39;
reg signed [32-1:0] _th_myfunc_b_3_tmp_40_41;
reg signed [32-1:0] _th_blink_sum_42;
localparam th_blink_1 = 1;
localparam th_blink_2 = 2;
localparam th_blink_3 = 3;
localparam th_blink_4 = 4;
localparam th_blink_5 = 5;
localparam th_blink_6 = 6;
localparam th_blink_7 = 7;
localparam th_blink_8 = 8;
localparam th_blink_9 = 9;
localparam th_blink_10 = 10;
localparam th_blink_11 = 11;
localparam th_blink_12 = 12;
localparam th_blink_13 = 13;
localparam th_blink_14 = 14;
always @(posedge CLK) begin
if(RST) begin
th_blink <= th_blink_init;
_th_blink_times_0 <= 0;
_th_blink_tid_1 <= 0;
_th_myfunc_a_0_start[_th_blink_tid_1] <= (0 >> _th_blink_tid_1) & 1'd1;
_th_blink_sum_42 <= 0;
end else begin
case(th_blink)
th_blink_init: begin
_th_blink_times_0 <= 20;
th_blink <= th_blink_1;
end
th_blink_1: begin
_th_blink_tid_1 <= 0;
th_blink <= th_blink_2;
end
th_blink_2: begin
if(_th_blink_tid_1 < 8) begin
th_blink <= th_blink_3;
end else begin
th_blink <= th_blink_7;
end
end
th_blink_3: begin
_th_myfunc_a_0_start[_th_blink_tid_1] <= 1;
th_blink <= th_blink_4;
end
th_blink_4: begin
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
th_blink <= th_blink_5;
end
th_blink_5: begin
_th_myfunc_a_0_start[_th_blink_tid_1] <= 0;
th_blink <= th_blink_6;
end
th_blink_6: begin
_th_blink_tid_1 <= _th_blink_tid_1 + 1;
th_blink <= th_blink_2;
end
th_blink_7: begin
_th_blink_sum_42 <= 0;
th_blink <= th_blink_8;
end
th_blink_8: begin
_th_blink_tid_1 <= 0;
th_blink <= th_blink_9;
end
th_blink_9: begin
if(_th_blink_tid_1 < 8) begin
th_blink <= th_blink_10;
end else begin
th_blink <= th_blink_13;
end
end
th_blink_10: begin
if((_th_blink_tid_1 == 0)? th_myfunc_a_0 == 7 :
(_th_blink_tid_1 == 1)? th_myfunc_a_1 == 7 :
(_th_blink_tid_1 == 2)? th_myfunc_a_2 == 7 :
(_th_blink_tid_1 == 3)? th_myfunc_a_3 == 7 :
(_th_blink_tid_1 == 4)? th_myfunc_b_0 == 7 :
(_th_blink_tid_1 == 5)? th_myfunc_b_1 == 7 :
(_th_blink_tid_1 == 6)? th_myfunc_b_2 == 7 :
(_th_blink_tid_1 == 7)? th_myfunc_b_3 == 7 : 0) begin
th_blink <= th_blink_11;
end
end
th_blink_11: begin
_th_blink_sum_42 <= _th_blink_sum_42 + ((_th_blink_tid_1 == 0)? _th_myfunc_a_0_tmp_5_6 :
(_th_blink_tid_1 == 1)? _th_myfunc_a_1_tmp_10_11 :
(_th_blink_tid_1 == 2)? _th_myfunc_a_2_tmp_15_16 :
(_th_blink_tid_1 == 3)? _th_myfunc_a_3_tmp_20_21 :
(_th_blink_tid_1 == 4)? _th_myfunc_b_0_tmp_25_26 :
(_th_blink_tid_1 == 5)? _th_myfunc_b_1_tmp_30_31 :
(_th_blink_tid_1 == 6)? _th_myfunc_b_2_tmp_35_36 :
(_th_blink_tid_1 == 7)? _th_myfunc_b_3_tmp_40_41 : 'hx);
th_blink <= th_blink_12;
end
th_blink_12: begin
_th_blink_tid_1 <= _th_blink_tid_1 + 1;
th_blink <= th_blink_9;
end
th_blink_13: begin
$display("sum = %d", _th_blink_sum_42);
th_blink <= th_blink_14;
end
endcase
end
end
localparam th_myfunc_a_0_1 = 1;
localparam th_myfunc_a_0_2 = 2;
localparam th_myfunc_a_0_3 = 3;
localparam th_myfunc_a_0_4 = 4;
localparam th_myfunc_a_0_5 = 5;
localparam th_myfunc_a_0_6 = 6;
localparam th_myfunc_a_0_7 = 7;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_a_0 <= th_myfunc_a_0_init;
_th_myfunc_a_0_called <= 0;
_th_myfunc_a_0_tid_2 <= 0;
_th_myfunc_a_0_tid_3 <= 0;
_th_myfunc_a_0_i_4 <= 0;
_th_myfunc_a_0_tmp_5_6 <= 0;
end else begin
case(th_myfunc_a_0)
th_myfunc_a_0_init: begin
if(_th_myfunc_a_0_start[0] && (th_blink == 4)) begin
_th_myfunc_a_0_called <= 1;
end
if(_th_myfunc_a_0_start[0] && (th_blink == 4)) begin
_th_myfunc_a_0_tid_2 <= _th_blink_tid_1;
end
if((th_blink == 4) && _th_myfunc_a_0_start[0]) begin
th_myfunc_a_0 <= th_myfunc_a_0_1;
end
end
th_myfunc_a_0_1: begin
_th_myfunc_a_0_tid_3 <= _th_myfunc_a_0_tid_2;
th_myfunc_a_0 <= th_myfunc_a_0_2;
end
th_myfunc_a_0_2: begin
$display("myfunc_a: tid = %d", _th_myfunc_a_0_tid_3);
th_myfunc_a_0 <= th_myfunc_a_0_3;
end
th_myfunc_a_0_3: begin
_th_myfunc_a_0_i_4 <= 0;
th_myfunc_a_0 <= th_myfunc_a_0_4;
end
th_myfunc_a_0_4: begin
if(_th_myfunc_a_0_i_4 < 30 - _th_myfunc_a_0_tid_3) begin
th_myfunc_a_0 <= th_myfunc_a_0_5;
end else begin
th_myfunc_a_0 <= th_myfunc_a_0_6;
end
end
th_myfunc_a_0_5: begin
_th_myfunc_a_0_i_4 <= _th_myfunc_a_0_i_4 + 1;
th_myfunc_a_0 <= th_myfunc_a_0_4;
end
th_myfunc_a_0_6: begin
_th_myfunc_a_0_tmp_5_6 <= _th_myfunc_a_0_tid_3 + 100;
th_myfunc_a_0 <= th_myfunc_a_0_7;
end
endcase
end
end
localparam th_myfunc_a_1_1 = 1;
localparam th_myfunc_a_1_2 = 2;
localparam th_myfunc_a_1_3 = 3;
localparam th_myfunc_a_1_4 = 4;
localparam th_myfunc_a_1_5 = 5;
localparam th_myfunc_a_1_6 = 6;
localparam th_myfunc_a_1_7 = 7;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_a_1 <= th_myfunc_a_1_init;
_th_myfunc_a_1_called <= 0;
_th_myfunc_a_1_tid_7 <= 0;
_th_myfunc_a_1_tid_8 <= 0;
_th_myfunc_a_1_i_9 <= 0;
_th_myfunc_a_1_tmp_10_11 <= 0;
end else begin
case(th_myfunc_a_1)
th_myfunc_a_1_init: begin
if(_th_myfunc_a_0_start[1] && (th_blink == 4)) begin
_th_myfunc_a_1_called <= 1;
end
if(_th_myfunc_a_0_start[1] && (th_blink == 4)) begin
_th_myfunc_a_1_tid_7 <= _th_blink_tid_1;
end
if((th_blink == 4) && _th_myfunc_a_0_start[1]) begin
th_myfunc_a_1 <= th_myfunc_a_1_1;
end
end
th_myfunc_a_1_1: begin
_th_myfunc_a_1_tid_8 <= _th_myfunc_a_1_tid_7;
th_myfunc_a_1 <= th_myfunc_a_1_2;
end
th_myfunc_a_1_2: begin
$display("myfunc_a: tid = %d", _th_myfunc_a_1_tid_8);
th_myfunc_a_1 <= th_myfunc_a_1_3;
end
th_myfunc_a_1_3: begin
_th_myfunc_a_1_i_9 <= 0;
th_myfunc_a_1 <= th_myfunc_a_1_4;
end
th_myfunc_a_1_4: begin
if(_th_myfunc_a_1_i_9 < 30 - _th_myfunc_a_1_tid_8) begin
th_myfunc_a_1 <= th_myfunc_a_1_5;
end else begin
th_myfunc_a_1 <= th_myfunc_a_1_6;
end
end
th_myfunc_a_1_5: begin
_th_myfunc_a_1_i_9 <= _th_myfunc_a_1_i_9 + 1;
th_myfunc_a_1 <= th_myfunc_a_1_4;
end
th_myfunc_a_1_6: begin
_th_myfunc_a_1_tmp_10_11 <= _th_myfunc_a_1_tid_8 + 100;
th_myfunc_a_1 <= th_myfunc_a_1_7;
end
endcase
end
end
localparam th_myfunc_a_2_1 = 1;
localparam th_myfunc_a_2_2 = 2;
localparam th_myfunc_a_2_3 = 3;
localparam th_myfunc_a_2_4 = 4;
localparam th_myfunc_a_2_5 = 5;
localparam th_myfunc_a_2_6 = 6;
localparam th_myfunc_a_2_7 = 7;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_a_2 <= th_myfunc_a_2_init;
_th_myfunc_a_2_called <= 0;
_th_myfunc_a_2_tid_12 <= 0;
_th_myfunc_a_2_tid_13 <= 0;
_th_myfunc_a_2_i_14 <= 0;
_th_myfunc_a_2_tmp_15_16 <= 0;
end else begin
case(th_myfunc_a_2)
th_myfunc_a_2_init: begin
if(_th_myfunc_a_0_start[2] && (th_blink == 4)) begin
_th_myfunc_a_2_called <= 1;
end
if(_th_myfunc_a_0_start[2] && (th_blink == 4)) begin
_th_myfunc_a_2_tid_12 <= _th_blink_tid_1;
end
if((th_blink == 4) && _th_myfunc_a_0_start[2]) begin
th_myfunc_a_2 <= th_myfunc_a_2_1;
end
end
th_myfunc_a_2_1: begin
_th_myfunc_a_2_tid_13 <= _th_myfunc_a_2_tid_12;
th_myfunc_a_2 <= th_myfunc_a_2_2;
end
th_myfunc_a_2_2: begin
$display("myfunc_a: tid = %d", _th_myfunc_a_2_tid_13);
th_myfunc_a_2 <= th_myfunc_a_2_3;
end
th_myfunc_a_2_3: begin
_th_myfunc_a_2_i_14 <= 0;
th_myfunc_a_2 <= th_myfunc_a_2_4;
end
th_myfunc_a_2_4: begin
if(_th_myfunc_a_2_i_14 < 30 - _th_myfunc_a_2_tid_13) begin
th_myfunc_a_2 <= th_myfunc_a_2_5;
end else begin
th_myfunc_a_2 <= th_myfunc_a_2_6;
end
end
th_myfunc_a_2_5: begin
_th_myfunc_a_2_i_14 <= _th_myfunc_a_2_i_14 + 1;
th_myfunc_a_2 <= th_myfunc_a_2_4;
end
th_myfunc_a_2_6: begin
_th_myfunc_a_2_tmp_15_16 <= _th_myfunc_a_2_tid_13 + 100;
th_myfunc_a_2 <= th_myfunc_a_2_7;
end
endcase
end
end
localparam th_myfunc_a_3_1 = 1;
localparam th_myfunc_a_3_2 = 2;
localparam th_myfunc_a_3_3 = 3;
localparam th_myfunc_a_3_4 = 4;
localparam th_myfunc_a_3_5 = 5;
localparam th_myfunc_a_3_6 = 6;
localparam th_myfunc_a_3_7 = 7;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_a_3 <= th_myfunc_a_3_init;
_th_myfunc_a_3_called <= 0;
_th_myfunc_a_3_tid_17 <= 0;
_th_myfunc_a_3_tid_18 <= 0;
_th_myfunc_a_3_i_19 <= 0;
_th_myfunc_a_3_tmp_20_21 <= 0;
end else begin
case(th_myfunc_a_3)
th_myfunc_a_3_init: begin
if(_th_myfunc_a_0_start[3] && (th_blink == 4)) begin
_th_myfunc_a_3_called <= 1;
end
if(_th_myfunc_a_0_start[3] && (th_blink == 4)) begin
_th_myfunc_a_3_tid_17 <= _th_blink_tid_1;
end
if((th_blink == 4) && _th_myfunc_a_0_start[3]) begin
th_myfunc_a_3 <= th_myfunc_a_3_1;
end
end
th_myfunc_a_3_1: begin
_th_myfunc_a_3_tid_18 <= _th_myfunc_a_3_tid_17;
th_myfunc_a_3 <= th_myfunc_a_3_2;
end
th_myfunc_a_3_2: begin
$display("myfunc_a: tid = %d", _th_myfunc_a_3_tid_18);
th_myfunc_a_3 <= th_myfunc_a_3_3;
end
th_myfunc_a_3_3: begin
_th_myfunc_a_3_i_19 <= 0;
th_myfunc_a_3 <= th_myfunc_a_3_4;
end
th_myfunc_a_3_4: begin
if(_th_myfunc_a_3_i_19 < 30 - _th_myfunc_a_3_tid_18) begin
th_myfunc_a_3 <= th_myfunc_a_3_5;
end else begin
th_myfunc_a_3 <= th_myfunc_a_3_6;
end
end
th_myfunc_a_3_5: begin
_th_myfunc_a_3_i_19 <= _th_myfunc_a_3_i_19 + 1;
th_myfunc_a_3 <= th_myfunc_a_3_4;
end
th_myfunc_a_3_6: begin
_th_myfunc_a_3_tmp_20_21 <= _th_myfunc_a_3_tid_18 + 100;
th_myfunc_a_3 <= th_myfunc_a_3_7;
end
endcase
end
end
localparam th_myfunc_b_0_1 = 1;
localparam th_myfunc_b_0_2 = 2;
localparam th_myfunc_b_0_3 = 3;
localparam th_myfunc_b_0_4 = 4;
localparam th_myfunc_b_0_5 = 5;
localparam th_myfunc_b_0_6 = 6;
localparam th_myfunc_b_0_7 = 7;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_b_0 <= th_myfunc_b_0_init;
_th_myfunc_b_0_called <= 0;
_th_myfunc_b_0_tid_22 <= 0;
_th_myfunc_b_0_tid_23 <= 0;
_th_myfunc_b_0_i_24 <= 0;
_th_myfunc_b_0_tmp_25_26 <= 0;
end else begin
case(th_myfunc_b_0)
th_myfunc_b_0_init: begin
if(_th_myfunc_a_0_start[4] && (th_blink == 4)) begin
_th_myfunc_b_0_called <= 1;
end
if(_th_myfunc_a_0_start[4] && (th_blink == 4)) begin
_th_myfunc_b_0_tid_22 <= _th_blink_tid_1;
end
if((th_blink == 4) && _th_myfunc_a_0_start[4]) begin
th_myfunc_b_0 <= th_myfunc_b_0_1;
end
end
th_myfunc_b_0_1: begin
_th_myfunc_b_0_tid_23 <= _th_myfunc_b_0_tid_22;
th_myfunc_b_0 <= th_myfunc_b_0_2;
end
th_myfunc_b_0_2: begin
$display("myfunc_b: tid = %d", _th_myfunc_b_0_tid_23);
th_myfunc_b_0 <= th_myfunc_b_0_3;
end
th_myfunc_b_0_3: begin
_th_myfunc_b_0_i_24 <= 0;
th_myfunc_b_0 <= th_myfunc_b_0_4;
end
th_myfunc_b_0_4: begin
if(_th_myfunc_b_0_i_24 < 30 - _th_myfunc_b_0_tid_23) begin
th_myfunc_b_0 <= th_myfunc_b_0_5;
end else begin
th_myfunc_b_0 <= th_myfunc_b_0_6;
end
end
th_myfunc_b_0_5: begin
_th_myfunc_b_0_i_24 <= _th_myfunc_b_0_i_24 + 1;
th_myfunc_b_0 <= th_myfunc_b_0_4;
end
th_myfunc_b_0_6: begin
_th_myfunc_b_0_tmp_25_26 <= _th_myfunc_b_0_tid_23 + 200;
th_myfunc_b_0 <= th_myfunc_b_0_7;
end
endcase
end
end
localparam th_myfunc_b_1_1 = 1;
localparam th_myfunc_b_1_2 = 2;
localparam th_myfunc_b_1_3 = 3;
localparam th_myfunc_b_1_4 = 4;
localparam th_myfunc_b_1_5 = 5;
localparam th_myfunc_b_1_6 = 6;
localparam th_myfunc_b_1_7 = 7;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_b_1 <= th_myfunc_b_1_init;
_th_myfunc_b_1_called <= 0;
_th_myfunc_b_1_tid_27 <= 0;
_th_myfunc_b_1_tid_28 <= 0;
_th_myfunc_b_1_i_29 <= 0;
_th_myfunc_b_1_tmp_30_31 <= 0;
end else begin
case(th_myfunc_b_1)
th_myfunc_b_1_init: begin
if(_th_myfunc_a_0_start[5] && (th_blink == 4)) begin
_th_myfunc_b_1_called <= 1;
end
if(_th_myfunc_a_0_start[5] && (th_blink == 4)) begin
_th_myfunc_b_1_tid_27 <= _th_blink_tid_1;
end
if((th_blink == 4) && _th_myfunc_a_0_start[5]) begin
th_myfunc_b_1 <= th_myfunc_b_1_1;
end
end
th_myfunc_b_1_1: begin
_th_myfunc_b_1_tid_28 <= _th_myfunc_b_1_tid_27;
th_myfunc_b_1 <= th_myfunc_b_1_2;
end
th_myfunc_b_1_2: begin
$display("myfunc_b: tid = %d", _th_myfunc_b_1_tid_28);
th_myfunc_b_1 <= th_myfunc_b_1_3;
end
th_myfunc_b_1_3: begin
_th_myfunc_b_1_i_29 <= 0;
th_myfunc_b_1 <= th_myfunc_b_1_4;
end
th_myfunc_b_1_4: begin
if(_th_myfunc_b_1_i_29 < 30 - _th_myfunc_b_1_tid_28) begin
th_myfunc_b_1 <= th_myfunc_b_1_5;
end else begin
th_myfunc_b_1 <= th_myfunc_b_1_6;
end
end
th_myfunc_b_1_5: begin
_th_myfunc_b_1_i_29 <= _th_myfunc_b_1_i_29 + 1;
th_myfunc_b_1 <= th_myfunc_b_1_4;
end
th_myfunc_b_1_6: begin
_th_myfunc_b_1_tmp_30_31 <= _th_myfunc_b_1_tid_28 + 200;
th_myfunc_b_1 <= th_myfunc_b_1_7;
end
endcase
end
end
localparam th_myfunc_b_2_1 = 1;
localparam th_myfunc_b_2_2 = 2;
localparam th_myfunc_b_2_3 = 3;
localparam th_myfunc_b_2_4 = 4;
localparam th_myfunc_b_2_5 = 5;
localparam th_myfunc_b_2_6 = 6;
localparam th_myfunc_b_2_7 = 7;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_b_2 <= th_myfunc_b_2_init;
_th_myfunc_b_2_called <= 0;
_th_myfunc_b_2_tid_32 <= 0;
_th_myfunc_b_2_tid_33 <= 0;
_th_myfunc_b_2_i_34 <= 0;
_th_myfunc_b_2_tmp_35_36 <= 0;
end else begin
case(th_myfunc_b_2)
th_myfunc_b_2_init: begin
if(_th_myfunc_a_0_start[6] && (th_blink == 4)) begin
_th_myfunc_b_2_called <= 1;
end
if(_th_myfunc_a_0_start[6] && (th_blink == 4)) begin
_th_myfunc_b_2_tid_32 <= _th_blink_tid_1;
end
if((th_blink == 4) && _th_myfunc_a_0_start[6]) begin
th_myfunc_b_2 <= th_myfunc_b_2_1;
end
end
th_myfunc_b_2_1: begin
_th_myfunc_b_2_tid_33 <= _th_myfunc_b_2_tid_32;
th_myfunc_b_2 <= th_myfunc_b_2_2;
end
th_myfunc_b_2_2: begin
$display("myfunc_b: tid = %d", _th_myfunc_b_2_tid_33);
th_myfunc_b_2 <= th_myfunc_b_2_3;
end
th_myfunc_b_2_3: begin
_th_myfunc_b_2_i_34 <= 0;
th_myfunc_b_2 <= th_myfunc_b_2_4;
end
th_myfunc_b_2_4: begin
if(_th_myfunc_b_2_i_34 < 30 - _th_myfunc_b_2_tid_33) begin
th_myfunc_b_2 <= th_myfunc_b_2_5;
end else begin
th_myfunc_b_2 <= th_myfunc_b_2_6;
end
end
th_myfunc_b_2_5: begin
_th_myfunc_b_2_i_34 <= _th_myfunc_b_2_i_34 + 1;
th_myfunc_b_2 <= th_myfunc_b_2_4;
end
th_myfunc_b_2_6: begin
_th_myfunc_b_2_tmp_35_36 <= _th_myfunc_b_2_tid_33 + 200;
th_myfunc_b_2 <= th_myfunc_b_2_7;
end
endcase
end
end
localparam th_myfunc_b_3_1 = 1;
localparam th_myfunc_b_3_2 = 2;
localparam th_myfunc_b_3_3 = 3;
localparam th_myfunc_b_3_4 = 4;
localparam th_myfunc_b_3_5 = 5;
localparam th_myfunc_b_3_6 = 6;
localparam th_myfunc_b_3_7 = 7;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_b_3 <= th_myfunc_b_3_init;
_th_myfunc_b_3_called <= 0;
_th_myfunc_b_3_tid_37 <= 0;
_th_myfunc_b_3_tid_38 <= 0;
_th_myfunc_b_3_i_39 <= 0;
_th_myfunc_b_3_tmp_40_41 <= 0;
end else begin
case(th_myfunc_b_3)
th_myfunc_b_3_init: begin
if(_th_myfunc_a_0_start[7] && (th_blink == 4)) begin
_th_myfunc_b_3_called <= 1;
end
if(_th_myfunc_a_0_start[7] && (th_blink == 4)) begin
_th_myfunc_b_3_tid_37 <= _th_blink_tid_1;
end
if((th_blink == 4) && _th_myfunc_a_0_start[7]) begin
th_myfunc_b_3 <= th_myfunc_b_3_1;
end
end
th_myfunc_b_3_1: begin
_th_myfunc_b_3_tid_38 <= _th_myfunc_b_3_tid_37;
th_myfunc_b_3 <= th_myfunc_b_3_2;
end
th_myfunc_b_3_2: begin
$display("myfunc_b: tid = %d", _th_myfunc_b_3_tid_38);
th_myfunc_b_3 <= th_myfunc_b_3_3;
end
th_myfunc_b_3_3: begin
_th_myfunc_b_3_i_39 <= 0;
th_myfunc_b_3 <= th_myfunc_b_3_4;
end
th_myfunc_b_3_4: begin
if(_th_myfunc_b_3_i_39 < 30 - _th_myfunc_b_3_tid_38) begin
th_myfunc_b_3 <= th_myfunc_b_3_5;
end else begin
th_myfunc_b_3 <= th_myfunc_b_3_6;
end
end
th_myfunc_b_3_5: begin
_th_myfunc_b_3_i_39 <= _th_myfunc_b_3_i_39 + 1;
th_myfunc_b_3 <= th_myfunc_b_3_4;
end
th_myfunc_b_3_6: begin
_th_myfunc_b_3_tmp_40_41 <= _th_myfunc_b_3_tid_38 + 200;
th_myfunc_b_3 <= th_myfunc_b_3_7;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = thread_to_thread_pool.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
calvin/actorstore/systemactors/web/Pushbullet.py | gabrielcercel/calvin-base | 334 | 12722344 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, calvinsys, stateguard
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class Pushbullet(Actor):
"""
Post incoming tokens (text) to runtime specific pushbullet channel with given title
Input:
message : A message
"""
@manage(["title"])
def init(self, title):
self.title = title
self.setup()
def did_migrate(self):
self.setup()
def setup(self):
self._pb = calvinsys.open(self, "web.pushbullet.channel.post")
def teardown(self):
calvinsys.close(self._pb)
def will_migrate(self):
self.teardown()
def will_end(self):
self.teardown()
@stateguard(lambda self: self._pb and calvinsys.can_write(self._pb))
@condition(action_input=['message'])
def post_update(self, message):
calvinsys.write(self._pb, {"message": message, "title": self.title})
action_priority = (post_update,)
requires = ['web.pushbullet.channel.post']
test_kwargs = {'title': "Some Title"}
test_calvinsys = {'web.pushbullet.channel.post': {'write': [{'message': 'A message', 'title': 'Some Title'}]}}
test_set = [
{
'inports': {'message': ["A message"]}
}
]
|
src/simian/mac/models/properties.py | tristansgray/simian | 326 | 12722387 | <gh_stars>100-1000
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""App Engine Model Properties."""
from google.appengine.ext import db
from simian.mac.common import compress
from simian.mac.common import util
class SerializedProperty(db.TextProperty):
"""TextProperty class that serializes and deserializes data."""
# pylint: disable=g-bad-name
def get_value_for_datastore(self, model_instance):
"""Sends a serialized representation of self._obj to Datastore."""
if self._obj is None:
return None
else:
return db.Text(util.Serialize(self._obj))
# pylint: disable=g-bad-name
def __get__(self, model_instance, model_class):
"""Returns the already deserialized object."""
value = super(SerializedProperty, self).__get__(
model_instance, model_class)
# __get__ super returns self when the model_instance is None, which happens
# when the property is accessed by a static member of a class, as opposed to
# by an instance. When this happens, return our class instance.
if value is self:
return self
return self._obj
# pylint: disable=g-bad-name
def __set__(self, model_instance, value):
"""Deserializes db.Text values and simply sets other types to self._obj."""
if value is None or value == '':
super(SerializedProperty, self).__set__(model_instance, None)
elif type(value) is db.Text:
# If the value is is db.Text, deserialize it to init _obj.
self._obj = util.Deserialize(value)
else:
# If the incoming value is a not db.Text, it's an obj so just store it.
self._obj = value
class CompressedUtf8BlobProperty(db.BlobProperty):
"""BlobProperty class that compresses/decompresses seamlessly on get/set.
This Property is compressed on every __set__ and decompressed on every __get__
operation. This should be taken into consideration when performing certain
operations, such as slicing.
"""
# pylint: disable=g-bad-name
def get_value_for_datastore(self, model_instance):
"""Compresses the blob value on it's way to Datastore."""
value = super(CompressedUtf8BlobProperty, self).get_value_for_datastore(
model_instance)
if value is None:
self.length = 0
else:
self.length = len(value)
return db.Blob(
compress.CompressedText(value, encoding='utf-8').Compressed())
# pylint: disable=g-bad-name
def __get__(self, model_instance, model_class):
"""Decompresses the blob value when the property is accessed."""
value = super(CompressedUtf8BlobProperty, self).__get__(
model_instance, model_class)
# __get__ super returns self when the model_instance is None, which happens
# when the property is accessed by a static member of a class, as opposed to
# by an instance. When this happens, return our class instance.
if value is self:
return self
return unicode(
compress.CompressedText(value, encoding='utf-8')).encode('utf-8')
# pylint: disable=g-bad-name
def __set__(self, model_instance, value):
"""Compresses the value when the property is set."""
if not value:
self.length = 0
super(CompressedUtf8BlobProperty, self).__set__(model_instance, value)
else:
self.length = len(value)
value = compress.CompressedText(value, encoding='utf-8').Compressed()
super(CompressedUtf8BlobProperty, self).__set__(model_instance, value)
# pylint: disable=g-bad-name
def __len__(self):
"""Returns the length of the uncompressed blob data."""
return self.length
|
Modules/RefineDet/detect.py | Passer-D/GameAISDK | 1,210 | 12722410 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import os
import sys
import platform
__is_windows_system = platform.platform().lower().startswith('window')
__is_linux_system = platform.platform().lower().startswith('linux')
if __is_windows_system:
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(os.path.dirname(__file__), 'windows'))
from windows.detect_util import main
elif __is_linux_system:
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(os.path.dirname(__file__), 'ubuntu'))
from ubuntu.detect_util import main
else:
    raise Exception('system is not supported!')
"""
parser = argparse.ArgumentParser(description='RefineDet Training')
## basic configurations
parser.add_argument('-v', '--version', default='Refine_hc2net_version3',
help='Refine_vgg, Refine_mobile, Refine_hcnet, Refine_hc2net, Refine_hc2net_version2, Refine_hc2net_version3, '
'Refine_hc2net_version4, Refine_shufflenetv2, Refine_mobilenetv2, Refine_mobilenetv3, '
'Refine_mobilenetv3_version2, Refine_mobilenetv3_version3, Refine_resnet101, Refine_resnet101_heavy')
parser.add_argument('-s', '--size', default=320, type=int, help='320, 512 (512 support Refine_hc2net_version3, Refine_resnet101, Refine_resnet101_heavy)')
parser.add_argument('-d', '--dataset', default='self_dataset', help='VOC, COCO, OpenImage500, Objects365 or self dataset')
parser.add_argument('--num_classes', default=5, type=int, help='number of classes, including background')
## pretrained model
parser.add_argument('-m', '--trained_model',
default='weights/Refine_hc2net_version3_320/model/Final_Refine_hc2net_version3_self_dataset.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--onnx_model',
default='weights/Refine_hc2net_version3_320/model/Final_Refine_hc2net_version3_self_dataset.onnx',
type=str, help='output onnx model')
## post processing
parser.add_argument('-n', '--nms_type', default='soft', help='nms type: normal, soft')
parser.add_argument('--obj_thresh', default=0.50, type=float, help='object threshold for testing')
parser.add_argument('--nms_thresh', default=0.45, type=float, help='nms threshold for testing')
## src images
parser.add_argument('-f', '--test_images', default='./test_images', help='test images can be folder, image or txt file')
parser.add_argument('--image_nums', default=100, type=int, help='maximum number of test images, -1 means all images in test_images')
parser.add_argument('--save_folder', default='eval/', type=str, help='Dir to save results')
parser.add_argument('--label_list', default='./test_dataset.txt', type=str, help='test image label list')
## platform
parser.add_argument('--cuda', default=False, type=str2bool, help='Use cuda to train model')
parser.add_argument('--inference_platform', default='pytorch', type=str, help='inference platform: caffe2, pytorch')
"""
if __name__ == "__main__":
main()
|
qanta/datasets/protobowl.py | Mshivam2409/qb | 122 | 12722414 | import os
import sys
import json
import codecs
import pickle
import pathlib
import itertools
import numpy as np
import pandas as pd
from collections import defaultdict
from functools import partial
from multiprocessing import Pool
from datetime import datetime
import matplotlib
matplotlib.use("Agg")
from plotnine import (
ggplot,
aes,
theme,
geom_density,
geom_histogram,
geom_point,
scale_color_gradient,
labs,
)
def process_log_line(x):
"""Process a single line of the log"""
obj = x["object"]
date = datetime.strptime(x["date"][:-6], "%a %b %d %Y %H:%M:%S %Z%z")
relative_position = obj["time_elapsed"] / obj["time_remaining"]
return (
[
date,
obj["guess"],
obj["qid"],
obj["time_elapsed"],
obj["time_remaining"],
relative_position,
obj["ruling"],
obj["user"]["id"],
],
obj["qid"],
obj["question_text"],
)
# remove duplicate records
def remove_duplicate(df_grouped, uid):
"""For each user, only take the first record for each question"""
group = df_grouped.get_group(uid)
user_questions = set()
index = group.date.sort_values()
rows = []
for _, row in group.loc[index.index].iterrows():
if row.qid in user_questions:
continue
user_questions.add(row.qid)
rows.append(row)
for j, row in enumerate(rows):
rows[j].user_n_records = len(rows)
return rows
def load_protobowl(
protobowl_dir="data/external/datasets/protobowl/protobowl-042818.log",
min_user_questions=20,
get_questions=False,
):
"""Parse protobowl log, return buzz data and questions.
Filter users that answered less than `min_user_questions` questions.
Remove duplicates: for each user, only keep the first record for each
question.
Args
protobowl_dir: json log
min_user_questions: minimum number of questions answered
Return
df: dataframe of buzzing records
questions: protobowl questions
"""
df_dir = protobowl_dir + ".h5"
question_dir = protobowl_dir + ".questions.pkl"
    if os.path.exists(df_dir) and os.path.exists(question_dir):
with pd.HDFStore(df_dir) as store:
df = store["data"]
with open(question_dir, "rb") as f:
questions = pickle.load(f)
if get_questions:
return df, questions
else:
return df
# parse protobowl json log
data = []
count = 0
user_questions = defaultdict(set)
questions = dict()
with codecs.open(protobowl_dir, "r", "utf-8") as f:
line = f.readline()
while line is not None:
line = line.strip()
if len(line) < 1:
break
while not line.endswith("}}"):
_line = f.readline()
if _line is None:
break
line += _line.strip()
try:
line = json.loads(line)
except ValueError:
line = f.readline()
if line is None:
break
continue
count += 1
if count % 10000 == 0:
sys.stderr.write("\rdone: {}/5130000".format(count))
x, qid, question_text = process_log_line(line)
if qid not in questions:
questions[qid] = question_text
user_questions[x[-1]].add(qid) # x[-1] is uid
data.append(x)
line = f.readline()
# filter users without enough questions
filtered_data = []
for x in data:
uid = x[-1]
if len(user_questions[uid]) >= min_user_questions:
x.append(len(user_questions[uid]))
filtered_data.append(x)
df = pd.DataFrame(
filtered_data,
columns=[
"date",
"guess",
"qid",
"time_elapsed",
"time_remaining",
"relative_position",
"result",
"uid",
"user_n_records",
],
)
df_grouped = df.groupby("uid")
uids = list(df_grouped.groups.keys())
pool = Pool(8)
_remove_duplicate = partial(remove_duplicate, df_grouped)
user_rows = pool.map(_remove_duplicate, uids)
df = pd.DataFrame(list(itertools.chain(*user_rows)), columns=df.columns)
df_grouped = df.groupby("uid")
print("{} users".format(len(df_grouped)))
print("{} records".format(len(df)))
print("{} questions".format(len(set(df.qid))))
# save
with pd.HDFStore(df_dir) as store:
store["data"] = df
with open(question_dir, "wb") as f:
pickle.dump(questions, f)
if get_questions:
return df, questions
else:
return df
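
# Minimal usage sketch (assumption: the default protobowl log path above is
# present locally). plot() below only needs the dataframe; passing
# get_questions=True also returns the qid -> question text mapping.
#
# df, questions = load_protobowl(get_questions=True)
# print(len(df), len(questions))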
def plot():
outdir = "output/protobowl/"
pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)
df = load_protobowl()
df.result = df.result.apply(lambda x: x is True)
df["log_n_records"] = df.user_n_records.apply(np.log)
df_user_grouped = df.groupby("uid")
user_stat = df_user_grouped.agg(np.mean)
print("{} users".format(len(user_stat)))
print("{} records".format(len(df)))
max_color = user_stat.log_n_records.max()
user_stat["alpha"] = pd.Series(
user_stat.log_n_records.apply(lambda x: x / max_color), index=user_stat.index
)
# 2D user plot
p0 = (
ggplot(user_stat)
+ geom_point(
aes(
x="relative_position",
y="result",
size="user_n_records",
color="log_n_records",
alpha="alpha",
),
show_legend={"color": False, "alpha": False, "size": False},
)
+ scale_color_gradient(high="#e31a1c", low="#ffffcc")
+ labs(x="Average buzzing position", y="Accuracy")
+ theme(aspect_ratio=1)
)
p0.save(os.path.join(outdir, "protobowl_users.pdf"))
# p0.draw()
print("p0 done")
# histogram of number of records
p1 = (
ggplot(user_stat, aes(x="log_n_records", y="..density.."))
+ geom_histogram(color="#e6550d", fill="#fee6ce")
+ geom_density()
+ labs(x="Log number of records", y="Density")
+ theme(aspect_ratio=0.3)
)
p1.save(os.path.join(outdir, "protobowl_hist.pdf"))
# p1.draw()
print("p1 done")
# histogram of accuracy
p2 = (
ggplot(user_stat, aes(x="result", y="..density.."))
+ geom_histogram(color="#31a354", fill="#e5f5e0")
+ geom_density()
+ labs(x="Accuracy", y="Density")
+ theme(aspect_ratio=0.3)
)
p2.save(os.path.join(outdir, "protobowl_acc.pdf"))
# p2.draw()
print("p2 done")
# histogram of buzzing position
p3 = (
ggplot(user_stat, aes(x="relative_position", y="..density.."))
+ geom_histogram(color="#3182bd", fill="#deebf7")
+ geom_density()
+ labs(x="Average buzzing position", y="Density")
+ theme(aspect_ratio=0.3)
)
p3.save(os.path.join(outdir, "protobowl_pos.pdf"))
# p3.draw()
print("p3 done")
if __name__ == "__main__":
plot()
|
Language Skills/Python/Unit 09 Exam Statistics/01 Review/1-Lets look at those grades.py | WarHatch/Codecademy-Exercise-Answers | 346 | 12722434 | <gh_stars>100-1000
grades = [100, 100, 90, 40, 80, 100, 85, 70, 90, 65, 90, 85, 50.5]
print "Grades:", grades
|
src/pylogit/bootstrap_utils.py | mathijsvdv/pylogit | 153 | 12722435 | <filename>src/pylogit/bootstrap_utils.py
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@name: Bootstrap Utilities
@summary: This module provides helpful functions for calculating the
bootstrap confidence intervals.
"""
from __future__ import absolute_import
from numbers import Number
import numpy as np
def check_conf_percentage_validity(conf_percentage):
"""
Ensures that `conf_percentage` is in (0, 100). Raises a helpful ValueError
if otherwise.
"""
msg = "conf_percentage MUST be a number between 0.0 and 100."
condition_1 = isinstance(conf_percentage, Number)
if not condition_1:
raise ValueError(msg)
else:
condition_2 = 0 < conf_percentage < 100
if not condition_2:
raise ValueError(msg)
return None
def ensure_samples_is_ndim_ndarray(samples, name='bootstrap', ndim=2):
"""
Ensures that `samples` is an `ndim` numpy array. Raises a helpful
ValueError if otherwise.
"""
assert isinstance(ndim, int)
assert isinstance(name, str)
if not isinstance(samples, np.ndarray) or not (samples.ndim == ndim):
sample_name = name + "_samples"
msg = "`{}` MUST be a {}D ndarray.".format(sample_name, ndim)
raise ValueError(msg)
return None
def get_alpha_from_conf_percentage(conf_percentage):
"""
Calculates `100 - conf_percentage`, which is useful for calculating alpha
levels.
"""
return 100.0 - conf_percentage
def combine_conf_endpoints(lower_array, upper_array):
"""
Concatenates upper and lower endpoint arrays for a given confidence level.
"""
return np.concatenate([lower_array[None, :], upper_array[None, :]], axis=0)
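
# Illustrative percentile-interval sketch (not part of the original module),
# showing how these helpers are meant to compose. `samples` is a hypothetical
# (num_replicates, num_params) array of bootstrap estimates.
#
# def percentile_interval(samples, conf_percentage=95):
#     check_conf_percentage_validity(conf_percentage)
#     ensure_samples_is_ndim_ndarray(samples, ndim=2)
#     alpha = get_alpha_from_conf_percentage(conf_percentage)
#     lower = np.percentile(samples, alpha / 2.0, axis=0)
#     upper = np.percentile(samples, 100 - alpha / 2.0, axis=0)
#     return combine_conf_endpoints(lower, upper)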
|
tests/integration/test_access_for_functions/test.py | pdv-ru/ClickHouse | 15,577 | 12722469 | import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance')
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_access_rights_for_function():
create_function_query = "CREATE FUNCTION MySum AS (a, b) -> a + b"
instance.query("CREATE USER A")
instance.query("CREATE USER B")
assert "it's necessary to have grant CREATE FUNCTION ON *.*" in instance.query_and_get_error(create_function_query, user = 'A')
instance.query("GRANT CREATE FUNCTION on *.* TO A")
instance.query(create_function_query, user = 'A')
assert instance.query("SELECT MySum(1, 2)") == "3\n"
assert "it's necessary to have grant DROP FUNCTION ON *.*" in instance.query_and_get_error("DROP FUNCTION MySum", user = 'B')
instance.query("GRANT DROP FUNCTION ON *.* TO B")
instance.query("DROP FUNCTION MySum", user = 'B')
assert "Unknown function MySum" in instance.query_and_get_error("SELECT MySum(1, 2)")
instance.query("REVOKE CREATE FUNCTION ON *.* FROM A")
assert "it's necessary to have grant CREATE FUNCTION ON *.*" in instance.query_and_get_error(create_function_query, user = 'A')
instance.query("DROP USER IF EXISTS A")
instance.query("DROP USER IF EXISTS B")
|
hwt/interfaces/agents/__init__.py | ufo2011/hwt | 134 | 12722491 | """
This package contains UVM-like simulation agents that handle IO between a circuit running in the simulator and the code which drives the simulation.
""" |
tools/perf/contrib/cluster_telemetry/screenshot_ct.py | zealoussnow/chromium | 14,668 | 12722496 | <filename>tools/perf/contrib/cluster_telemetry/screenshot_ct.py
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from contrib.cluster_telemetry import ct_benchmarks_util
from contrib.cluster_telemetry import page_set
from contrib.cluster_telemetry import repaint_helpers
from contrib.cluster_telemetry import screenshot
class ScreenshotCT(perf_benchmark.PerfBenchmark):
"""Captures PNG screenshots of web pages for Cluster Telemetry. Screenshots
written to local file with path-safe urls of pages as filenames. Cluster
Telemetry is then used for aggregation and analysis."""
@classmethod
def Name(cls):
return 'screenshot_ct'
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
ct_benchmarks_util.AddBenchmarkCommandLineArgs(parser)
parser.add_option('--png-outdir', type='string',
default=None,
help='Output directory for the PNG files')
parser.add_option('--wait-time', type='float', default=0,
help='Wait time before the benchmark is started')
parser.add_option('--dc-detect', action='store_true', dest='dc_detect',
default=False, help='Detects dynamic content by marking'
'pixels that were not consistent across multiple '
'screenshots with cyan')
parser.add_option('--dc-wait-time', type='float', default=1,
help='Wait time in between screenshots. Only applicable '
'if dc_detect flag is true.')
parser.add_option('--dc-extra-screenshots', type='int', default=1,
help='Number of extra screenshots taken to detect '
'dynamic content. Only applicable if dc_detect flag is '
'true.')
parser.add_option('--dc-threshold', type='float', default=0.5,
help='Maximum tolerable percentage of dynamic content '
'pixels. Raises an exception if percentage of dynamic '
'content is beyond this threshold. Only applicable if '
'dc_detect flag is true.')
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
ct_benchmarks_util.ValidateCommandLineArgs(parser, args)
if not args.png_outdir:
parser.error('Please specify --png-outdir')
def CreatePageTest(self, options):
return screenshot.Screenshot(options.png_outdir, options.wait_time,
options.dc_detect, options.dc_wait_time, options.dc_extra_screenshots,
options.dc_threshold)
def CreateStorySet(self, options):
return page_set.CTPageSet(
options.urls_list, options.user_agent, options.archive_data_file,
run_page_interaction_callback=repaint_helpers.WaitThenRepaint)
|
alipay/aop/api/domain/AlipayEcoLogisticsExpressNonserviceModifyModel.py | snowxmas/alipay-sdk-python-all | 213 | 12722523 | <filename>alipay/aop/api/domain/AlipayEcoLogisticsExpressNonserviceModifyModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AreaCode import AreaCode
class AlipayEcoLogisticsExpressNonserviceModifyModel(object):
def __init__(self):
self._area_codes = None
self._logis_merch_code = None
@property
def area_codes(self):
return self._area_codes
@area_codes.setter
def area_codes(self, value):
if isinstance(value, list):
self._area_codes = list()
for i in value:
if isinstance(i, AreaCode):
self._area_codes.append(i)
else:
self._area_codes.append(AreaCode.from_alipay_dict(i))
@property
def logis_merch_code(self):
return self._logis_merch_code
@logis_merch_code.setter
def logis_merch_code(self, value):
self._logis_merch_code = value
def to_alipay_dict(self):
params = dict()
if self.area_codes:
if isinstance(self.area_codes, list):
for i in range(0, len(self.area_codes)):
element = self.area_codes[i]
if hasattr(element, 'to_alipay_dict'):
self.area_codes[i] = element.to_alipay_dict()
if hasattr(self.area_codes, 'to_alipay_dict'):
params['area_codes'] = self.area_codes.to_alipay_dict()
else:
params['area_codes'] = self.area_codes
if self.logis_merch_code:
if hasattr(self.logis_merch_code, 'to_alipay_dict'):
params['logis_merch_code'] = self.logis_merch_code.to_alipay_dict()
else:
params['logis_merch_code'] = self.logis_merch_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoLogisticsExpressNonserviceModifyModel()
if 'area_codes' in d:
o.area_codes = d['area_codes']
if 'logis_merch_code' in d:
o.logis_merch_code = d['logis_merch_code']
return o
|
tests/algo/test_quickselect.py | gony0/buffalo | 577 | 12722533 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import time
from os import environ
environ["OMP_NUM_THREADS"] = "4"
environ["OPENBLAS_NUM_THREADS"] = "4"
environ["MKL_NUM_THREADS"] = "4"
environ["VECLIB_MAXIMUM_THREADS"] = "4"
environ["NUMEXPR_NUM_THREADS"] = "4"
import numpy as np
import unittest
from .base import TestBase
from buffalo.evaluate.base import Evaluable
scores = np.random.uniform(size=(100, 100000)).astype(np.float32)
topk = 10
def time_np_argsort():
st = time.time()
res = np.argsort(-scores)[:, :topk]
el = time.time() - st
return res, el
def time_np_argpartition():
st = time.time()
res = np.argpartition(-scores, topk)[:, :topk]
res = np.array([sorted(row, key=lambda x:-scores[i, x]) for i, row in enumerate(res)])
el = time.time() - st
return res, el
def time_quickselect():
ev = Evaluable()
st = time.time()
res = ev.get_topk(scores, k=topk, num_threads=4)
el = time.time() - st
return res, el
class TestQuickSelect(TestBase):
def test_0_quickselect(self):
res_argsort, t_np_argsort = time_np_argsort()
res_argpart, t_np_argparttion = time_np_argpartition()
res_quickselect, t_quickselect = time_quickselect()
self.assertGreaterEqual(t_np_argsort / t_quickselect, 1)
self.assertGreaterEqual(t_np_argparttion / t_quickselect, 1)
if __name__ == '__main__':
unittest.main()
|
tutorials/tutorial-6-hodmd.py | mfarthin/PyDMD | 451 | 12722567 | import matplotlib.pyplot as plt
import numpy as np
import time
from pydmd import HODMD
def myfunc(x):
return np.cos(x)*np.sin(np.cos(x)) + np.cos(x*.2)
x = np.linspace(0, 10, 64)
y = myfunc(x)
snapshots = y
plt.plot(x, snapshots, '.')
plt.show()
hodmd = HODMD(svd_rank=0, exact=True, opt=True, d=30).fit(snapshots)
hodmd.reconstructed_data.shape
hodmd.plot_eigs()
hodmd.original_time['dt'] = hodmd.dmd_time['dt'] = x[1] - x[0]
hodmd.original_time['t0'] = hodmd.dmd_time['t0'] = x[0]
hodmd.original_time['tend'] = hodmd.dmd_time['tend'] = x[-1]
plt.plot(hodmd.original_timesteps, snapshots, '.', label='snapshots')
plt.plot(hodmd.original_timesteps, y, '-', label='original function')
plt.plot(hodmd.dmd_timesteps, hodmd.reconstructed_data[0].real, '--', label='DMD output')
plt.legend()
plt.show()
hodmd.dmd_time['tend'] = 50
fig = plt.figure(figsize=(15, 5))
plt.plot(hodmd.original_timesteps, snapshots, '.', label='snapshots')
plt.plot(np.linspace(0, 50, 128), myfunc(np.linspace(0, 50, 128)), '-', label='original function')
plt.plot(hodmd.dmd_timesteps, hodmd.reconstructed_data[0].real, '--', label='DMD output')
plt.legend()
plt.show()
noise_range = [.01, .05, .1, .2]
fig = plt.figure(figsize=(15, 10))
future = 20
for id_plot, i in enumerate(noise_range, start=1):
snapshots = y + np.random.uniform(-i, i, size=y.shape)
hodmd = HODMD(svd_rank=0, exact=True, opt=True, d=30).fit(snapshots)
hodmd.original_time['dt'] = hodmd.dmd_time['dt'] = x[1] - x[0]
hodmd.original_time['t0'] = hodmd.dmd_time['t0'] = x[0]
hodmd.original_time['tend'] = hodmd.dmd_time['tend'] = x[-1]
hodmd.dmd_time['tend'] = 20
plt.subplot(2, 2, id_plot)
plt.plot(hodmd.original_timesteps, snapshots, '.', label='snapshots')
plt.plot(np.linspace(0, future, 128), myfunc(np.linspace(0, future, 128)), '-', label='original function')
plt.plot(hodmd.dmd_timesteps, hodmd.reconstructed_data[0].real, '--', label='DMD output')
plt.legend()
plt.title('Noise [{} - {}]'.format(-i, i))
plt.show()
|
src/aptsources_cleanup/util/collections/abc.py | DazEB/aptsources-cleanup | 461 | 12722574 | # -*- coding: utf-8
"""Like the eponymous built-in module but with additional back-ported
functionality if any.
"""
__all__ = []
from collections.abc import *
from collections import abc as _abc
__all__ += _abc.__all__
try:
from _collections_abc import _check_methods
except ImportError:
def _check_methods(C, *methods):
mro = C.__mro__
for method in methods:
for B in mro:
if method in B.__dict__:
if B.__dict__[method] is None:
return NotImplemented
break
else:
return NotImplemented
return True
if "Collection" not in locals():
__all__.append("Collection")
class Collection(Sized, Iterable, Container):
__slots__ = ()
@classmethod
def __subclasshook__(cls, C):
if cls is not Collection:
return NotImplemented
return _check_methods(C, "__len__", "__iter__", "__contains__")
Collection.register(Set)
Collection.register(Sequence)
Collection.register(Mapping)
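
# Behavioural sketch (assumption: the fallback above is active because the
# running Python lacks collections.abc.Collection). The subclass hook makes
# any class defining __len__, __iter__ and __contains__ a virtual Collection:
#
# >>> issubclass(dict, Collection)
# True
# >>> isinstance(range(3), Collection)
# True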
|
sydent/validators/__init__.py | clmnin/sydent | 220 | 12722617 | <reponame>clmnin/sydent
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import attr
# how long a user can wait before validating a session after starting it
THREEPID_SESSION_VALIDATION_TIMEOUT_MS = 24 * 60 * 60 * 1000
# how long we keep sessions for after they've been validated
THREEPID_SESSION_VALID_LIFETIME_MS = 24 * 60 * 60 * 1000
@attr.s(frozen=True, slots=True, auto_attribs=True)
class ValidationSession:
id: int
medium: str
address: str
client_secret: str
validated: bool
mtime: int
@attr.s(frozen=True, slots=True, auto_attribs=True)
class TokenInfo:
token: str
send_attempt_number: int
class IncorrectClientSecretException(Exception):
pass
class SessionExpiredException(Exception):
pass
class InvalidSessionIdException(Exception):
pass
class IncorrectSessionTokenException(Exception):
pass
class SessionNotValidatedException(Exception):
pass
class DestinationRejectedException(Exception):
pass
|
alembic/versions/71aa3c94b7f5_juiceshop_updates.py | JanSkalny/RootTheBox | 635 | 12722624 | <reponame>JanSkalny/RootTheBox<filename>alembic/versions/71aa3c94b7f5_juiceshop_updates.py
"""JuiceShop Updates
Revision ID: <KEY>
Revises: 18d11f218dfe
Create Date: 2019-11-14 08:52:52.530520
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "18d11f218dfe"
branch_labels = None
depends_on = None
def upgrade():
op.alter_column(
"category",
"_category",
existing_type=sa.VARCHAR(length=24),
type_=sa.VARCHAR(length=64),
)
def downgrade():
op.alter_column(
"category",
"_category",
existing_type=sa.VARCHAR(length=64),
type_=sa.VARCHAR(length=24),
)
|
examples/errors/undefined_identifiers.py | uta8a/Jikka | 139 | 12722629 | <gh_stars>100-1000
def solve(x: int) -> int:
x = y # err
z = x + 1
return y # err
|
tests/nn/alpha.py | Filco306/TopologyLayer | 250 | 12722650 | <reponame>Filco306/TopologyLayer<gh_stars>100-1000
import unittest
import topologylayer
import torch
import numpy as np
from topologylayer.util.process import remove_zero_bars, remove_infinite_bars
class AlphaTest(unittest.TestCase):
def test(self):
from topologylayer.nn import AlphaLayer
# superlevel set
for alg in ['hom', 'hom2', 'cohom']:
layer = AlphaLayer(maxdim=1, alg=alg)
x = torch.tensor([[1, 1], [1,-1], [-1,-1], [-1,1]], dtype=torch.float).requires_grad_(True)
dgms, issub = layer(x)
self.assertEqual(
issub,
True,
"Expected sublevel set layer. alg=" + alg)
self.assertEqual(
torch.all(torch.eq(remove_infinite_bars(remove_zero_bars(dgms[0]), issub),
torch.tensor([[0., 2.], [0., 2.], [0., 2.]]))),
True,
"unexpected 0-dim barcode. alg=" + alg)
self.assertEqual(
torch.all(torch.eq(remove_zero_bars(dgms[1]),
torch.tensor([[2., 2.8284270763397217]]))),
True,
"unexpected 1-dim barcode. alg=" + alg)
d0 = remove_infinite_bars(remove_zero_bars(dgms[0]), issub)
p = torch.sum(d0[:, 1] - d0[:, 0])
p.backward()
self.assertEqual(
torch.all(torch.eq(x.grad,
torch.tensor([[1,1],[1,-1],[-1,0],[-1,0]], dtype=torch.float))),
True,
"unexpected gradient. alg=" + alg)
|
src/gluonts/dataset/repository/_tsf_datasets.py | Xiaoxiong-Liu/gluon-ts | 2,648 | 12722707 | <gh_stars>1000+
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Dict, List, NamedTuple, Optional
from urllib import request
from zipfile import ZipFile
from pandas.tseries.frequencies import to_offset
from gluonts import json
from gluonts.dataset import jsonl
from gluonts.dataset.field_names import FieldName
from gluonts.gluonts_tqdm import tqdm
from ._tsf_reader import TSFReader, frequency_converter
from ._util import metadata, request_retrieve_hook, to_dict
class Dataset(NamedTuple):
file_name: str
record: str
ROOT: str = "https://zenodo.org/record"
@property
def url(self):
return f"{self.ROOT}/{self.record}/files/{self.file_name}"
def download(self, path: Path):
file_path = path / self.file_name
with tqdm(
[],
unit="B",
unit_scale=True,
unit_divisor=1024,
miniters=5,
desc=f"Download {self.file_name}:",
) as _tqdm:
request.urlretrieve(
self.url,
filename=file_path,
reporthook=request_retrieve_hook(_tqdm),
)
return file_path
datasets = {
"kaggle_web_traffic_with_missing": Dataset(
file_name="kaggle_web_traffic_dataset_with_missing_values.zip",
record="4656080",
),
"kaggle_web_traffic_without_missing": Dataset(
file_name="kaggle_web_traffic_dataset_without_missing_values.zip",
record="4656075",
),
"kaggle_web_traffic_weekly": Dataset(
file_name="kaggle_web_traffic_weekly_dataset.zip",
record="4656664",
),
"m1_yearly": Dataset(file_name="m1_yearly_dataset.zip", record="4656193"),
"m1_quarterly": Dataset(
file_name="m1_quarterly_dataset.zip", record="4656154"
),
"m1_monthly": Dataset(
file_name="m1_monthly_dataset.zip", record="4656159"
),
"nn5_daily_with_missing": Dataset(
file_name="nn5_daily_dataset_with_missing_values.zip",
record="4656110",
),
"nn5_daily_without_missing": Dataset(
file_name="nn5_daily_dataset_without_missing_values.zip",
record="4656117",
),
"nn5_weekly": Dataset(
file_name="nn5_weekly_dataset.zip", record="4656125"
),
"tourism_monthly": Dataset(
file_name="tourism_monthly_dataset.zip",
record="4656096",
),
"tourism_quarterly": Dataset(
file_name="tourism_quarterly_dataset.zip",
record="4656093",
),
"tourism_yearly": Dataset(
file_name="tourism_yearly_dataset.zip",
record="4656103",
),
"cif_2016": Dataset(
file_name="cif_2016_dataset.zip",
record="4656042",
),
"london_smart_meters_without_missing": Dataset(
file_name="london_smart_meters_dataset_without_missing_values.zip",
record="4656091",
),
"wind_farms_without_missing": Dataset(
file_name="wind_farms_minutely_dataset_without_missing_values.zip",
record="4654858",
),
"car_parts_without_missing": Dataset(
file_name="car_parts_dataset_without_missing_values.zip",
record="4656021",
),
"dominick": Dataset(
file_name="dominick_dataset.zip",
record="4654802",
),
"fred_md": Dataset(
file_name="fred_md_dataset.zip",
record="4654833",
),
"pedestrian_counts": Dataset(
file_name="pedestrian_counts_dataset.zip",
record="4656626",
),
"hospital": Dataset(
file_name="hospital_dataset.zip",
record="4656014",
),
"covid_deaths": Dataset(
file_name="covid_deaths_dataset.zip",
record="4656009",
),
"kdd_cup_2018_without_missing": Dataset(
file_name="kdd_cup_2018_dataset_without_missing_values.zip",
record="4656756",
),
"weather": Dataset(
file_name="weather_dataset.zip",
record="4654822",
),
}
def save_metadata(
dataset_path: Path, cardinality: int, freq: str, prediction_length: int
):
with open(dataset_path / "metadata.json", "w") as file:
json.dump(
metadata(
cardinality=cardinality,
freq=freq,
prediction_length=prediction_length,
),
file,
)
def save_datasets(path: Path, data: List[Dict], train_offset: int):
train = path / "train"
test = path / "test"
train.mkdir(exist_ok=True)
test.mkdir(exist_ok=True)
with open(train / "data.json", "w") as train_fp, open(
test / "data.json", "w"
) as test_fp:
for data_entry in tqdm(
data, total=len(data), desc="creating json files"
):
dic = to_dict(
target_values=data_entry["target"],
start=str(data_entry["start_timestamp"]),
)
jsonl.dump([dic], test_fp)
dic["target"] = dic["target"][:-train_offset]
jsonl.dump([dic], train_fp)
def generate_forecasting_dataset(
dataset_path: Path,
dataset_name: str,
prediction_length: Optional[int] = None,
):
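    # Downloads and unpacks the requested .tsf archive, converts it to the
    # GluonTS train/test JSON layout and writes metadata.json alongside it.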
dataset = datasets[dataset_name]
dataset_path.mkdir(exist_ok=True)
with TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
with ZipFile(dataset.download(temp_path)) as archive:
archive.extractall(path=temp_path)
        # only one file is expected
reader = TSFReader(temp_path / archive.namelist()[0])
meta, data = reader.read()
freq = frequency_converter(meta.frequency)
if prediction_length is None:
if hasattr(meta, "forecast_horizon"):
prediction_length = int(meta.forecast_horizon)
else:
prediction_length = default_prediction_length_from_frequency(freq)
save_metadata(dataset_path, len(data), freq, prediction_length)
# Impute missing start dates with unix epoch and remove time series whose
# length is less than or equal to the prediction length
data = [
{**d, "start_timestamp": d.get("start_timestamp", "1970-01-01")}
for d in data
if len(d[FieldName.TARGET]) > prediction_length
]
save_datasets(dataset_path, data, prediction_length)
def default_prediction_length_from_frequency(freq: str) -> int:
prediction_length_map = {
"T": 60,
"H": 48,
"D": 30,
"W": 8,
"M": 12,
"Y": 4,
}
try:
freq = to_offset(freq).name
return prediction_length_map[freq]
except KeyError as err:
raise ValueError(
f"Cannot obtain default prediction length from frequency `{freq}`."
) from err
|
Packs/Ansible_Powered_Integrations/Integrations/VMwareV2/VMwareV2.py | diCagri/content | 799 | 12722717 | import json
import traceback
from typing import Dict, cast
import ansible_runner
import demistomock as demisto # noqa: F401
import ssh_agent_setup
from CommonServerPython import * # noqa: F401
# Dict to Markdown Converter adapted from https://github.com/PolBaladas/torsimany/
def dict2md(json_block, depth=0):
markdown = ""
if isinstance(json_block, dict):
markdown = parseDict(json_block, depth)
if isinstance(json_block, list):
markdown = parseList(json_block, depth)
return markdown
def parseDict(d, depth):
markdown = ""
for k in d:
if isinstance(d[k], (dict, list)):
markdown += addHeader(k, depth)
markdown += dict2md(d[k], depth + 1)
else:
markdown += buildValueChain(k, d[k], depth)
return markdown
def parseList(rawlist, depth):
markdown = ""
for value in rawlist:
if not isinstance(value, (dict, list)):
index = rawlist.index(value)
markdown += buildValueChain(index, value, depth)
else:
markdown += parseDict(value, depth)
return markdown
def buildHeaderChain(depth):
list_tag = '* '
htag = '#'
chain = list_tag * (bool(depth)) + htag * (depth + 1) + \
' value ' + (htag * (depth + 1) + '\n')
return chain
def buildValueChain(key, value, depth):
tab = " "
list_tag = '* '
chain = tab * (bool(depth - 1)) + list_tag + \
str(key) + ": " + str(value) + "\n"
return chain
def addHeader(value, depth):
chain = buildHeaderChain(depth)
chain = chain.replace('value', value.title())
return chain
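
# Illustrative note (not part of the original integration): generic_ansible()
# below feeds each Ansible result dict through dict2md to build the
# human-readable output. For example:
#
# dict2md({'status': 'SUCCESS', 'facts': {'version': '6.7'}})
# -> a "status: SUCCESS" bullet plus a "# Facts #" heading whose contents are
#    rendered as further bullets.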
# Remove ansible branding from results
def rec_ansible_key_strip(obj):
if isinstance(obj, dict):
return {key.replace('ansible_', ''): rec_ansible_key_strip(val) for key, val in obj.items()}
return obj
# COMMAND FUNCTIONS
def generic_ansible(integration_name, command, args: Dict[str, Any]) -> CommandResults:
readable_output = ""
sshkey = ""
fork_count = 1 # default to executing against 1 host at a time
if args.get('concurrency'):
fork_count = cast(int, args.get('concurrency'))
inventory: Dict[str, dict] = {}
inventory['all'] = {}
inventory['all']['hosts'] = {}
inventory['all']['hosts']['localhost'] = {}
inventory['all']['hosts']['localhost']['ansible_connection'] = 'local'
module_args = ""
# build module args list
for arg_key, arg_value in args.items():
        # skip the hardcoded host arg, as it doesn't relate to the module
if arg_key == 'host':
continue
module_args += "%s=\"%s\" " % (arg_key, arg_value)
    # If this isn't host based, then all the integration params will be used as command args
for arg_key, arg_value in demisto.params().items():
module_args += "%s=\"%s\" " % (arg_key, arg_value)
r = ansible_runner.run(inventory=inventory, host_pattern='all', module=command, quiet=True,
omit_event_data=True, ssh_key=sshkey, module_args=module_args, forks=fork_count)
results = []
for each_host_event in r.events:
# Troubleshooting
# demisto.log("%s: %s\n" % (each_host_event['event'], each_host_event))
if each_host_event['event'] in ["runner_on_ok", "runner_on_unreachable", "runner_on_failed"]:
# parse results
result = json.loads('{' + each_host_event['stdout'].split('{', 1)[1])
host = each_host_event['stdout'].split('|', 1)[0].strip()
status = each_host_event['stdout'].replace('=>', '|').split('|', 3)[1]
# if successful build outputs
if each_host_event['event'] == "runner_on_ok":
if 'fact' in command:
result = result['ansible_facts']
else:
if result.get(command) is not None:
result = result[command]
else:
result.pop("ansible_facts", None)
result = rec_ansible_key_strip(result)
if host != "localhost":
readable_output += "# %s - %s\n" % (host, status)
else:
                    # This integration is not host based
readable_output += "# %s\n" % status
readable_output += dict2md(result)
# add host and status to result
result['host'] = host
result['status'] = status
results.append(result)
if each_host_event['event'] == "runner_on_unreachable":
msg = "Host %s unreachable\nError Details: %s" % (host, result)
return_error(msg)
if each_host_event['event'] == "runner_on_failed":
msg = "Host %s failed running command\nError Details: %s" % (host, result)
return_error(msg)
    # This integration is not host based and always runs against localhost
results = results[0]
return CommandResults(
readable_output=readable_output,
outputs_prefix=integration_name + '.' + command,
outputs_key_field='',
outputs=results
)
# MAIN FUNCTION
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
# SSH Key integration requires ssh_agent to be running in the background
ssh_agent_setup.setup()
try:
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results('ok')
elif demisto.command() == 'vmware-about-info':
return_results(generic_ansible('vmwarev2', 'vmware_about_info', demisto.args()))
elif demisto.command() == 'vmware-category':
return_results(generic_ansible('vmwarev2', 'vmware_category', demisto.args()))
elif demisto.command() == 'vmware-category-info':
return_results(generic_ansible('vmwarev2', 'vmware_category_info', demisto.args()))
elif demisto.command() == 'vmware-cfg-backup':
return_results(generic_ansible('vmwarev2', 'vmware_cfg_backup', demisto.args()))
elif demisto.command() == 'vmware-cluster':
return_results(generic_ansible('vmwarev2', 'vmware_cluster', demisto.args()))
elif demisto.command() == 'vmware-cluster-drs':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_drs', demisto.args()))
elif demisto.command() == 'vmware-cluster-ha':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_ha', demisto.args()))
elif demisto.command() == 'vmware-cluster-info':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_info', demisto.args()))
elif demisto.command() == 'vmware-cluster-vsan':
return_results(generic_ansible('vmwarev2', 'vmware_cluster_vsan', demisto.args()))
elif demisto.command() == 'vmware-content-deploy-template':
return_results(generic_ansible('vmwarev2', 'vmware_content_deploy_template', demisto.args()))
elif demisto.command() == 'vmware-content-library-info':
return_results(generic_ansible('vmwarev2', 'vmware_content_library_info', demisto.args()))
elif demisto.command() == 'vmware-content-library-manager':
return_results(generic_ansible('vmwarev2', 'vmware_content_library_manager', demisto.args()))
elif demisto.command() == 'vmware-datacenter':
return_results(generic_ansible('vmwarev2', 'vmware_datacenter', demisto.args()))
elif demisto.command() == 'vmware-datastore-cluster':
return_results(generic_ansible('vmwarev2', 'vmware_datastore_cluster', demisto.args()))
elif demisto.command() == 'vmware-datastore-info':
return_results(generic_ansible('vmwarev2', 'vmware_datastore_info', demisto.args()))
elif demisto.command() == 'vmware-datastore-maintenancemode':
return_results(generic_ansible('vmwarev2', 'vmware_datastore_maintenancemode', demisto.args()))
elif demisto.command() == 'vmware-dns-config':
return_results(generic_ansible('vmwarev2', 'vmware_dns_config', demisto.args()))
elif demisto.command() == 'vmware-drs-group':
return_results(generic_ansible('vmwarev2', 'vmware_drs_group', demisto.args()))
elif demisto.command() == 'vmware-drs-group-info':
return_results(generic_ansible('vmwarev2', 'vmware_drs_group_info', demisto.args()))
elif demisto.command() == 'vmware-drs-rule-info':
return_results(generic_ansible('vmwarev2', 'vmware_drs_rule_info', demisto.args()))
elif demisto.command() == 'vmware-dvs-host':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_host', demisto.args()))
elif demisto.command() == 'vmware-dvs-portgroup':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_portgroup', demisto.args()))
elif demisto.command() == 'vmware-dvs-portgroup-find':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_portgroup_find', demisto.args()))
elif demisto.command() == 'vmware-dvs-portgroup-info':
return_results(generic_ansible('vmwarev2', 'vmware_dvs_portgroup_info', demisto.args()))
elif demisto.command() == 'vmware-dvswitch':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-lacp':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_lacp', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-nioc':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_nioc', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-pvlans':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_pvlans', demisto.args()))
elif demisto.command() == 'vmware-dvswitch-uplink-pg':
return_results(generic_ansible('vmwarev2', 'vmware_dvswitch_uplink_pg', demisto.args()))
elif demisto.command() == 'vmware-evc-mode':
return_results(generic_ansible('vmwarev2', 'vmware_evc_mode', demisto.args()))
elif demisto.command() == 'vmware-folder-info':
return_results(generic_ansible('vmwarev2', 'vmware_folder_info', demisto.args()))
elif demisto.command() == 'vmware-guest':
return_results(generic_ansible('vmwarev2', 'vmware_guest', demisto.args()))
elif demisto.command() == 'vmware-guest-boot-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_boot_info', demisto.args()))
elif demisto.command() == 'vmware-guest-boot-manager':
return_results(generic_ansible('vmwarev2', 'vmware_guest_boot_manager', demisto.args()))
elif demisto.command() == 'vmware-guest-custom-attribute-defs':
return_results(generic_ansible('vmwarev2', 'vmware_guest_custom_attribute_defs', demisto.args()))
elif demisto.command() == 'vmware-guest-custom-attributes':
return_results(generic_ansible('vmwarev2', 'vmware_guest_custom_attributes', demisto.args()))
elif demisto.command() == 'vmware-guest-customization-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_customization_info', demisto.args()))
elif demisto.command() == 'vmware-guest-disk':
return_results(generic_ansible('vmwarev2', 'vmware_guest_disk', demisto.args()))
elif demisto.command() == 'vmware-guest-disk-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_disk_info', demisto.args()))
elif demisto.command() == 'vmware-guest-find':
return_results(generic_ansible('vmwarev2', 'vmware_guest_find', demisto.args()))
elif demisto.command() == 'vmware-guest-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_info', demisto.args()))
elif demisto.command() == 'vmware-guest-move':
return_results(generic_ansible('vmwarev2', 'vmware_guest_move', demisto.args()))
elif demisto.command() == 'vmware-guest-network':
return_results(generic_ansible('vmwarev2', 'vmware_guest_network', demisto.args()))
elif demisto.command() == 'vmware-guest-powerstate':
return_results(generic_ansible('vmwarev2', 'vmware_guest_powerstate', demisto.args()))
elif demisto.command() == 'vmware-guest-screenshot':
return_results(generic_ansible('vmwarev2', 'vmware_guest_screenshot', demisto.args()))
elif demisto.command() == 'vmware-guest-sendkey':
return_results(generic_ansible('vmwarev2', 'vmware_guest_sendkey', demisto.args()))
elif demisto.command() == 'vmware-guest-snapshot':
return_results(generic_ansible('vmwarev2', 'vmware_guest_snapshot', demisto.args()))
elif demisto.command() == 'vmware-guest-snapshot-info':
return_results(generic_ansible('vmwarev2', 'vmware_guest_snapshot_info', demisto.args()))
elif demisto.command() == 'vmware-guest-tools-upgrade':
return_results(generic_ansible('vmwarev2', 'vmware_guest_tools_upgrade', demisto.args()))
elif demisto.command() == 'vmware-guest-tools-wait':
return_results(generic_ansible('vmwarev2', 'vmware_guest_tools_wait', demisto.args()))
elif demisto.command() == 'vmware-guest-video':
return_results(generic_ansible('vmwarev2', 'vmware_guest_video', demisto.args()))
elif demisto.command() == 'vmware-guest-vnc':
return_results(generic_ansible('vmwarev2', 'vmware_guest_vnc', demisto.args()))
elif demisto.command() == 'vmware-host':
return_results(generic_ansible('vmwarev2', 'vmware_host', demisto.args()))
elif demisto.command() == 'vmware-host-acceptance':
return_results(generic_ansible('vmwarev2', 'vmware_host_acceptance', demisto.args()))
elif demisto.command() == 'vmware-host-active-directory':
return_results(generic_ansible('vmwarev2', 'vmware_host_active_directory', demisto.args()))
elif demisto.command() == 'vmware-host-capability-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_capability_info', demisto.args()))
elif demisto.command() == 'vmware-host-config-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_config_info', demisto.args()))
elif demisto.command() == 'vmware-host-config-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_config_manager', demisto.args()))
elif demisto.command() == 'vmware-host-datastore':
return_results(generic_ansible('vmwarev2', 'vmware_host_datastore', demisto.args()))
elif demisto.command() == 'vmware-host-dns-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_dns_info', demisto.args()))
elif demisto.command() == 'vmware-host-facts':
return_results(generic_ansible('vmwarev2', 'vmware_host_facts', demisto.args()))
elif demisto.command() == 'vmware-host-feature-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_feature_info', demisto.args()))
elif demisto.command() == 'vmware-host-firewall-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_firewall_info', demisto.args()))
elif demisto.command() == 'vmware-host-firewall-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_firewall_manager', demisto.args()))
elif demisto.command() == 'vmware-host-hyperthreading':
return_results(generic_ansible('vmwarev2', 'vmware_host_hyperthreading', demisto.args()))
elif demisto.command() == 'vmware-host-ipv6':
return_results(generic_ansible('vmwarev2', 'vmware_host_ipv6', demisto.args()))
elif demisto.command() == 'vmware-host-kernel-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_kernel_manager', demisto.args()))
elif demisto.command() == 'vmware-host-lockdown':
return_results(generic_ansible('vmwarev2', 'vmware_host_lockdown', demisto.args()))
elif demisto.command() == 'vmware-host-ntp':
return_results(generic_ansible('vmwarev2', 'vmware_host_ntp', demisto.args()))
elif demisto.command() == 'vmware-host-ntp-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_ntp_info', demisto.args()))
elif demisto.command() == 'vmware-host-package-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_package_info', demisto.args()))
elif demisto.command() == 'vmware-host-powermgmt-policy':
return_results(generic_ansible('vmwarev2', 'vmware_host_powermgmt_policy', demisto.args()))
elif demisto.command() == 'vmware-host-powerstate':
return_results(generic_ansible('vmwarev2', 'vmware_host_powerstate', demisto.args()))
elif demisto.command() == 'vmware-host-scanhba':
return_results(generic_ansible('vmwarev2', 'vmware_host_scanhba', demisto.args()))
elif demisto.command() == 'vmware-host-service-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_service_info', demisto.args()))
elif demisto.command() == 'vmware-host-service-manager':
return_results(generic_ansible('vmwarev2', 'vmware_host_service_manager', demisto.args()))
elif demisto.command() == 'vmware-host-snmp':
return_results(generic_ansible('vmwarev2', 'vmware_host_snmp', demisto.args()))
elif demisto.command() == 'vmware-host-ssl-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_ssl_info', demisto.args()))
elif demisto.command() == 'vmware-host-vmhba-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_vmhba_info', demisto.args()))
elif demisto.command() == 'vmware-host-vmnic-info':
return_results(generic_ansible('vmwarev2', 'vmware_host_vmnic_info', demisto.args()))
elif demisto.command() == 'vmware-local-role-info':
return_results(generic_ansible('vmwarev2', 'vmware_local_role_info', demisto.args()))
elif demisto.command() == 'vmware-local-role-manager':
return_results(generic_ansible('vmwarev2', 'vmware_local_role_manager', demisto.args()))
elif demisto.command() == 'vmware-local-user-info':
return_results(generic_ansible('vmwarev2', 'vmware_local_user_info', demisto.args()))
elif demisto.command() == 'vmware-local-user-manager':
return_results(generic_ansible('vmwarev2', 'vmware_local_user_manager', demisto.args()))
elif demisto.command() == 'vmware-maintenancemode':
return_results(generic_ansible('vmwarev2', 'vmware_maintenancemode', demisto.args()))
elif demisto.command() == 'vmware-migrate-vmk':
return_results(generic_ansible('vmwarev2', 'vmware_migrate_vmk', demisto.args()))
elif demisto.command() == 'vmware-object-role-permission':
return_results(generic_ansible('vmwarev2', 'vmware_object_role_permission', demisto.args()))
elif demisto.command() == 'vmware-portgroup':
return_results(generic_ansible('vmwarev2', 'vmware_portgroup', demisto.args()))
elif demisto.command() == 'vmware-portgroup-info':
return_results(generic_ansible('vmwarev2', 'vmware_portgroup_info', demisto.args()))
elif demisto.command() == 'vmware-resource-pool':
return_results(generic_ansible('vmwarev2', 'vmware_resource_pool', demisto.args()))
elif demisto.command() == 'vmware-resource-pool-info':
return_results(generic_ansible('vmwarev2', 'vmware_resource_pool_info', demisto.args()))
elif demisto.command() == 'vmware-tag':
return_results(generic_ansible('vmwarev2', 'vmware_tag', demisto.args()))
elif demisto.command() == 'vmware-tag-info':
return_results(generic_ansible('vmwarev2', 'vmware_tag_info', demisto.args()))
elif demisto.command() == 'vmware-tag-manager':
return_results(generic_ansible('vmwarev2', 'vmware_tag_manager', demisto.args()))
elif demisto.command() == 'vmware-target-canonical-info':
return_results(generic_ansible('vmwarev2', 'vmware_target_canonical_info', demisto.args()))
elif demisto.command() == 'vmware-vcenter-settings':
return_results(generic_ansible('vmwarev2', 'vmware_vcenter_settings', demisto.args()))
elif demisto.command() == 'vmware-vcenter-statistics':
return_results(generic_ansible('vmwarev2', 'vmware_vcenter_statistics', demisto.args()))
elif demisto.command() == 'vmware-vm-host-drs-rule':
return_results(generic_ansible('vmwarev2', 'vmware_vm_host_drs_rule', demisto.args()))
elif demisto.command() == 'vmware-vm-info':
return_results(generic_ansible('vmwarev2', 'vmware_vm_info', demisto.args()))
elif demisto.command() == 'vmware-vm-shell':
return_results(generic_ansible('vmwarev2', 'vmware_vm_shell', demisto.args()))
elif demisto.command() == 'vmware-vm-storage-policy-info':
return_results(generic_ansible('vmwarev2', 'vmware_vm_storage_policy_info', demisto.args()))
elif demisto.command() == 'vmware-vm-vm-drs-rule':
return_results(generic_ansible('vmwarev2', 'vmware_vm_vm_drs_rule', demisto.args()))
elif demisto.command() == 'vmware-vm-vss-dvs-migrate':
return_results(generic_ansible('vmwarev2', 'vmware_vm_vss_dvs_migrate', demisto.args()))
elif demisto.command() == 'vmware-vmkernel':
return_results(generic_ansible('vmwarev2', 'vmware_vmkernel', demisto.args()))
elif demisto.command() == 'vmware-vmkernel-info':
return_results(generic_ansible('vmwarev2', 'vmware_vmkernel_info', demisto.args()))
elif demisto.command() == 'vmware-vmkernel-ip-config':
return_results(generic_ansible('vmwarev2', 'vmware_vmkernel_ip_config', demisto.args()))
elif demisto.command() == 'vmware-vmotion':
return_results(generic_ansible('vmwarev2', 'vmware_vmotion', demisto.args()))
elif demisto.command() == 'vmware-vsan-cluster':
return_results(generic_ansible('vmwarev2', 'vmware_vsan_cluster', demisto.args()))
elif demisto.command() == 'vmware-vspan-session':
return_results(generic_ansible('vmwarev2', 'vmware_vspan_session', demisto.args()))
elif demisto.command() == 'vmware-vswitch':
return_results(generic_ansible('vmwarev2', 'vmware_vswitch', demisto.args()))
elif demisto.command() == 'vmware-vswitch-info':
return_results(generic_ansible('vmwarev2', 'vmware_vswitch_info', demisto.args()))
elif demisto.command() == 'vmware-vsphere-file':
return_results(generic_ansible('vmwarev2', 'vsphere_file', demisto.args()))
elif demisto.command() == 'vmware-vcenter-extension':
return_results(generic_ansible('vmwarev2', 'vcenter_extension', demisto.args()))
elif demisto.command() == 'vmware-vcenter-extension-info':
return_results(generic_ansible('vmwarev2', 'vcenter_extension_info', demisto.args()))
elif demisto.command() == 'vmware-vcenter-folder':
return_results(generic_ansible('vmwarev2', 'vcenter_folder', demisto.args()))
elif demisto.command() == 'vmware-vcenter-license':
return_results(generic_ansible('vmwarev2', 'vcenter_license', demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
# ENTRY POINT
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
fake news challenge (FNC-1)/eval_fnc.py | kishormishra3/DeepLearn | 1,756 | 12722734 | '''
Evaluation code for the Fake News Challenge (FNC-1), based on the SICK dataset (SemEval 2014 Task 1) evaluation script
'''
import numpy as np
import os.path
from util import *
from sklearn.metrics import mean_squared_error as mse
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from utils.score import report_score
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score
from keras.utils.np_utils import to_categorical
import pandas as pd
from sklearn.model_selection import train_test_split
import random
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten, Merge, Embedding
from keras import regularizers
from keras.layers import Merge, Input, Multiply, Layer
from sklearn.preprocessing import StandardScaler
def split(train_l,train_r,label,ratio):
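    # Randomly splits the paired left/right inputs and their labels into train
    # and validation subsets; `ratio` is the fraction held out for validation.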
total = train_l.shape[0]
train_samples = int(total*(1-ratio))
test_samples = total-train_samples
tr_l,tst_l,tr_r,tst_r,l_tr,l_tst=[],[],[],[],[],[]
dat=random.sample(range(total),train_samples)
for a in dat:
tr_l.append(train_l[a])
tr_r.append(train_r[a])
l_tr.append(label[a])
print 'splitting - validation samples ',test_samples
for i in range(total):
if i not in dat:
tst_l.append(train_l[i])
tst_r.append(train_r[i])
l_tst.append(label[i])
print 'splitting - train samples ',len(dat)
tr_l = np.array(tr_l)
tr_r = np.array(tr_r)
tst_l = np.array(tst_l)
tst_r = np.array(tst_r)
l_tr = np.array(l_tr)
l_tst = np.array(l_tst)
return tr_l,tst_l,tr_r,tst_r,l_tr,l_tst
def load_dataset(file_trhead, file_trbody, file_tshead, file_tsbody):
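    # Reads the FNC-1 headline/body CSVs, joins each headline with its body
    # text, lower-cases and strips punctuation, removes stop words and caps
    # bodies at 250 words, then returns cleaned train/test headlines and bodies.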
trhead = pd.read_csv(file_trhead)
trbody = pd.read_csv(file_trbody)
tshead = pd.read_csv(file_tshead)
tsbody = pd.read_csv(file_tsbody)
tr_head_array = trhead.values
tr_body_array = trbody.values
ts_head_array = tshead.values
ts_body_array = tsbody.values
tr_labels = tr_head_array[:,2]
ts_labels = ts_head_array[:,2]
tr_body_id = tr_head_array[:,1]
train_dh = tr_head_array[:,0] ##########
train_db = []
for i in range(len(tr_head_array)):
for j in range(len(tr_body_array)):
if tr_body_array[j][0] == tr_body_id[i]:
train_db.append(tr_body_array[j][1])
break
tr_lab = []
for i in tr_labels:
if i == 'unrelated':
tr_lab.append(3)
if i == 'agree':
tr_lab.append(0)
if i == 'discuss':
tr_lab.append(2)
if i == 'disagree':
tr_lab.append(1)
train_db = np.array(train_db) ##############
ts_body_id = ts_head_array[:,1]
test_dh = ts_head_array[:,0] ##########
test_db = []
for i in range(len(ts_head_array)):
for j in range(len(ts_body_array)):
if ts_body_array[j][0] == ts_body_id[i]:
test_db.append(ts_body_array[j][1])
break
ts_lab = []
for i in ts_labels:
if i == 'unrelated':
ts_lab.append(3)
if i == 'agree':
ts_lab.append(0)
if i == 'discuss':
ts_lab.append(2)
if i == 'disagree':
ts_lab.append(1)
test_db= np.array(test_db) #############
#signs=['?','.',]
print("Refining train datset")
train_rdh = []
for i in range(len(train_dh)):
sentence = ""
for char in train_dh[i]:
if char.isalpha() or char == ' ':
sentence+=char.lower()
else:
sentence+=' '
train_rdh.append(sentence)
train_rdb = []
for i in range(len(train_db)):
sentence = ""
for char in train_db[i]:
if char.isalpha() or char == ' ':
sentence+=char.lower()
else:
sentence+=' '
train_rdb.append(sentence)
print("Refining test datset")
test_rdh = []
for i in range(len(test_dh)):
sentence = ""
for char in test_dh[i]:
if char.isalpha() or char == ' ':
sentence+=char.lower()
else:
sentence+=' '
test_rdh.append(sentence)
test_rdb = []
for i in range(len(test_db)):
sentence = ""
for char in test_db[i]:
if char.isalpha() or char == ' ':
sentence+=char.lower()
else:
sentence+=' '
test_rdb.append(sentence)
dic = pd.read_pickle('stop_dic')
train_new_rdb = []
test_new_rdb = []
word_limit = 250
    print 'removing stop words and applying a', word_limit, 'word limit .....'
for i in train_rdb:
temp=[]
for j in i.split():
try:
a=dic[j]
except:
temp.append(j)
train_new_rdb.append(' '.join(temp[0:min(len(temp),word_limit)]))
for i in test_rdb:
temp=[]
for j in i.split():
try:
a=dic[j]
except:
temp.append(j)
test_new_rdb.append(' '.join(temp[0:min(len(temp),word_limit)]))
train_rdh = np.array(train_rdh)
test_rdh = np.array(test_rdh)
train_new_rdb = np.array(train_new_rdb)
test_new_rdb = np.array(test_new_rdb)
return train_rdh, train_new_rdb, test_rdh, test_new_rdb
#tr_h, dev_h, tr_b, dev_b, tr_s, dev_s = split(np.array(train_rdh), np.array(train_rdb), tr_lab, 0.2)
#return [tr_h, tr_b], [dev_h, dev_b], [tr_s, dev_s]
def evaluate(encoder=None, seed=1234, evaltest=False, loc='./data/'):
"""
Run experiment
"""
print 'Preparing data for fnc...'
#train, dev, test, scores = load_data(loc)
#train[0], train[1], scores[0] = shuffle(train[0], train[1], scores[0], random_state=seed)
'''
trh, trb, tsh, tsb =\
load_dataset("/fnc_data/train_stances.csv", "/fnc_data/train_bodies.csv",\
"/fnc_data/competition_test_stances.csv", "/fnc_data/test_bodies.csv")
'''
train_h = np.load('/fncdata2/encode_train_head.npy')
train_b = np.load('/fncdata2/encode_train_body.npy')
test_h = np.load('/fncdata2/encode_test_head.npy')
test_b = np.load('/fncdata2/encode_test_body.npy')
score_train = np.load('/fncdata2/score_train.npy')
score_test = np.load('/fncdata2/score_test.npy')
#train_b = big_mat
#train_h, dev_h, train_b, dev_b, score_train, dev_score = split(np.array(train_h), train_b, score_train, 0.2)
print 'loading training skipthoughts...'
#trainA = encoder.encode(train_h, verbose=False, use_eos=True)
#trainB = encoder.encode(train_b, verbose=False, use_eos=True)
trainA = train_h
trainB = train_b
print 'Computing development skipthoughts...'
#devA = encoder.encode(dev_h, verbose=False, use_eos=True)
#devB = encoder.encode(dev_b, verbose=False, use_eos=True)
# devA = dev_h
# devB = dev_b
devA = test_h
devB = test_b
dev_score = score_test
print 'Computing feature combinations...'
trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
devF = np.c_[np.abs(devA - devB), devA * devB]
print 'Encoding labels...'
#trainY = encode_labels(train_labels)
#devY = encode_labels(holdout_labels)
trainY = to_categorical(score_train, 4)
devY = to_categorical(dev_score, 4)
train_Fx, test_Fx = load_features()
#fmodel = generate_feature_model(train_Fx, score_train, test_Fx, dev_score, ninputs=len(train_Fx[0]))
train_tfidf, test_tfidf = generate_tfidf()
print 'Compiling model...'
lrmodel = prepare_model(ninputs=trainF.shape[1],n_feats=train_Fx.shape[1],n_tfidf=train_tfidf.shape[1])
print 'Training...'
bestlrmodel = train_model(lrmodel, trainF, trainY, devF, devY, dev_score, train_Fx, test_Fx, train_tfidf, test_tfidf)
if evaltest:
print 'Loading test skipthoughts...'
testA = test_h
testB = test_b
print 'Computing feature combinations...'
testF = np.c_[np.abs(testA - testB), testA * testB]
yhat = bestlrmodel.predict(testF, verbose=2)
yhat = [i.argmax()for i in yhat]
string_predicted,test_stances = [],[]
for i,j in zip(yhat,score_test):
if i == 3:
string_predicted.append('unrelated')
if i == 0:
string_predicted.append('agree')
if i == 2:
string_predicted.append('discuss')
if i == 1:
string_predicted.append('disagree')
if j == 3:
test_stances.append('unrelated')
if j == 0:
test_stances.append('agree')
if j == 2:
test_stances.append('discuss')
if j == 1:
test_stances.append('disagree')
report_score(test_stances,string_predicted)
score = accuracy_score(score_test, yhat)
print 'accuracy is ..',score
#print 'Evaluating...'
def generate_tfidf():
file_train_instances = "/fncdata/train_stances.csv"
file_train_bodies = "/fncdata/train_bodies.csv"
file_test_instances = "/fncdata/competition_test_stances.csv"
file_test_bodies = "/fncdata/test_bodies.csv"
raw_train = FNCData(file_train_instances, file_train_bodies)
raw_test = FNCData(file_test_instances, file_test_bodies)
n_train = len(raw_train.instances)
lim_unigram = 5000
train_set, train_stances, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer = \
pipeline_train(raw_train, raw_test, lim_unigram=lim_unigram)
#feature_size = len(train_set[0])
test_set = pipeline_test(raw_test, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer)
return np.array(train_set), np.array(test_set)
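# Architecture note (summary added for clarity; see prepare_model below): the classifier takes
# three inputs -- skip-thought feature combinations (inp1), hand-crafted features (inp2) and
# TF-IDF vectors (inp3) -- runs each through its own dense branch, concatenates the branches,
# and predicts the four FNC stance classes with a softmax layer.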
def prepare_model(ninputs=9600, n_feats=45,nclass=4,n_tfidf=10001):
inp1 = Input(shape=(ninputs,))
inp2 = Input(shape=(n_feats,))
inp3 = Input(shape=(n_tfidf,))
reg = 0.00005
out_neurons1 = 500
#out_neurons2 = 20
#out_neurons2 = 10
m1 = Dense(input_dim=ninputs, output_dim=out_neurons1,activation='sigmoid'\
,kernel_regularizer=regularizers.l2(0.00000001))(inp1)
m1 = Dropout(0.2)(m1)
m1 = Dense(100,activation='sigmoid')(m1)
#m1 = Dropout(0.2)(m1)
#m1 = Dense(4, activation='sigmoid')(m1)
#m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
m2 = Dense(50,activation='relu')(inp2)
#m2=Dense(4,activation='relu')(m2)
m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
kernel_regularizer=regularizers.l2(reg))(inp3)
m3 = Dropout(0.4)(m3)
m3 = Dense(50, activation='relu')(m3)
#m3 = Dropout(0.4)(m3)
#m3 = Dense(4, activation='softmax')(m3)
#m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
#m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)
m = Merge(mode='concat')([m1,m2,m3])
#mul = Multiply()([m1,m2])
#add = Abs()([m1,m2])
#m = Merge(mode='concat')([mul,add])
score = Dense(output_dim=nclass,activation='softmax')(m)
model = Model([inp1,inp2,inp3],score)
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
def prepare_model2(ninputs=9600, n_feats=45,nclass=4,n_tfidf=10001):
inp1 = Input(shape=(ninputs,))
inp2 = Input(shape=(n_feats,))
inp3 = Input(shape=(n_tfidf,))
reg = 0.00005
out_neurons1 = 500
#out_neurons2 = 20
#out_neurons2 = 10
m1 = Dense(input_dim=ninputs, output_dim=out_neurons1,activation='sigmoid'\
,kernel_regularizer=regularizers.l2(0.00000001))(inp1)
m1 = Dropout(0.2)(m1)
m1 = Dense(100,activation='sigmoid')(m1)
#m1 = Dropout(0.2)(m1)
#m1 = Dense(4, activation='sigmoid')(m1)
m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
m2 = Dense(4,activation='relu')(inp2)
#m2=Dense(4,activation='relu')(m2)
m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
kernel_regularizer=regularizers.l2(reg))(inp3)
m3 = Dropout(0.4)(m3)
m3 = Dense(50, activation='relu')(m3)
#m3 = Dropout(0.4)(m3)
#m3 = Dense(4, activation='softmax')(m3)
#m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
#m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)
m = Merge(mode='concat')([m1,m2,m3])
#mul = Multiply()([m1,m2])
#add = Abs()([m1,m2])
#m = Merge(mode='concat')([mul,add])
score = Dense(output_dim=nclass,activation='softmax')(m)
model = Model([inp1,inp2,inp3],score)
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
def prepare_model1(ninputs=9600, n_feats=45,nclass=4,n_tfidf=10001):
inp1 = Input(shape=(ninputs,))
inp2 = Input(shape=(n_feats,))
inp3 = Input(shape=(n_tfidf,))
reg = 0.00005
out_neurons1 = 500
#out_neurons2 = 20
#out_neurons2 = 10
m1 = Dense(input_dim=ninputs, output_dim=out_neurons1,activation='sigmoid'\
,kernel_regularizer=regularizers.l2(0.00000001))(inp1)
m1 = Dropout(0.5)(m1)
m1 = Dense(100,activation='sigmoid')(m1)
m1 = Dropout(0.5)(m1)
m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
m2 = Dense(30,activation='relu')(m2)
m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
kernel_regularizer=regularizers.l2(reg))(inp3)
m3 = Dropout(0.6)(m3)
m3 = Dense(100, activation='relu')(m3)
m3 = Dropout(0.4)(m3)
m3 = Dense(4, activation='softmax')(m3)
#m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
#m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)
m = Merge(mode='concat')([m1,m2,m3])
#mul = Multiply()([m1,m2])
#add = Abs()([m1,m2])
#m = Merge(mode='concat')([mul,add])
score = Dense(output_dim=nclass,activation='softmax')(m)
model = Model([inp1,inp2,inp3],score)
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
"""
Set up and compile the model architecture (Logistic regression)
print 'changed'
out_neurons1 = 500
lrmodel = Sequential()
lrmodel.add(Dense(input_dim=ninputs, output_dim=out_neurons1,activation='sigmoid'\
,kernel_regularizer=regularizers.l2(0.00000001)))
lrmodel.add(Dropout(0.5))
#lrmodel.add(Dense(out_neurons2))
#lrmodel.add(Dropout(0.5))
lrmodel.add(Dense(output_dim=nclass))
#lrmodel.add(Dense(input_dim=ninputs, output_dim=nclass))
#lrmodel.add(Dropout(0.3))
lrmodel.add(Activation('softmax'))
lrmodel.compile(loss='categorical_crossentropy', optimizer='adam')
return lrmodel
"""
def train_model(lrmodel, X, Y, devX, devY, devscores, feat_train, feat_dev, train_tfidf, test_tfidf):
"""
    Train model, using the FNC score on the dev set for early stopping
"""
done = False
best = -1.0
#r = np.arange(1,5)
while not done:
# Every 100 epochs, check Pearson on development set
lrmodel.fit([X,feat_train,train_tfidf], Y, verbose=2, shuffle=False, nb_epoch = 3, validation_data=([devX,feat_dev,test_tfidf], devY))
#yhat = np.dot(lrmodel.predict(devX, verbose=2), r)
yhat = lrmodel.predict([devX,feat_dev,test_tfidf], verbose=2)
yhat = [i.argmax()for i in yhat]
string_predicted,test_stances = [],[]
for i,j in zip(yhat,devscores):
if i == 3:
string_predicted.append('unrelated')
if i == 0:
string_predicted.append('agree')
if i == 2:
string_predicted.append('discuss')
if i == 1:
string_predicted.append('disagree')
if j == 3:
test_stances.append('unrelated')
if j == 0:
test_stances.append('agree')
if j == 2:
test_stances.append('discuss')
if j == 1:
test_stances.append('disagree')
print 'using new limit value....'
#score = accuracy_score(devscores, yhat)
score = report_score(test_stances,string_predicted,val=True)
#return lrmodel
if score > best:
print score
best = score
bestlrmodel = prepare_model(ninputs=X.shape[1],n_feats=feat_train.shape[1],n_tfidf=train_tfidf.shape[1])
bestlrmodel.set_weights(lrmodel.get_weights())
else:
done = True
print '***** best model obtained with score',best,'******'
yhat = bestlrmodel.predict([devX, feat_dev, test_tfidf], verbose=2)
yhat = [i.argmax()for i in yhat]
string_predicted,test_stances = [],[]
for i,j in zip(yhat,devscores):
if i == 3:
string_predicted.append('unrelated')
if i == 0:
string_predicted.append('agree')
if i == 2:
string_predicted.append('discuss')
if i == 1:
string_predicted.append('disagree')
if j == 3:
test_stances.append('unrelated')
if j == 0:
test_stances.append('agree')
if j == 2:
test_stances.append('discuss')
if j == 1:
test_stances.append('disagree')
report_score(test_stances,string_predicted)
return bestlrmodel
import math
def load_features():
train_hand = np.load('/fncdata3/hand.train.npy')
#train_overlap = np.load('/fncdata3/overlap.train.npy')
#train_refuting = np.load('/fncdata3/refuting.train.npy')
#train_polarity = np.load('/fncdata3/polarity.train.npy')
test_hand = np.load('/fncdata3/hand.test.npy')
#test_overlap = np.load('/fncdata3/overlap.test.npy')
#test_refuting = np.load('/fncdata3/refuting.test.npy')
#test_polarity = np.load('/fncdata3/polarity.test.npy')
'''
train_other = np.load('/fncdata4/x_train.npy')
test_other = np.load('/fncdata4/x_test.npy')
train_other = train_other[:,16]
test_other = test_other[:,16]
#train_X = np.c_[train_polarity, train_refuting, train_overlap]
#test_X = np.c_[test_polarity, test_refuting, test_overlap]
for k,i in enumerate(test_other):
if math.isnan(i):
#print 'here',k
test_other[k] = 0.0
train_X = np.c_[train_hand, train_other]
test_X = np.c_[test_hand, test_other]
train_feat = np.load('/fncdata3/feat_train.npy')
train_other = np.load('/fncdata3/x_train.npy')
test_feat = np.load('/fncdata3/feat_test.npy')
test_other = np.load('/fncdata3/x_test.npy')
train_X = np.c_[train_feat, train_other]
test_X = np.c_[test_feat, test_other]
for k,i in enumerate(test_X):
for ind,j in enumerate(i):
if math.isnan(j):
#print 'here',k
test_X[k][ind] = 0.0
ss = StandardScaler()
ss.fit(np.vstack((train_X, test_X)))
feat1_train = ss.transform(train_X)
feat1_test = ss.transform(test_X)
#feat_dev = feat1_train[len(trainF):]
#feat1_train = feat1_train[0:len(trainF)]
#feat_dev = feat1_test
'''
return train_hand, test_hand |
scripts/webgraph.py | fabratu/networkit | 366 | 12722740 | from NetworKit import *
import urllib.parse
import collections
def analyzeWebCommunities(graphPath, urlsPath):
print("reading input...")
G = readGraph(graphPath)
urls = LineFileReader().read(urlsPath)
urlmap = retrieveAttributes(G.nodes(), urls)
print("community detection...")
zeta = LabelPropagation().run(G)
communitySizes = zeta.clusterSizes()
communityIds = [i for i in range(len(communitySizes)) if communitySizes[i] > 0]
netlocMap = {} # community -> size distribution of netlocs
for cid in communityIds:
if communitySizes[cid] > 100: # filter
community = zeta.getMembers(cid)
            netlocMap[cid] = collections.Counter(urllib.parse.urlparse(urlmap[v]).netloc for v in community)
def getNetloc2nodes(G, urls):
netlocs = [urllib.parse.urlparse(url).netloc for url in urls]
netloc2nodes = {}
    for u in range(len(netlocs)):
uloc = netlocs[u]
if uloc not in netloc2nodes:
            netloc2nodes[uloc] = []
netloc2nodes[uloc].append(u)
return netloc2nodes
def toLocations(urls):
""" Turn the list of urls into a list of locations"""
errors = 0 # count the parser errors
locs = []
for url in urls:
try:
parsed = urllib.parse.urlparse(url)
locs.append(parsed.netloc)
except:
print("URL parser error occurred")
errors += 1
locs.append(None)
return locs
def getLocations(nodes, urls):
""" Given a collection of nodes, return a set of net locations (domains) to which they belong"""
theurls = dict((u, urls[u]) for u in nodes)
    loclist = [urllib.parse.urlparse(url).netloc for url in theurls.values()]
    return set(loclist)
def matchAndIndex(substring, ls, exact=False):
matching = {}
if exact:
i = 0
for s in ls:
if substring == s:
matching[i] = s
i += 1
else:
i = 0
for s in ls:
if substring in s:
matching[i] = s
i += 1
return matching
def writeSeeds(match, filename):
file = open(filename, "w")
string = ",".join([str(s) for s in match.keys()])
file.write(string)
file.close()
def matchLocs(substring, ls):
match = matchAndIndex(substring, ls)
locs = set(match.values())
return locs |
a_nice_mc/utils/hmc.py | tjansse2/a-nice-mc | 118 | 12722754 |
import time
import numpy as np
import tensorflow as tf
from a_nice_mc.utils.logger import create_logger
logger = create_logger(__name__)
def kinetic_energy(v):
return 0.5 * tf.reduce_sum(tf.multiply(v, v), axis=1)
def hamiltonian(p, v, f):
"""
Return the value of the Hamiltonian
:param p: position variable
:param v: velocity variable
:param f: energy function
:return: hamiltonian
"""
return f(p) + kinetic_energy(v)
def metropolis_hastings_accept(energy_prev, energy_next):
"""
Run Metropolis-Hastings algorithm for 1 step
:param energy_prev:
:param energy_next:
:return: Tensor of boolean values, indicating accept or reject
"""
energy_diff = energy_prev - energy_next
return (tf.exp(energy_diff) - tf.random_uniform(tf.shape(energy_prev))) >= 0.0
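# The comparison above implements the usual Metropolis rule: a move is accepted with
# probability min(1, exp(E_prev - E_next)), i.e. when exp(energy_diff) exceeds a uniform draw.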
def simulate_dynamics(initial_pos, initial_vel, stepsize, n_steps, energy_fn):
def leapfrog(pos, vel, step, i):
de_dp_ = tf.gradients(tf.reduce_sum(energy_fn(pos)), pos)[0]
new_vel_ = vel - step * de_dp_
new_pos_ = pos + step * new_vel_
return [new_pos_, new_vel_, step, tf.add(i, 1)]
def condition(pos, vel, step, i):
return tf.less(i, n_steps)
de_dp = tf.gradients(tf.reduce_sum(energy_fn(initial_pos)), initial_pos)[0]
vel_half_step = initial_vel - 0.5 * stepsize * de_dp
pos_full_step = initial_pos + stepsize * vel_half_step
i = tf.constant(0)
final_pos, new_vel, _, _ = tf.while_loop(condition, leapfrog, [pos_full_step, vel_half_step, stepsize, i])
de_dp = tf.gradients(tf.reduce_sum(energy_fn(final_pos)), final_pos)[0]
final_vel = new_vel - 0.5 * stepsize * de_dp
return final_pos, final_vel
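# simulate_dynamics is a leapfrog integrator: one half-step velocity update and a full position
# step before the loop, n_steps full velocity/position updates inside tf.while_loop, and a
# closing half-step velocity update to complete the trajectory.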
def hmc_move(initial_pos, energy_fn, stepsize, n_steps):
initial_vel = tf.random_normal(tf.shape(initial_pos))
final_pos, final_vel = simulate_dynamics(
initial_pos=initial_pos,
initial_vel=initial_vel,
stepsize=stepsize,
n_steps=n_steps,
energy_fn=energy_fn
)
accept = metropolis_hastings_accept(
energy_prev=hamiltonian(initial_pos, initial_vel, energy_fn),
energy_next=hamiltonian(final_pos, final_vel, energy_fn)
)
return accept, final_pos, final_vel
def hmc_updates(initial_pos, stepsize, avg_acceptance_rate, final_pos, accept,
target_acceptance_rate, stepsize_inc, stepsize_dec,
stepsize_min, stepsize_max, avg_acceptance_slowness):
new_pos = tf.where(accept, final_pos, initial_pos)
new_stepsize_ = tf.multiply(
stepsize,
tf.where(tf.greater(avg_acceptance_rate, target_acceptance_rate), stepsize_inc, stepsize_dec)
)
new_stepsize = tf.maximum(tf.minimum(new_stepsize_, stepsize_max), stepsize_min)
new_acceptance_rate = tf.add(
avg_acceptance_slowness * avg_acceptance_rate,
(1.0 - avg_acceptance_slowness) * tf.reduce_mean(tf.to_float(accept))
)
return new_pos, new_stepsize, new_acceptance_rate
class HamiltonianMonteCarloSampler(object):
"""
TensorFlow implementation for Hamiltonian Monte Carlo
"""
def __init__(self, energy_fn, prior, stepsize=0.1, n_steps=10,
target_acceptance_rate=0.65, avg_acceptance_slowness=0.9,
stepsize_min=0.001, stepsize_max=1000.0, stepsize_dec=0.97, stepsize_inc=1.03,
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1,
sess=False):
self.energy_fn = energy_fn
self.prior = prior
self.z = self.energy_fn.z
self.stepsize = tf.constant(stepsize)
self.avg_acceptance_rate = tf.constant(target_acceptance_rate)
self.sess = sess
def fn(zsa, x):
z, s, a = zsa
accept, final_pos, final_vel = hmc_move(
z,
energy_fn,
s,
n_steps
)
z_, s_, a_ = hmc_updates(
z,
s,
avg_acceptance_rate=a,
final_pos=final_pos,
accept=accept,
stepsize_min=stepsize_min,
stepsize_max=stepsize_max,
stepsize_dec=stepsize_dec,
stepsize_inc=stepsize_inc,
target_acceptance_rate=target_acceptance_rate,
avg_acceptance_slowness=avg_acceptance_slowness
)
return z_, s_, a_
self.steps = tf.placeholder(tf.int32, [])
elems = tf.zeros([self.steps])
self.z_, self.stepsize_, self.avg_acceptance_rate_ = tf.scan(
fn, elems,
(self.z, self.stepsize, self.avg_acceptance_rate),
back_prop=False
)
if not self.sess:
# only want to start a session if running this independently
self.sess = tf.Session(
config=tf.ConfigProto(
inter_op_parallelism_threads=inter_op_parallelism_threads,
intra_op_parallelism_threads=intra_op_parallelism_threads
)
)
self.sess.run(tf.global_variables_initializer())
def sample(self, steps, batch_size):
start = time.time()
z, stepsize, acceptance_rate = self.sess.run(
[self.z_, self.stepsize_, self.avg_acceptance_rate_],
feed_dict={self.steps: steps, self.z: self.prior(batch_size)}
)
end = time.time()
logger.info('batches [%d] steps [%d] time [%5.4f] steps/s [%5.4f]' %
(batch_size, steps, end - start, steps * batch_size / (end - start)))
logger.info('average recent acceptance rate [%5.4f]' % np.mean(acceptance_rate[-int(steps*0.1):]))
z = np.transpose(z, [1, 0, 2])
return z
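# Illustrative usage sketch (added for clarity; assumes an energy-function object exposing a `z`
# placeholder and a `prior(batch_size)` callable, as the constructor above requires):
#
#   sampler = HamiltonianMonteCarloSampler(energy_fn, prior, stepsize=0.1, n_steps=10)
#   samples = sampler.sample(steps=1000, batch_size=32)   # -> array of shape [batch, steps, dim]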
|
backpack/extensions/firstorder/batch_grad/__init__.py | jabader97/backpack | 395 | 12722776 | """Contains the backpropagation extension for grad_batch: BatchGrad.
It defines the module extension for each module.
"""
from typing import List
from torch.nn import (
LSTM,
RNN,
BatchNorm1d,
BatchNorm2d,
BatchNorm3d,
Conv1d,
Conv2d,
Conv3d,
ConvTranspose1d,
ConvTranspose2d,
ConvTranspose3d,
Embedding,
Linear,
)
from backpack.extensions.firstorder.base import FirstOrderBackpropExtension
from . import (
batchnorm_nd,
conv1d,
conv2d,
conv3d,
conv_transpose1d,
conv_transpose2d,
conv_transpose3d,
embedding,
linear,
rnn,
)
class BatchGrad(FirstOrderBackpropExtension):
"""Individual gradients for each sample in a minibatch.
Stores the output in ``grad_batch`` as a ``[N x ...]`` tensor,
where ``N`` batch size and ``...`` is the shape of the gradient.
If ``subsampling`` is specified, ``N`` is replaced by the number of active
samples.
.. note::
Beware of scaling issue
The `individual gradients` depend on the scaling of the overall function.
Let ``fᵢ`` be the loss of the ``i`` th sample, with gradient ``gᵢ``.
``BatchGrad`` will return
- ``[g₁, …, gₙ]`` if the loss is a sum, ``∑ᵢ₌₁ⁿ fᵢ``,
- ``[¹/ₙ g₁, …, ¹/ₙ gₙ]`` if the loss is a mean, ``¹/ₙ ∑ᵢ₌₁ⁿ fᵢ``.
The concept of individual gradients is only meaningful if the
objective is a sum of independent functions (no batchnorm).
"""
def __init__(self, subsampling: List[int] = None):
"""Initialization.
Defines extension for each module.
Args:
subsampling: Indices of samples in the mini-batch for which individual
gradients will be computed. Defaults to ``None`` (use all samples).
"""
super().__init__(
savefield="grad_batch",
module_exts={
Linear: linear.BatchGradLinear(),
Conv1d: conv1d.BatchGradConv1d(),
Conv2d: conv2d.BatchGradConv2d(),
Conv3d: conv3d.BatchGradConv3d(),
ConvTranspose1d: conv_transpose1d.BatchGradConvTranspose1d(),
ConvTranspose2d: conv_transpose2d.BatchGradConvTranspose2d(),
ConvTranspose3d: conv_transpose3d.BatchGradConvTranspose3d(),
BatchNorm1d: batchnorm_nd.BatchGradBatchNormNd(),
BatchNorm2d: batchnorm_nd.BatchGradBatchNormNd(),
BatchNorm3d: batchnorm_nd.BatchGradBatchNormNd(),
RNN: rnn.BatchGradRNN(),
LSTM: rnn.BatchGradLSTM(),
Embedding: embedding.BatchGradEmbedding(),
},
subsampling=subsampling,
)
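# Minimal usage sketch (follows the standard BackPACK workflow; the model/loss names below are
# illustrative only):
#
#   from backpack import backpack, extend
#   model, lossfunc = extend(model), extend(lossfunc)
#   with backpack(BatchGrad()):
#       lossfunc(model(X), y).backward()
#   # every parameter now carries a ``grad_batch`` attribute of shape [N, *param.shape]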
|
dataset/DeepFakes/faceswap-master/lib/gui/stats.py | MrThiago/FaceForensics | 1,930 | 12722800 | #!/usr/bin/env python3
""" Stats functions for the GUI """
import time
import os
import warnings
from math import ceil, sqrt
import numpy as np
from lib.Serializer import PickleSerializer
class SavedSessions(object):
""" Saved Training Session """
def __init__(self, sessions_data):
self.serializer = PickleSerializer
self.sessions = self.load_sessions(sessions_data)
def load_sessions(self, filename):
""" Load previously saved sessions """
stats = list()
if os.path.isfile(filename):
with open(filename, self.serializer.roptions) as sessions:
stats = self.serializer.unmarshal(sessions.read())
return stats
def save_sessions(self, filename):
""" Save the session file """
with open(filename, self.serializer.woptions) as session:
session.write(self.serializer.marshal(self.sessions))
print("Saved session stats to: {}".format(filename))
class CurrentSession(object):
""" The current training session """
def __init__(self):
self.stats = {"iterations": 0,
"batchsize": None, # Set and reset by wrapper
"timestamps": [],
"loss": [],
"losskeys": []}
self.timestats = {"start": None,
"elapsed": None}
self.modeldir = None # Set and reset by wrapper
self.filename = None
self.historical = None
def initialise_session(self, currentloss):
""" Initialise the training session """
self.load_historical()
for item in currentloss:
self.stats["losskeys"].append(item[0])
self.stats["loss"].append(list())
self.timestats["start"] = time.time()
def load_historical(self):
""" Load historical data and add current session to the end """
self.filename = os.path.join(self.modeldir, "trainingstats.fss")
self.historical = SavedSessions(self.filename)
self.historical.sessions.append(self.stats)
def add_loss(self, currentloss):
""" Add a loss item from the training process """
if self.stats["iterations"] == 0:
self.initialise_session(currentloss)
self.stats["iterations"] += 1
self.add_timestats()
for idx, item in enumerate(currentloss):
self.stats["loss"][idx].append(float(item[1]))
def add_timestats(self):
""" Add timestats to loss dict and timestats """
now = time.time()
self.stats["timestamps"].append(now)
elapsed_time = now - self.timestats["start"]
self.timestats["elapsed"] = time.strftime("%H:%M:%S",
time.gmtime(elapsed_time))
def save_session(self):
""" Save the session file to the modeldir """
if self.stats["iterations"] > 0:
print("Saving session stats...")
self.historical.save_sessions(self.filename)
class SessionsTotals(object):
""" The compiled totals of all saved sessions """
def __init__(self, all_sessions):
self.stats = {"split": [],
"iterations": 0,
"batchsize": [],
"timestamps": [],
"loss": [],
"losskeys": []}
self.initiate(all_sessions)
self.compile(all_sessions)
def initiate(self, sessions):
""" Initiate correct losskey titles and number of loss lists """
for losskey in sessions[0]["losskeys"]:
self.stats["losskeys"].append(losskey)
self.stats["loss"].append(list())
def compile(self, sessions):
""" Compile all of the sessions into totals """
current_split = 0
for session in sessions:
iterations = session["iterations"]
current_split += iterations
self.stats["split"].append(current_split)
self.stats["iterations"] += iterations
self.stats["timestamps"].extend(session["timestamps"])
self.stats["batchsize"].append(session["batchsize"])
self.add_loss(session["loss"])
def add_loss(self, session_loss):
""" Add loss vals to each of their respective lists """
for idx, loss in enumerate(session_loss):
self.stats["loss"][idx].extend(loss)
class SessionsSummary(object):
""" Calculations for analysis summary stats """
def __init__(self, raw_data):
self.summary = list()
self.summary_stats_compile(raw_data)
def summary_stats_compile(self, raw_data):
""" Compile summary stats """
raw_summaries = list()
for idx, session in enumerate(raw_data):
raw_summaries.append(self.summarise_session(idx, session))
totals_summary = self.summarise_totals(raw_summaries)
raw_summaries.append(totals_summary)
self.format_summaries(raw_summaries)
# Compile Session Summaries
@staticmethod
def summarise_session(idx, session):
""" Compile stats for session passed in """
starttime = session["timestamps"][0]
endtime = session["timestamps"][-1]
elapsed = endtime - starttime
# Bump elapsed to 0.1s if no time is recorded
# to hack around div by zero error
elapsed = 0.1 if elapsed == 0 else elapsed
rate = (session["batchsize"] * session["iterations"]) / elapsed
return {"session": idx + 1,
"start": starttime,
"end": endtime,
"elapsed": elapsed,
"rate": rate,
"batch": session["batchsize"],
"iterations": session["iterations"]}
@staticmethod
def summarise_totals(raw_summaries):
""" Compile the stats for all sessions combined """
elapsed = 0
rate = 0
batchset = set()
iterations = 0
total_summaries = len(raw_summaries)
for idx, summary in enumerate(raw_summaries):
if idx == 0:
starttime = summary["start"]
if idx == total_summaries - 1:
endtime = summary["end"]
elapsed += summary["elapsed"]
rate += summary["rate"]
batchset.add(summary["batch"])
iterations += summary["iterations"]
batch = ",".join(str(bs) for bs in batchset)
return {"session": "Total",
"start": starttime,
"end": endtime,
"elapsed": elapsed,
"rate": rate / total_summaries,
"batch": batch,
"iterations": iterations}
def format_summaries(self, raw_summaries):
""" Format the summaries nicely for display """
for summary in raw_summaries:
summary["start"] = time.strftime("%x %X",
time.gmtime(summary["start"]))
summary["end"] = time.strftime("%x %X",
time.gmtime(summary["end"]))
summary["elapsed"] = time.strftime("%H:%M:%S",
time.gmtime(summary["elapsed"]))
summary["rate"] = "{0:.1f}".format(summary["rate"])
self.summary = raw_summaries
class Calculations(object):
""" Class to hold calculations against raw session data """
def __init__(self,
session,
display="loss",
selections=["raw"],
avg_samples=10,
flatten_outliers=False,
is_totals=False):
warnings.simplefilter("ignore", np.RankWarning)
self.session = session
if display.lower() == "loss":
display = self.session["losskeys"]
else:
display = [display]
self.args = {"display": display,
"selections": selections,
"avg_samples": int(avg_samples),
"flatten_outliers": flatten_outliers,
"is_totals": is_totals}
self.iterations = 0
self.stats = None
self.refresh()
def refresh(self):
""" Refresh the stats """
self.iterations = 0
self.stats = self.get_raw()
self.get_calculations()
self.remove_raw()
def get_raw(self):
""" Add raw data to stats dict """
raw = dict()
for idx, item in enumerate(self.args["display"]):
if item.lower() == "rate":
data = self.calc_rate(self.session)
else:
data = self.session["loss"][idx][:]
if self.args["flatten_outliers"]:
data = self.flatten_outliers(data)
if self.iterations == 0:
self.iterations = len(data)
raw["raw_{}".format(item)] = data
return raw
def remove_raw(self):
""" Remove raw values from stats if not requested """
if "raw" in self.args["selections"]:
return
for key in list(self.stats.keys()):
if key.startswith("raw"):
del self.stats[key]
def calc_rate(self, data):
""" Calculate rate per iteration
NB: For totals, gaps between sessions can be large
        so time difference has to be reset for each session's
rate calculation """
batchsize = data["batchsize"]
if self.args["is_totals"]:
split = data["split"]
else:
batchsize = [batchsize]
split = [len(data["timestamps"])]
prev_split = 0
rate = list()
for idx, current_split in enumerate(split):
prev_time = data["timestamps"][prev_split]
timestamp_chunk = data["timestamps"][prev_split:current_split]
for item in timestamp_chunk:
current_time = item
timediff = current_time - prev_time
iter_rate = 0 if timediff == 0 else batchsize[idx] / timediff
rate.append(iter_rate)
prev_time = current_time
prev_split = current_split
if self.args["flatten_outliers"]:
rate = self.flatten_outliers(rate)
return rate
@staticmethod
def flatten_outliers(data):
""" Remove the outliers from a provided list """
retdata = list()
samples = len(data)
mean = (sum(data) / samples)
limit = sqrt(sum([(item - mean)**2 for item in data]) / samples)
for item in data:
if (mean - limit) <= item <= (mean + limit):
retdata.append(item)
else:
retdata.append(mean)
return retdata
def get_calculations(self):
""" Perform the required calculations """
for selection in self.get_selections():
if selection[0] == "raw":
continue
method = getattr(self, "calc_{}".format(selection[0]))
key = "{}_{}".format(selection[0], selection[1])
raw = self.stats["raw_{}".format(selection[1])]
self.stats[key] = method(raw)
def get_selections(self):
""" Compile a list of data to be calculated """
for summary in self.args["selections"]:
for item in self.args["display"]:
yield summary, item
def calc_avg(self, data):
""" Calculate rolling average """
avgs = list()
presample = ceil(self.args["avg_samples"] / 2)
postsample = self.args["avg_samples"] - presample
datapoints = len(data)
if datapoints <= (self.args["avg_samples"] * 2):
print("Not enough data to compile rolling average")
return avgs
for idx in range(0, datapoints):
if idx < presample or idx >= datapoints - postsample:
avgs.append(None)
continue
else:
avg = sum(data[idx - presample:idx + postsample]) \
/ self.args["avg_samples"]
avgs.append(avg)
return avgs
@staticmethod
def calc_trend(data):
""" Compile trend data """
points = len(data)
if points < 10:
dummy = [None for i in range(points)]
return dummy
x_range = range(points)
fit = np.polyfit(x_range, data, 3)
poly = np.poly1d(fit)
trend = poly(x_range)
return trend
|
exps/refinenet_root2/train.py | zju3dv/SMAP | 209 | 12722840 |
import os
import os.path as osp
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from dataset.p2p_dataset import P2PDataset
from model.refinenet import RefineNet
from config import cfg
checkpoint_dir = cfg.CHECKPOINT_DIR
os.makedirs(checkpoint_dir, exist_ok=True)
def main():
train_dataset = P2PDataset(dataset_path=cfg.DATA_DIR, root_idx=cfg.DATASET.ROOT_IDX)
train_loader = DataLoader(train_dataset, batch_size=cfg.SOLVER.BATCH_SIZE, shuffle=True)
model = RefineNet()
device = torch.device(cfg.MODEL.DEVICE)
model.to(device)
if len(cfg.MODEL.GPU_IDS) > 1:
model = nn.parallel.DataParallel(model, device_ids=cfg.MODEL.GPU_IDS)
optimizer = optim.Adam(model.parameters(), lr=cfg.SOLVER.BASE_LR, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=cfg.SOLVER.LR_STEP_SIZE, gamma=cfg.SOLVER.GAMMA, last_epoch=-1)
criterion = nn.MSELoss()
model.train()
for epoch in range(1, cfg.SOLVER.NUM_EPOCHS+1):
total_loss = 0
count = 0
for i, (inp, gt) in enumerate(train_loader):
count += 1
inp = inp.to(device)
gt = gt.to(device)
preds = model(inp)
loss = criterion(preds, gt)
total_loss += loss.data.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
avg_loss = total_loss / count
if epoch % cfg.PRINT_FREQ == 0:
print("epoch: {} | loss: {}.".format(epoch, avg_loss))
if epoch % cfg.SAVE_FREQ == 0 or epoch == cfg.SOLVER.NUM_EPOCHS:
torch.save(model.state_dict(), osp.join(checkpoint_dir, "RefineNet_epoch_%03d.pth" % epoch))
if __name__ == "__main__":
main()
|
kafka/tools/models/broker.py | akashvacher/kafka-tools | 578 | 12722962 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
import re
import socket
import struct
import time
from kafka.tools import log
from kafka.tools.configuration import ClientConfiguration
from kafka.tools.models import BaseModel
from kafka.tools.exceptions import ConfigurationException, ConnectionError
from kafka.tools.protocol.types.bytebuffer import ByteBuffer
from kafka.tools.utilities import json_loads
class Endpoint(BaseModel):
equality_attrs = ['protocol', 'hostname', 'port']
def __init__(self, protocol, hostname, port):
self.protocol = protocol
self.hostname = hostname
self.port = port
class Broker(BaseModel):
equality_attrs = ['hostname', 'id']
@property
def hostname(self):
return self.endpoint.hostname
@hostname.setter
def hostname(self, value):
self.endpoint.hostname = value
@property
def port(self):
return self.endpoint.port
@port.setter
def port(self, value):
self.endpoint.port = value
def __init__(self, hostname, id=0, port=9092, sock=None, configuration=None):
self.id = id
self.endpoint = Endpoint('', hostname, port)
self.jmx_port = -1
self.rack = None
self.version = None
self.endpoints = None
self.timestamp = None
self.cluster = None
self.partitions = {}
self.endpoints = []
self._sock = sock
self._correlation_id = 1
self._configuration = configuration or ClientConfiguration()
@classmethod
def create_from_json(cls, broker_id, jsondata):
data = json_loads(jsondata)
# These things are required, and we can't proceed if they're not there
try:
newbroker = cls(data['host'], id=broker_id, port=data['port'])
except KeyError:
raise ConfigurationException("Cannot parse broker data in zookeeper. This version of Kafka may not be supported.")
# These things are optional, and are pulled in for convenience or extra features
for attr in ['jmx_port', 'rack', 'version', 'timestamp']:
try:
setattr(newbroker, attr, data[attr])
except KeyError:
pass
# if the broker defines multiple endpoints,
newbroker._set_endpoints(data.get('endpoints', []))
return newbroker
def _set_endpoints(self, endpoints):
endpoint_re = re.compile("(.*)://(.*):([0-9]+)", re.I)
for endpoint in endpoints:
m = endpoint_re.match(endpoint)
if m is not None:
self.endpoints.append(Endpoint(m.group(1), m.group(2), int(m.group(3))))
# Shallow copy - do not copy partitions map over
def copy(self):
newbroker = Broker(self.hostname, id=self.id, port=self.port)
newbroker.jmx_port = self.jmx_port
newbroker.port = self.port
newbroker.rack = self.rack
newbroker.version = self.version
newbroker.endpoints = self.endpoints
newbroker.timestamp = self.timestamp
newbroker.cluster = self.cluster
return newbroker
def num_leaders(self):
return self.num_partitions_at_position(0)
def num_partitions_at_position(self, pos=0):
if pos in self.partitions:
return len(self.partitions[pos])
else:
            return 0
def percent_leaders(self):
if self.num_partitions() == 0:
return 0.0
return (self.num_leaders() / self.num_partitions()) * 100
def total_size(self):
return sum([p.size for pos in self.partitions for p in self.partitions[pos]], 0)
def num_partitions(self):
return sum([len(self.partitions[pos]) for pos in self.partitions], 0)
def get_endpoint(self, protocol):
for endpoint in self.endpoints:
if endpoint.protocol == protocol:
return endpoint
return self.endpoint
def _get_socket(self, sslcontext):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if sslcontext is not None:
sock = sslcontext.wrap_socket(sock, server_hostname=self.hostname)
return sock
def connect(self):
protocol = 'SSL' if self._configuration.ssl_context is not None else 'PLAINTEXT'
endpoint = self.get_endpoint(protocol)
log.info("Connecting to {0} on port {1} using {2}".format(self.hostname, self.port, protocol))
try:
self._sock = self._sock or self._get_socket(self._configuration.ssl_context)
self._sock.connect((endpoint.hostname, endpoint.port))
except socket.error as e:
log.error("Cannot connect to broker {0}:{1}: {2}".format(endpoint.hostname, endpoint.port, e))
raise ConnectionError("Cannot connect to broker {0}:{1}: {2}".format(endpoint.hostname, endpoint.port, e))
def close(self):
log.info("Disconnecting from {0}".format(self.hostname))
# Shutdown throws an error if the socket is not connected, but that's OK
try:
self._sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
self._sock.close()
self._sock = None
def send(self, request):
attempts = 0
while attempts < self._configuration.num_retries:
attempts += 1
try:
# Connect to the broker if not currently connected
if self._sock is None:
self.connect()
return self._single_send(request)
except ConnectionError as e:
if attempts >= self._configuration.num_retries:
log.error("Failed communicating with Kafka broker {0}. retries remaining = 0: {1}".format(self.id, e))
raise
else:
log.warn("Failed communicating with Kafka broker {0}. retries remaining = {1}: {2}".format(self.id,
self._configuration.num_retries - attempts,
e))
# Sleep for the backoff period before retrying the request, and force a reconnect
self.close()
time.sleep(self._configuration.retry_backoff)
def _single_send(self, request):
# Build the payload based on the request passed in. We'll fill in the size at the end
buf = ByteBuffer(self._configuration.max_request_size)
buf.putInt32(0)
buf.putInt16(request.api_key)
buf.putInt16(request.api_version)
buf.putInt32(self._correlation_id)
buf.putInt16(len(self._configuration.client_id))
buf.put(struct.pack('{0}s'.format(len(self._configuration.client_id)), self._configuration.client_id.encode("utf-8")))
request.encode(buf)
# Close the payload and write the size (payload size without the size field itself)
buf.limit = buf.position - 1
payload_len = buf.capacity - 4
buf.rewind()
buf.putInt32(payload_len)
buf.rewind()
# Increment the correlation ID for the next request
self._correlation_id += 1
try:
# Send the payload bytes to the broker
self._sock.sendall(buf.get(buf.capacity))
# Read the first 4 bytes so we know the size
size = ByteBuffer(self._sock.recv(4)).getInt32()
# Read the response that we're expecting
response_data = self._read_bytes(size)
response = ByteBuffer(response_data)
# Parse off the correlation ID for the response
correlation_id = response.getInt32()
except EOFError:
raise ConnectionError("Failed to read enough data from Kafka")
except socket.error as e:
raise ConnectionError("Failed communicating with Kafka: {0}".format(e))
# Get the proper response class and parse the response
return correlation_id, request.response.from_bytebuffer(correlation_id, response.slice())
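    # Wire-format note (summary of _single_send above): each request is a 4-byte size prefix
    # followed by api_key, api_version, the correlation id, the client id string and the encoded
    # request payload; the response begins with its own 4-byte size and echoes the correlation id.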
def _read_bytes(self, size):
bytes_left = size
responses = []
while bytes_left:
try:
data = self._sock.recv(min(bytes_left, 4096))
except socket.error:
raise socket.error("Unable to receive data from Kafka")
if data == b'':
raise socket.error("Not enough data to read message -- did server kill socket?")
bytes_left -= len(data)
responses.append(data)
return b''.join(responses)
def to_dict(self):
return {
'id': self.id,
'hostname': self.hostname,
'jmx_port': self.jmx_port,
'port': self.port,
'rack': self.rack,
'version': self.version
}
|
kornia/losses/depth_smooth.py | Ishticode/kornia | 4,894 | 12722993 | import torch
import torch.nn as nn
# Based on
# https://github.com/tensorflow/models/blob/master/research/struct2depth/model.py#L625-L641
def _gradient_x(img: torch.Tensor) -> torch.Tensor:
if len(img.shape) != 4:
raise AssertionError(img.shape)
return img[:, :, :, :-1] - img[:, :, :, 1:]
def _gradient_y(img: torch.Tensor) -> torch.Tensor:
if len(img.shape) != 4:
raise AssertionError(img.shape)
return img[:, :, :-1, :] - img[:, :, 1:, :]
def inverse_depth_smoothness_loss(idepth: torch.Tensor, image: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes image-aware inverse depth smoothness loss.
.. math::
\text{loss} = \left | \partial_x d_{ij} \right | e^{-\left \|
\partial_x I_{ij} \right \|} + \left |
\partial_y d_{ij} \right | e^{-\left \| \partial_y I_{ij} \right \|}
Args:
idepth: tensor with the inverse depth with shape :math:`(N, 1, H, W)`.
image: tensor with the input image with shape :math:`(N, 3, H, W)`.
Return:
a scalar with the computed loss.
Examples:
>>> idepth = torch.rand(1, 1, 4, 5)
>>> image = torch.rand(1, 3, 4, 5)
>>> loss = inverse_depth_smoothness_loss(idepth, image)
"""
if not isinstance(idepth, torch.Tensor):
raise TypeError(f"Input idepth type is not a torch.Tensor. Got {type(idepth)}")
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input image type is not a torch.Tensor. Got {type(image)}")
if not len(idepth.shape) == 4:
raise ValueError(f"Invalid idepth shape, we expect BxCxHxW. Got: {idepth.shape}")
if not len(image.shape) == 4:
raise ValueError(f"Invalid image shape, we expect BxCxHxW. Got: {image.shape}")
if not idepth.shape[-2:] == image.shape[-2:]:
raise ValueError(f"idepth and image shapes must be the same. Got: {idepth.shape} and {image.shape}")
if not idepth.device == image.device:
raise ValueError(f"idepth and image must be in the same device. Got: {idepth.device} and {image.device}")
if not idepth.dtype == image.dtype:
raise ValueError(f"idepth and image must be in the same dtype. Got: {idepth.dtype} and {image.dtype}")
# compute the gradients
idepth_dx: torch.Tensor = _gradient_x(idepth)
idepth_dy: torch.Tensor = _gradient_y(idepth)
image_dx: torch.Tensor = _gradient_x(image)
image_dy: torch.Tensor = _gradient_y(image)
# compute image weights
weights_x: torch.Tensor = torch.exp(-torch.mean(torch.abs(image_dx), dim=1, keepdim=True))
weights_y: torch.Tensor = torch.exp(-torch.mean(torch.abs(image_dy), dim=1, keepdim=True))
# apply image weights to depth
smoothness_x: torch.Tensor = torch.abs(idepth_dx * weights_x)
smoothness_y: torch.Tensor = torch.abs(idepth_dy * weights_y)
return torch.mean(smoothness_x) + torch.mean(smoothness_y)
class InverseDepthSmoothnessLoss(nn.Module):
r"""Criterion that computes image-aware inverse depth smoothness loss.
.. math::
\text{loss} = \left | \partial_x d_{ij} \right | e^{-\left \|
\partial_x I_{ij} \right \|} + \left |
\partial_y d_{ij} \right | e^{-\left \| \partial_y I_{ij} \right \|}
Shape:
- Inverse Depth: :math:`(N, 1, H, W)`
- Image: :math:`(N, 3, H, W)`
- Output: scalar
Examples:
>>> idepth = torch.rand(1, 1, 4, 5)
>>> image = torch.rand(1, 3, 4, 5)
>>> smooth = InverseDepthSmoothnessLoss()
>>> loss = smooth(idepth, image)
"""
def forward(self, idepth: torch.Tensor, image: torch.Tensor) -> torch.Tensor:
return inverse_depth_smoothness_loss(idepth, image)
|
Basic/Display Factors of A Number/SolutionByVaibhavTripathi.py | rajethanm4/Programmers-Community | 261 | 12723022 | num =int(input(" Input a Number: "))
def factorsOf(num):
factors=[]
for i in range(1,num+1):
if num%i==0:
factors.append(i)
print(factors)
factorsOf(num)
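# Optional optimisation (not part of the original solution): iterating i only up to
# int(num ** 0.5) and collecting both i and num // i on each hit reduces the loop
# from O(n) to O(sqrt(n)) divisions.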
|
moldesign/units/tools.py | Autodesk/molecular-design-toolkit | 147 | 12723040 | from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import utils
from .quantity import *
def unitsum(iterable):
"""
Faster method to compute sums of iterables if they're all in the right units
Args:
iterable (Iter[MdtQuantity]): iterable to sum over
Returns:
MdtQuantity: the sum
"""
g0 = next(iterable).copy()
for item in iterable:
if item.units == g0.units:
g0._magnitude += item._magnitude
else:
g0 += item
return g0
def dot(a1, a2):
""" Dot product that respects units
Args:
a1 (MdtQuantity or np.ndarray): First term in dot product
a2 (MdtQuantity or np.ndarray): Second term in dot product
Returns:
MdtQuantity or np.ndarray: dot product (MdtQuantity if either input has units, ndarray else)
"""
if isinstance(a2, MdtQuantity):
return a2.ldot(a1)
else: # this will work whether or not a1 has units
return a1.dot(a2)
@utils.args_from(np.linspace)
def linspace(start, stop, **kwargs):
u1 = getattr(start, 'units', ureg.dimensionless)
u2 = getattr(stop, 'units', ureg.dimensionless)
if u1 == u2 == ureg.dimensionless:
return np.linspace(start, stop, **kwargs)
else:
q1mag = start.magnitude
q2mag = stop.value_in(start.units)
return np.linspace(q1mag, q2mag, **kwargs) * start.units
def arrays_almost_equal(a1, a2):
""" Return true if arrays are almost equal up to numerical noise
Note:
This is assumes that absolute differences less than 1e-12 are insignificant. It is
therefore more likely to return "True" for very small numbers and
"False" for very big numbers. Caveat emptor.
Args:
a1 (MdtQuantity or np.ndarray): first array
a2 (MdtQuantity or np.ndarray): second array
Returns:
bool: True if arrays differ by no more than numerical noise in any element
Raises:
DimensionalityError: if the arrays have incompatible units
"""
a1units = False
if isinstance(a1, MdtQuantity):
if a1.dimensionless:
a1mag = a1.value_in(ureg.dimensionless)
else:
a1units = True
a1mag = a1.magnitude
else:
a1mag = a1
if isinstance(a2, MdtQuantity):
if a2.dimensionless:
if a1units:
raise DimensionalityError(a1.units, ureg.dimensionless,
"Cannot compare objects")
else:
a2mag = a2.value_in(ureg.dimensionless)
elif not a1units:
raise DimensionalityError(ureg.dimensionless, a2.units,
"Cannot compare objects")
else:
a2mag = a2.value_in(a1.units)
else:
if a1units:
raise DimensionalityError(a1.units, ureg.dimensionless,
"Cannot compare objects")
else:
a2mag = a2
return np.allclose(a1mag, a2mag, atol=1e-12)
def from_json(j):
"""
Convert a JSON description to a quantity.
This is the inverse of :meth:`moldesign.units.quantity.MDTQuantity.to_json`
Args:
j (dict): ``{value: <float>, units: <str>}``
Returns:
moldesign.units.quantity.MDTQuantity
"""
return j['value'] * ureg(j['units'])
def get_units(q):
""" Return the base unit system of an quantity or arbitrarily-nested iterables of quantities
Note: This routine will dive on the first element of iterables until a quantity with units
until the units can be determined. It will not check the remaining elements of the iterable
for consistency
Examples:
>>> from moldesign import units
>>> units.get_units(1.0 * units.angstrom)
<Unit('angstrom')>
>>> units.get_units(np.array([1.0, 2, 3.0]))
<Unit('dimensionless')>
>>> # We dive on the first element of each iterable until we can determine a unit system:
>>> units.get_units([[1.0 * u.dalton, 3.0 * u.eV], ['a'], 'gorilla'])
<Unit('amu')>
Args:
q (MdtQuantity or numeric): quantity to test
Returns:
MdtUnit: the quantity's units
"""
if isinstance(q, MdtUnit):
return q
x = q
while True:
try:
x = next(x.__iter__())
except (AttributeError, TypeError):
break
else:
if isinstance(x, str):
raise TypeError('Found string data while trying to determine units')
q = MdtQuantity(x)
if q.dimensionless:
return ureg.dimensionless
else:
return q.units
def array(qlist, defunits=False, _baseunit=None):
""" Facilitates creating an array with units - like numpy.array, but it also checks
units for all components of the array.
Note:
Unlike numpy.array, these arrays must have numeric type - this routine will
raise a ValueError if a non-square array is passed.
Args:
qlist (List[MdtQuantity]): List-like object of quantity objects
defunits (bool): if True, convert the array to the default units
Returns:
MdtQuantity: ndarray-like object with standardized units
Raises:
DimensionalityError: if the array has inconsistent units
ValueError: if the array could not be converted to a square numpy array
"""
from . import default
if hasattr(qlist, 'units') and hasattr(qlist, 'magnitude'):
return MdtQuantity(qlist)
if _baseunit is None:
_baseunit = get_units(qlist)
if _baseunit.dimensionless:
return _make_nparray(qlist)
if defunits:
_baseunit = default.get_default(_baseunit)
if hasattr(qlist, 'to'): # if already a quantity, just convert and return
return qlist.to(_baseunit)
try: # try to create a quantity
return _baseunit * [array(item, _baseunit=_baseunit).value_in(_baseunit) for item in qlist]
except TypeError: # if here, one or more objects cannot be converted to quantities
raise DimensionalityError(_baseunit, ureg.dimensionless,
extra_msg='Object "%s" does not have units' % qlist)
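# Illustrative example (assuming the standard pint unit names exposed by ``ureg``):
# ``array([1.0 * ureg.angstrom, 0.1 * ureg.nanometer])`` returns a quantity array expressed in
# angstrom, while mixing incompatible units (e.g. angstrom and eV) raises a DimensionalityError.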
def _make_nparray(q):
""" Turns a list of dimensionless numbers into a numpy array. Does not permit object arrays
"""
if hasattr(q, 'units'):
return q.value_in(ureg.dimensionless)
try:
arr = np.array([_make_nparray(x) for x in q])
if arr.dtype == 'O':
raise ValueError("Could not create numpy array of numeric data - is your input square?")
else:
return arr
except TypeError:
return q
@utils.args_from(np.broadcast_to)
def broadcast_to(arr, *args, **kwargs):
units = arr.units
newarr = np.zeros(2) * units
tmp = np.broadcast_to(arr, *args, **kwargs)
newarr._magnitude = tmp
return newarr
|
homeassistant/components/litejet/const.py | MrDelik/core | 30,023 | 12723053 | """LiteJet constants."""
from homeassistant.const import Platform
DOMAIN = "litejet"
CONF_EXCLUDE_NAMES = "exclude_names"
CONF_INCLUDE_SWITCHES = "include_switches"
PLATFORMS = [Platform.LIGHT, Platform.SCENE, Platform.SWITCH]
CONF_DEFAULT_TRANSITION = "default_transition"
|
python/disjointset/unoin_find.py | googege/algo-learn | 153 | 12723062 |
# 并查集的代码模板
class UnionFind:
def __init__(self, n: int):
self.count = n
self.parent = [i for i in range(n)]
def find(self, p: int):
temp = p
while p != self.parent[p]:
p = self.parent[p]
while temp != self.parent[p]:
temp, self.parent[temp] = self.parent[temp], p
return p
def union(self, p, q):
pSet, qSet = self.find(p), self.find(q)
if self.parent[pSet] != qSet:
self.parent[pSet] = qSet
self.count -= 1
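# Illustrative usage (example added for clarity, not part of the original template):
#   uf = UnionFind(5)
#   uf.union(0, 1)
#   uf.union(3, 4)
#   uf.find(0) == uf.find(1)   # True, 0 and 1 now share a root
#   uf.count                   # 3 disjoint sets remain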
|
decrypt.py | mbinary/netease-music-cracker | 324 | 12723094 |
# coding : utf-8
import re
import os
import sys
import getpass
import urllib3
import requests
from mutagen.easyid3 import EasyID3
from mutagen.mp3 import MP3
from mutagen.id3 import ID3, APIC, TIT2, TPE1, TALB, USLT
# ID3 info:
tagMap = {'cover': APIC, 'title': TIT2,
'artist': TPE1, 'album': TALB, 'lyric': USLT}
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {'User-agent': 'Mozilla/5.0'}
MSCDIR = './mp3'
# print(repr(s)[1:-1]) # deal with invalid encoded filename
class netease_music:
def __init__(self, path):
'''path: direcotory that contains cache files'''
self.path = os.path.abspath(path)
self.id_name = {i[:i.find('-')]: i for i in os.listdir(path)
if i.endswith('.uc') or i.endswith('.uc!')}
if self.id_name:
if not os.path.exists(MSCDIR):
os.mkdir(MSCDIR)
print('Input :', path)
print('Output:', MSCDIR)
else:
print('No cache file found in "{}"'.format(path))
def getInfoFromWeb(self, musicId):
# xpath for name and lrc:
# self.nameXpath ='//div[@class="tit"]/em[@class="f-ff2"]/text()'
# self.lrcSentencePt=re.compile(r'\[\d+:\d+\.\d+\](.*?)\\n') # wrong (r'\[\d+,\d+\](\(\d+,\d+\)(\w))+\n')
# api :
# type=song, lyric, comments, detail, artist, album, search
# eg API = 'https://api.imjad.cn/cloudmusic/?type=song&id=1234132' download music
dic = {}
url = 'http://music.163.com/api/song/detail/?ids=[' + musicId + ']'
res = requests.get(url, headers=headers).json()
info = res['songs'][0]
dic['artist'] = [info['artists'][0]['name']]
dic['title'] = [info['name']]
dic['cover'] = [info['album']['picUrl']]
dic['album'] = [info['album']['name']]
return dic
def getPath(self, dic, musicId):
'''get the name of music from info dict'''
title = dic['title'][0]
artist = dic['artist'][0]
name = title + '(' + artist+')'
for i in '>?*/\:"|<':
name = name.replace(i, '-') # convert to valid chars for file name
name = re.sub('\s', '_', name)
self.id_name[musicId] = name
return os.path.join(MSCDIR, name + '.mp3')
def decrypt(self, musicId, name):
def _decrypt(cachePath):
with open(cachePath, 'rb') as f:
btay = bytearray(f.read())
for i, j in enumerate(btay):
btay[i] = j ^ 0xa3
return btay
cachePath = os.path.join(self.path, name)
idpath = os.path.join(MSCDIR, musicId + '.mp3')
info = self.getInfoFromWeb(musicId)
path = self.getPath(info, musicId)
if not os.path.exists(path):
with open(path, 'wb') as f:
f.write(bytes(_decrypt(cachePath)))
''' get info from index file
if not os.path.exists(idpath):
with open(idpath, 'wb') as f:
f.write(bytes(_decrypt(cachePath)))
try:
info = dict(MP3(idpath, ID3=EasyID3))
except:
info = {}
if info != {}:
path = self.getPath(info, musicId)
if os.path.exists(path):
os.remove(idpath)
else:
os.rename(idpath, path)
else:
os.remove(idpath)
'''
return info, path
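    # Note: the cache format is simply the audio stream XOR-ed byte-wise with 0xA3,
    # which is exactly what _decrypt above reverses before tagging the MP3.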
def getLyric(self, musicId):
url = 'http://music.163.com/api/song/lyric?id=' + musicId + '&lv=1&tv=-1'
lrc = ''
try:
lyric = requests.get(url, headers=headers).json()
lrc = lyric['lrc']['lyric']
tlrc = lyric['tlyric']['lyric']
# merge multi-lang lyrics
dic = {}
for i in lrc.splitlines():
a = i.replace('[', ']').strip().split("]")
dic[a[1].strip()+' '] = a[-1].strip()
tdic = {}
for m in tlrc.splitlines():
n = m.replace('[', ']').strip().split(']')
tdic[n[1].strip()] = n[-1].strip()
dicCopy = dic.copy()
dicCopy.update(tdic)
lines = []
for k, v in sorted(dicCopy.items(), key=lambda item: item[0]):
lines.append("[%s]%s" % (k.strip(), v))
lrc = "\n".join(lines)
except Exception as e:
pass
return lrc
def setID3(self, lrc, info, path):
tags = ID3(path)
# remove old unsychronized lyrics
len(tags.getall("USLT")) != 0 and tags.delall("USLT")
for t in ['album', 'title', 'artist']:
t in info and tags.add(
tagMap[t](encoding=3, lang='', desc='', text=info[t][0]))
'cover' in info and tags.add(APIC(
encoding=3,
mime='image/png',
type=3,
desc='cover',
data=requests.get(info['cover'][0], stream=True,
headers=headers).raw.read()
))
tags.add(USLT(encoding=3, lang='eng', desc='aaa', text=lrc))
tags.save()
def getMusic(self):
ct = 0 # count converted files
for musicId, name in self.id_name.items():
try:
info, path = self.decrypt(musicId, name)
ct += 1
print('[{}]'.format(ct).ljust(6) + self.id_name[musicId])
self.setID3(self.getLyric(musicId), info, path)
except Exception as e:
pass
def main(path=''):
if not path:
pre = '/'.join(os.getcwd().split(os.sep)[:3])
if os.sys.platform.lower().startswith('win'): # windows
path = pre + '/AppData/Local/Netease/CloudMusic/Cache/Cache'
else: # mac or linux
path = pre + '/Library/Containers/com.netease.163music/Data/Caches/online_play_cache'
if os.path.isdir(path):
netease_music(path).getMusic()
else:
print('Directory "{}" does not exist, specify cache files directory instead'.format(path))
if __name__ == '__main__':
main() if len(sys.argv) < 2 else main(sys.argv[1])
|
examples/timesheet/libtimesheet/controller/StartupCommand.py | takipsizad/pyjs | 739 | 12723102 |
# vim: set ts=4 sw=4 expandtab:
from puremvc.patterns.command import SimpleCommand
from libtimesheet.model.TimeProxy import TimeProxy
from libtimesheet.view.DialogMediator import DialogMediator
from libtimesheet.view.MenuMediator import MenuMediator
from libtimesheet.view.DatePickerMediator import DatePickerMediator
from libtimesheet.view.TimeGridMediator import TimeGridMediator
from libtimesheet.view.SummaryMediator import SummaryMediator
class StartupCommand(SimpleCommand):
def execute(self,note):
self.facade.registerProxy(TimeProxy())
mainPanel = note.getBody()
self.facade.registerMediator(DialogMediator(mainPanel))
self.facade.registerMediator(MenuMediator(mainPanel.menuBar))
self.facade.registerMediator(TimeGridMediator(mainPanel.timeGrid))
self.facade.registerMediator(SummaryMediator(mainPanel.summary))
# This one must be registered last, or at least after TimeGridMediator
# Fires DATE_SELECTED notification, which is used in TimeGridMediator
self.facade.registerMediator(DatePickerMediator(mainPanel.datePicker))
|
mcpipy/maze.py | wangtt03/raspberryjammod | 338 | 12723104 | from mine import *
from sys import argv
from random import randint
DIRS = ((1,0),(0,1),(-1,0),(0,-1))
def generateMaze(xSize, ySize, start=(0,0), dirs=DIRS, inside=None):
if inside == None:
inside = lambda xy : 0 <= xy[0] < xSize and 0 <= xy[1] < ySize
def move(pos, dir):
return (pos[0]+dirs[dir][0],pos[1]+dirs[dir][1])
nDirs = len(dirs)
def findDir(v):
for i in range(nDirs):
if dirs[i] == v:
return i
raise Exception("Mismatched direction")
revDir = tuple( findDir((-dirs[i][0],-dirs[i][1])) for i in range(nDirs) )
visited = tuple([False for j in range(ySize)] for i in range(xSize))
walls = tuple(tuple( [True for j in range(nDirs)] for j in range(ySize) ) for i in range(xSize))
pos = start
def getVisited(pos):
return not inside(pos) or visited[pos[0]][pos[1]]
stack = []
while True:
visited[pos[0]][pos[1]] = True
nUnvisited = 0
for dir in range(nDirs):
if not getVisited(move(pos,dir)):
nUnvisited += 1
if nUnvisited == 0:
if stack:
pos = stack.pop()
continue
else:
break
n = randint(0,nUnvisited-1)
dir = 0
while True:
if not getVisited(move(pos,dir)):
if n == 0:
break
n -= 1
dir += 1
walls[pos[0]][pos[1]][dir] = False
pos = move(pos,dir)
walls[pos[0]][pos[1]][revDir[dir]] = False
stack.append(pos)
return walls
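# generateMaze implements a depth-first ("recursive backtracker") maze: walls[x][y][dir] stays
# True while the wall of cell (x, y) in direction dirs[dir] is intact, and the random walk knocks
# down the wall between the current cell and a randomly chosen unvisited neighbour, retreating
# via the explicit stack whenever every neighbour has been visited.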
xSize = 40
ySize = 40
b = block.STONE
if len(argv)>1:
xSize = int(argv[1])
ySize = xSize
if len(argv)>2:
b = Block.byName(argv[2])
mc = Minecraft()
walls = generateMaze(xSize,ySize)
pos = mc.player.getTilePos()
pos.x += 1
my = pos.y
for x in range(xSize):
for y in range(ySize):
mx = 2*x + pos.x
mz = 2*y + pos.z
def set(d1,d2):
mc.setBlock(mx+d1,my,mz+d2,b)
mc.setBlock(mx+d1,my+1,mz+d2,b)
for dir in range(len(DIRS)):
if walls[x][y][dir]:
set(DIRS[dir][0],DIRS[dir][1])
set(1,1)
set(1,-1)
set(-1,1)
set(-1,-1)
mc.setBlock(pos.x-1,pos.y,pos.z,block.AIR)
mc.setBlock(pos.x-1,pos.y+1,pos.z,block.AIR)
mc.setBlock(pos.x+2*(xSize-1)+1,pos.y-1,pos.z+2*(ySize-1),block.GOLD_BLOCK)
mc.setBlockWithNBT(pos.x+2*(xSize-1)+1,pos.y,pos.z+2*(ySize-1),block.SIGN('EXIT',headingAngle=270))
mc.setBlock(pos.x+2*(xSize-1)+1,pos.y+1,pos.z+2*(ySize-1),block.AIR)
|
src/Deform_Conv/functions/deform_psroi_pooling_func.py | Kirillova-Anastasia/LGFN | 181 | 12723125 | <gh_stars>100-1000
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import torch
from torch import nn
from torch.autograd import Function
from torch.nn.modules.utils import _pair
from torch.autograd.function import once_differentiable
import DCN
class DeformRoIPoolingFunction(Function):
@staticmethod
def forward(ctx, input, rois, offset,
spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0):
ctx.spatial_scale = spatial_scale
ctx.no_trans = int(no_trans)
ctx.output_dim = output_dim
ctx.group_size = group_size
ctx.pooled_size = pooled_size
ctx.part_size = pooled_size if part_size is None else part_size
ctx.sample_per_part = sample_per_part
ctx.trans_std = trans_std
output, output_count = \
DCN.deform_psroi_pooling_forward(input, rois, offset,
ctx.no_trans, ctx.spatial_scale,
ctx.output_dim, ctx.group_size,
ctx.pooled_size, ctx.part_size,
ctx.sample_per_part, ctx.trans_std)
ctx.save_for_backward(input, rois, offset, output_count)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, rois, offset, output_count = ctx.saved_tensors
grad_input, grad_offset = \
DCN.deform_psroi_pooling_backward(grad_output,
input,
rois,
offset,
output_count,
ctx.no_trans,
ctx.spatial_scale,
ctx.output_dim,
ctx.group_size,
ctx.pooled_size,
ctx.part_size,
ctx.sample_per_part,
ctx.trans_std)
return grad_input, None, grad_offset, \
None, None, None, None, None, None, None, None
|
modules/autoBump.py | logicguy1/The-all-in-one-discord-tool | 105 | 12723131 | LICENCE = """
Copyright © 2021 Drillenissen#4268
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from colored import fg, attr
import requests
import time
r = fg(241) # Setup color variables
r2 = fg(255)
b = fg(31)
w = fg(15)
def bumper():
headers = {
'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',
'Authorization' : input(f"\n {r2}[{b}?{r2}] Token: ")
}
id = input(f" {r2}[{b}?{r2}] Channel ID: ")
print(f" {r2}[{b}!{r2}] Use ^C to exit")
time.sleep(.3)
print("")
while True:
requests.post(
f"https://discord.com/api/channels/{id}/messages",
headers = headers,
json = {"content" : "!d bump"}
)
print(f" {r2}[{b}+{r2}] Server Bumped")
time.sleep(121 * 60)
|
experiments/nbody/nbody_run.py | yunmeng103/se3-transformer-public | 307 | 12723138 | from utils.utils_profiling import * # load before other local modules
import argparse
import os
import sys
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import dgl
import numpy as np
import torch
import wandb
import time
import datetime
from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader
from experiments.nbody.nbody_dataloader import RIDataset
from utils import utils_logging
from experiments.nbody import nbody_models as models
from equivariant_attention.from_se3cnn.SO3 import rot
from experiments.nbody.nbody_flags import get_flags
def to_np(x):
return x.cpu().detach().numpy()
def get_acc(pred, x_T, v_T, y=None, verbose=True):
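    # Mean-squared errors of the predicted final positions/velocities against
    # the ground truth (pred[:, 0, :] holds positions, pred[:, 1, :] velocities);
    # when verbose, also reports the overall MSE against y.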
acc_dict = {}
pred = to_np(pred)
x_T = to_np(x_T)
v_T = to_np(v_T)
assert len(pred) == len(x_T)
if verbose:
y = np.asarray(y.cpu())
_sq = (pred - y) ** 2
acc_dict['mse'] = np.mean(_sq)
_sq = (pred[:, 0, :] - x_T) ** 2
acc_dict['pos_mse'] = np.mean(_sq)
_sq = (pred[:, 1, :] - v_T) ** 2
acc_dict['vel_mse'] = np.mean(_sq)
return acc_dict
def train_epoch(epoch, model, loss_fnc, dataloader, optimizer, schedul, FLAGS):
model.train()
loss_epoch = 0
num_iters = len(dataloader)
wandb.log({"lr": optimizer.param_groups[0]['lr']}, commit=False)
for i, (g, y1, y2) in enumerate(dataloader):
g = g.to(FLAGS.device)
x_T = y1.to(FLAGS.device).view(-1, 3)
v_T = y2.to(FLAGS.device).view(-1, 3)
y = torch.stack([x_T, v_T], dim=1)
optimizer.zero_grad()
# run model forward and compute loss
pred = model(g)
loss = loss_fnc(pred, y)
loss_epoch += to_np(loss)
if torch.isnan(loss):
import pdb
pdb.set_trace()
# backprop
loss.backward()
optimizer.step()
# print to console
if i % FLAGS.print_interval == 0:
print(
f"[{epoch}|{i}] loss: {loss:.5f}")
# log to wandb
if i % FLAGS.log_interval == 0:
# 'commit' is only set to True here, meaning that this is where
# wandb counts the steps
wandb.log({"Train Batch Loss": to_np(loss)}, commit=True)
# exit early if only do profiling
if FLAGS.profile and i == 10:
sys.exit()
schedul.step(epoch + i / num_iters)
# log train accuracy for entire epoch to wandb
loss_epoch /= len(dataloader)
wandb.log({"Train Epoch Loss": loss_epoch}, commit=False)
def test_epoch(epoch, model, loss_fnc, dataloader, FLAGS, dT):
model.eval()
keys = ['pos_mse', 'vel_mse']
acc_epoch = {k: 0.0 for k in keys}
acc_epoch_blc = {k: 0.0 for k in keys} # for constant baseline
acc_epoch_bll = {k: 0.0 for k in keys} # for linear baseline
loss_epoch = 0.0
for i, (g, y1, y2) in enumerate(dataloader):
g = g.to(FLAGS.device)
x_T = y1.view(-1, 3)
v_T = y2.view(-1, 3)
y = torch.stack([x_T, v_T], dim=1).to(FLAGS.device)
# run model forward and compute loss
pred = model(g).detach()
loss_epoch += to_np(loss_fnc(pred, y)/len(dataloader))
acc = get_acc(pred, x_T, v_T, y=y)
for k in keys:
acc_epoch[k] += acc[k]/len(dataloader)
# eval constant baseline
bl_pred = torch.zeros_like(pred)
acc = get_acc(bl_pred, x_T, v_T, verbose=False)
for k in keys:
acc_epoch_blc[k] += acc[k]/len(dataloader)
# eval linear baseline
# Apply linear update to locations.
bl_pred[:, 0, :] = dT * g.ndata['v'][:, 0, :]
acc = get_acc(bl_pred, x_T, v_T, verbose=False)
for k in keys:
acc_epoch_bll[k] += acc[k] / len(dataloader)
print(f"...[{epoch}|test] loss: {loss_epoch:.5f}")
wandb.log({"Test loss": loss_epoch}, commit=False)
for k in keys:
wandb.log({"Test " + k: acc_epoch[k]}, commit=False)
wandb.log({'Const. BL pos_mse': acc_epoch_blc['pos_mse']}, commit=False)
wandb.log({'Linear BL pos_mse': acc_epoch_bll['pos_mse']}, commit=False)
wandb.log({'Linear BL vel_mse': acc_epoch_bll['vel_mse']}, commit=False)
class RandomRotation(object):
def __init__(self):
pass
def __call__(self, x):
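        # QR-decomposing a Gaussian random matrix yields an orthogonal matrix Q
        # (a random rotation/reflection), which is applied to the input points.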
M = np.random.randn(3, 3)
Q, __ = np.linalg.qr(M)
return x @ Q
def collate(samples):
graphs, y1, y2 = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
return batched_graph, torch.stack(y1), torch.stack(y2)
def main(FLAGS, UNPARSED_ARGV):
# Prepare data
train_dataset = RIDataset(FLAGS, split='train')
train_loader = DataLoader(train_dataset,
batch_size=FLAGS.batch_size,
shuffle=True,
collate_fn=collate,
num_workers=FLAGS.num_workers,
drop_last=True)
test_dataset = RIDataset(FLAGS, split='test')
# drop_last is only here so that we can count accuracy correctly;
test_loader = DataLoader(test_dataset,
batch_size=FLAGS.batch_size,
shuffle=False,
collate_fn=collate,
num_workers=FLAGS.num_workers,
drop_last=True)
# time steps
assert train_dataset.data['delta_T'] == test_dataset.data['delta_T']
assert train_dataset.data['sample_freq'] == test_dataset.data['sample_freq']
print(f'deltaT: {train_dataset.data["delta_T"]}, '
f'freq: {train_dataset.data["sample_freq"]}, '
f'FLAGS.ri_delta_t: {FLAGS.ri_delta_t}')
dT = train_dataset.data['delta_T'] * train_dataset.data[
'sample_freq'] * FLAGS.ri_delta_t
FLAGS.train_size = len(train_dataset)
FLAGS.test_size = len(test_dataset)
assert len(test_dataset) < len(train_dataset)
model = models.__dict__.get(FLAGS.model)(FLAGS.num_layers, FLAGS.num_channels, num_degrees=FLAGS.num_degrees,
div=FLAGS.div, n_heads=FLAGS.head, si_m=FLAGS.simid, si_e=FLAGS.siend,
x_ij=FLAGS.xij)
utils_logging.write_info_file(model, FLAGS=FLAGS, UNPARSED_ARGV=UNPARSED_ARGV, wandb_log_dir=wandb.run.dir)
if FLAGS.restore is not None:
model.load_state_dict(torch.load(FLAGS.restore))
model.to(FLAGS.device)
# Optimizer settings
optimizer = optim.Adam(model.parameters(), lr=FLAGS.lr)
scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, FLAGS.num_epochs, eta_min=1e-4)
criterion = nn.MSELoss()
criterion = criterion.to(FLAGS.device)
task_loss = criterion
# Save path
save_path = os.path.join(FLAGS.save_dir, FLAGS.name + '.pt')
# Run training
print('Begin training')
for epoch in range(FLAGS.num_epochs):
torch.save(model.state_dict(), save_path)
print(f"Saved: {save_path}")
train_epoch(epoch, model, task_loss, train_loader, optimizer, scheduler, FLAGS)
test_epoch(epoch, model, task_loss, test_loader, FLAGS, dT)
if __name__ == '__main__':
FLAGS, UNPARSED_ARGV = get_flags()
os.makedirs(FLAGS.save_dir, exist_ok=True)
# Log all args to wandb
wandb.init(project='equivariant-attention', name=FLAGS.name, config=FLAGS)
wandb.save('*.txt')
# Where the magic is
try:
main(FLAGS, UNPARSED_ARGV)
except Exception:
import pdb, traceback
traceback.print_exc()
pdb.post_mortem()
|
pypdb/clients/search/operators/seqmotif_operators.py | JBGreisman/pypdb | 194 | 12723146 | <filename>pypdb/clients/search/operators/seqmotif_operators.py
"""Operators associated with SeqMotif searching using RCSB Search API."""
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict
class SequenceType(Enum):
"""Type of sequence being searched for motifs."""
DNA = "pdb_dna_sequence"
RNA = "pdb_rna_sequence"
PROTEIN = "pdb_protein_sequence"
class PatternType(Enum):
"""Type of pattern being used for SeqMotif search."""
SIMPLE = "simple"
PROSITE = "prosite"
REGEX = "regex"
@dataclass
class SeqMotifOperator:
# Pattern to search with
pattern: str
sequence_type: SequenceType
pattern_type: PatternType
def _to_dict(self) -> Dict[str, Any]:
return {
"value": self.pattern,
"pattern_type": self.pattern_type.value,
"target": self.sequence_type.value
}
# DO NOT APPROVE: DO NOT APPROVE THIS CL UNTIL ADDED TO VALIDATION
|
python/federatedml/components/secure_add_example.py | QuantumA/FATE | 715 | 12723175 | <reponame>QuantumA/FATE
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta
secure_add_example_cpn_meta = ComponentMeta("SecureAddExample")
@secure_add_example_cpn_meta.bind_param
def secure_add_example_param():
from federatedml.param.secure_add_example_param import SecureAddExampleParam
return SecureAddExampleParam
@secure_add_example_cpn_meta.bind_runner.on_guest
def secure_add_example_guest_runner():
from federatedml.toy_example.secure_add_guest import SecureAddGuest
return SecureAddGuest
@secure_add_example_cpn_meta.bind_runner.on_host
def secure_add_example_host_runner():
from federatedml.toy_example.secure_add_host import SecureAddHost
return SecureAddHost
|
WebMirror/management/rss_parser_funcs/feed_parse_extractXiakeluojiao侠客落脚.py | fake-name/ReadableWebProxy | 193 | 12723201 | def extractXiakeluojiao侠客落脚(item):
"""
Xiakeluojiao 侠客落脚
"""
badwords = [
'korean drama',
'badword',
]
if any([bad in item['tags'] for bad in badwords]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
tagmap = [
('Pr<NAME>iyang', 'The <NAME>', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
if item['title'].startswith("Chapter ") and item['tags'] == []:
return buildReleaseMessageWithType(item, 'Zhu Xian', vol, chp, frag=frag, postfix=postfix)
return False |
models/googlenet.py | MahimaGaur31/mlModels | 354 | 12723262 | from collections import OrderedDict
import torch
import torch.nn as nn
__all__ = ['googlenet']
class Inception_v1_GoogLeNet(nn.Module):
input_side = 227
rescale = 255.0
rgb_mean = [122.7717, 115.9465, 102.9801]
rgb_std = [1, 1, 1]
def __init__(self, num_classes=1000):
super(Inception_v1_GoogLeNet, self).__init__()
self.num_classes = num_classes
self.features = nn.Sequential(
OrderedDict([
('conv1', nn.Sequential(OrderedDict([
('7x7_s2', nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3), bias=False)),
('7x7_s2_bn', nn.BatchNorm2d(64, affine=True)),
('relu1', nn.ReLU(True)),
('pool1', nn.MaxPool2d((3, 3), (2, 2), padding=(1,1)))
]))),
('conv2', nn.Sequential(OrderedDict([
('3x3_reduce', nn.Conv2d(64, 64, (1, 1), (1, 1), (0, 0), bias=False)),
('3x3_reduce_bn', nn.BatchNorm2d(64, affine=True)),
('relu1', nn.ReLU(True)),
('3x3', nn.Conv2d(64, 192, (3, 3), (1, 1), (1, 1), bias=False)),
('3x3_bn', nn.BatchNorm2d(192, affine=True)),
('relu2', nn.ReLU(True)),
('pool2', nn.MaxPool2d((3, 3), (2, 2), padding=(1,1)))
]))),
('inception_3a', InceptionModule(192, 64, 96, 128, 16, 32, 32)),
('inception_3b', InceptionModule(256, 128, 128, 192, 32, 96, 64)),
('pool3', nn.MaxPool2d((3, 3), (2, 2), padding=(1,1))),
('inception_4a', InceptionModule(480, 192, 96, 208, 16, 48, 64)),
('inception_4b', InceptionModule(512, 160, 112, 224, 24, 64, 64)),
('inception_4c', InceptionModule(512, 128, 128, 256, 24, 64, 64)),
('inception_4d', InceptionModule(512, 112, 144, 288, 32, 64, 64)),
('inception_4e', InceptionModule(528, 256, 160, 320, 32, 128, 128)),
('pool4', nn.MaxPool2d((3, 3), (2, 2), padding=(1,1))),
('inception_5a', InceptionModule(832, 256, 160, 320, 32, 128, 128)),
('inception_5b', InceptionModule(832, 384, 192, 384, 48, 128, 128)),
('pool5', nn.AvgPool2d((7, 7), (1, 1))),
('drop5', nn.Dropout(0.2))
]))
self.classifier = nn.Linear(1024, self.num_classes)
self.regime = [
{'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1,
'weight_decay': 1e-4, 'momentum': 0.9},
{'epoch': 30, 'lr': 1e-2},
{'epoch': 60, 'lr': 1e-3, 'weight_decay': 0},
{'epoch': 90, 'lr': 1e-3, 'optimizer': 'Adam'}
]
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class InceptionModule(nn.Module):
def __init__(self, inplane, outplane_a1x1, outplane_b3x3_reduce, outplane_b3x3, outplane_c5x5_reduce, outplane_c5x5,
outplane_pool_proj):
super(InceptionModule, self).__init__()
a = nn.Sequential(OrderedDict([
('1x1', nn.Conv2d(inplane, outplane_a1x1, (1, 1), (1, 1), (0, 0), bias=False)),
('1x1_bn', nn.BatchNorm2d(outplane_a1x1, affine=True)),
('1x1_relu', nn.ReLU(True))
]))
b = nn.Sequential(OrderedDict([
('3x3_reduce', nn.Conv2d(inplane, outplane_b3x3_reduce, (1, 1), (1, 1), (0, 0), bias=False)),
('3x3_reduce_bn', nn.BatchNorm2d(outplane_b3x3_reduce, affine=True)),
('3x3_relu1', nn.ReLU(True)),
('3x3', nn.Conv2d(outplane_b3x3_reduce, outplane_b3x3, (3, 3), (1, 1), (1, 1), bias=False)),
('3x3_bn', nn.BatchNorm2d(outplane_b3x3, affine=True)),
('3x3_relu2', nn.ReLU(True))
]))
c = nn.Sequential(OrderedDict([
('5x5_reduce', nn.Conv2d(inplane, outplane_c5x5_reduce, (1, 1), (1, 1), (0, 0), bias=False)),
('5x5_reduce_bn', nn.BatchNorm2d(outplane_c5x5_reduce, affine=True)),
('5x5_relu1', nn.ReLU(True)),
('5x5', nn.Conv2d(outplane_c5x5_reduce, outplane_c5x5, (5, 5), (1, 1), (2, 2), bias=False)),
('5x5_bn', nn.BatchNorm2d(outplane_c5x5, affine=True)),
('5x5_relu2', nn.ReLU(True))
]))
d = nn.Sequential(OrderedDict([
('pool_pool', nn.MaxPool2d((3, 3), (1, 1), (1, 1))),
('pool_proj', nn.Conv2d(inplane, outplane_pool_proj, (1, 1), (1, 1), (0, 0))),
('pool_proj_bn', nn.BatchNorm2d(outplane_pool_proj, affine=True)),
('pool_relu', nn.ReLU(True))
]))
for container in [a, b, c, d]:
for name, module in container.named_children():
self.add_module(name, module)
self.branches = [a, b, c, d]
def forward(self, input):
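        # Apply the four parallel branches (1x1, 3x3, 5x5, pooled projection)
        # to the same input and concatenate their outputs along the channel axis.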
return torch.cat([branch(input) for branch in self.branches], 1)
def googlenet(**kwargs):
    num_classes = kwargs.get('num_classes', 1000)
return Inception_v1_GoogLeNet(num_classes) |
homeassistant/components/mqtt/device_tracker/__init__.py | tbarbette/core | 22,481 | 12723265 | <reponame>tbarbette/core<filename>homeassistant/components/mqtt/device_tracker/__init__.py
"""Support for tracking MQTT enabled devices."""
from .schema_discovery import async_setup_entry_from_discovery
from .schema_yaml import PLATFORM_SCHEMA_YAML, async_setup_scanner_from_yaml
PLATFORM_SCHEMA = PLATFORM_SCHEMA_YAML
async_setup_scanner = async_setup_scanner_from_yaml
async_setup_entry = async_setup_entry_from_discovery
|
vermin/formats/format.py | loganswartz/vermin | 261 | 12723271 | <filename>vermin/formats/format.py
from abc import ABCMeta, abstractmethod
class Format:
"""Format encapsulates a format for presenting minimum versions and related information during
processing."""
# Can't use `class Format(metaclass=ABCMeta)` in py2.
__metaclass__ = ABCMeta
def __init__(self, name):
self.__name = name
self.__config = None
def name(self):
return self.__name
def config(self):
return self.__config
def set_config(self, config):
self.__config = config
@staticmethod
def require_config(funcobj):
"""Decorator that checks config is not None."""
def _require_config(self, *args, **kwargs):
assert(self.config() is not None)
return funcobj(self, *args, **kwargs)
return _require_config
@abstractmethod
def skip_output_line(self):
"""Whether or not to skip outputting a line."""
@abstractmethod
def format_output_line(self, msg, path=None, line=None, col=None, versions=None):
"""Yield formatted output line given file name, line, column, text, minimum versions."""
@abstractmethod
def output_result(self, proc_res):
"""Output processed result."""
@abstractmethod
def sort_output_lines(self, lines):
"""Sort and return output lines."""
|
models/vip.py | kevin-ssy/ViP | 107 | 12723285 | <reponame>kevin-ssy/ViP<gh_stars>100-1000
"""
ViP Architecture in PyTorch
Copyright 2021 <NAME>
"""
import math
import torch.nn.init as init
from timm.models.registry import register_model
from timm.models.layers import DropPath
from .vip_layers import *
class PatchEmbed(nn.Module):
def __init__(self, stride, has_mask=False, in_ch=0, out_ch=0):
super(PatchEmbed, self).__init__()
self.to_token = nn.Conv2d(in_ch, in_ch, kernel_size=3, padding=1, stride=stride, groups=in_ch)
self.proj = nn.Linear(in_ch, out_ch, bias=False)
self.has_mask = has_mask
def process_mask(self, x, mask, H, W):
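        # Lazily create an all-zero mask when one is required, and resize any
        # existing mask (nearest-neighbour) to the current feature-map size.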
if mask is None and self.has_mask:
mask = x.new_zeros((1, 1, H, W))
if mask is not None:
H_mask, W_mask = mask.shape[-2:]
if H_mask != H or W_mask != W:
mask = F.interpolate(mask, (H, W), mode='nearest')
return mask
def forward(self, x, mask):
"""
Args:
x: [B, C, H, W]
mask: [B, 1, H, W] if exists, else None
Returns:
out: [B, out_H * out_W, out_C]
H, W: output height & width
mask: [B, 1, out_H, out_W] if exists, else None
"""
out = self.to_token(x)
B, C, H, W = out.shape
mask = self.process_mask(out, mask, H, W)
out = rearrange(out, "b c h w -> b (h w) c").contiguous()
out = self.proj(out)
return out, H, W, mask
class Encoder(nn.Module):
def __init__(self, dim, num_parts=64, num_enc_heads=1, drop_path=0.1, act=nn.GELU, has_ffn=True):
super(Encoder, self).__init__()
self.num_heads = num_enc_heads
self.enc_attn = AnyAttention(dim, num_enc_heads)
self.drop_path = DropPath(drop_prob=drop_path) if drop_path else nn.Identity()
self.reason = SimpleReasoning(num_parts, dim)
self.enc_ffn = Mlp(dim, hidden_features=dim, act_layer=act) if has_ffn else None
def forward(self, feats, parts=None, qpos=None, kpos=None, mask=None):
"""
Args:
feats: [B, patch_num * patch_size, C]
parts: [B, N, C]
qpos: [B, N, 1, C]
kpos: [B, patch_num * patch_size, C]
mask: [B, 1, patch_num, patch_size] if exists, else None
Returns:
parts: [B, N, C]
"""
attn_out = self.enc_attn(q=parts, k=feats, v=feats, qpos=qpos, kpos=kpos, mask=mask)
parts = parts + self.drop_path(attn_out)
parts = self.reason(parts)
if self.enc_ffn is not None:
parts = parts + self.drop_path(self.enc_ffn(parts))
return parts
class Decoder(nn.Module):
def __init__(self, dim, num_heads=8, patch_size=7, ffn_exp=3, act=nn.GELU, drop_path=0.1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
self.attn1 = AnyAttention(dim, num_heads)
self.attn2 = AnyAttention(dim, num_heads)
self.rel_pos = FullRelPos(patch_size, patch_size, dim // num_heads)
self.ffn1 = Mlp(dim, hidden_features=dim * ffn_exp, act_layer=act, norm_layer=Norm)
self.ffn2 = Mlp(dim, hidden_features=dim * ffn_exp, act_layer=act, norm_layer=Norm)
self.drop_path = DropPath(drop_path)
def forward(self, x, parts=None, part_kpos=None, mask=None, P=0):
"""
Args:
x: [B, patch_num * patch_size, C]
parts: [B, N, C]
part_kpos: [B, N, 1, C]
mask: [B, 1, patch_num, patch_size] if exists, else None
P: patch_num
Returns:
feat: [B, patch_num, patch_size, C]
"""
dec_mask = None if mask is None else rearrange(mask.squeeze(1), "b h w -> b (h w) 1 1")
out = self.attn1(q=x, k=parts, v=parts, kpos=part_kpos, mask=dec_mask)
out = x + self.drop_path(out)
out = out + self.drop_path(self.ffn1(out))
out = rearrange(out, "b (p k) c -> (b p) k c", p=P)
local_out = self.attn2(q=out, k=out, v=out, mask=mask, rel_pos=self.rel_pos)
out = out + self.drop_path(local_out)
out = out + self.drop_path(self.ffn2(out))
return rearrange(out, "(b p) k c -> b p k c", p=P)
class ViPBlock(nn.Module):
def __init__(self, dim, ffn_exp=4, drop_path=0.1, patch_size=7, num_heads=1, num_enc_heads=1, num_parts=0):
super(ViPBlock, self).__init__()
self.encoder = Encoder(dim, num_parts=num_parts, num_enc_heads=num_enc_heads, drop_path=drop_path)
self.decoder = Decoder(dim, num_heads=num_heads, patch_size=patch_size, ffn_exp=ffn_exp, drop_path=drop_path)
def forward(self, x, parts=None, part_qpos=None, part_kpos=None, mask=None):
"""
Args:
x: [B, patch_num, patch_size, C]
parts: [B, N, C]
part_qpos: [B, N, 1, C]
part_kpos: [B, N, 1, C]
mask: [B, 1, patch_num, patch_size] if exists, else None
Returns:
feats: [B, patch_num, patch_size, C]
parts: [B, N, C]
part_qpos: [B, N, 1, C]
mask: [B, 1, patch_num, patch_size] if exists, else None
"""
P = x.shape[1]
x = rearrange(x, "b p k c -> b (p k) c")
parts = self.encoder(x, parts=parts, qpos=part_qpos, mask=mask)
feats = self.decoder(x, parts=parts, part_kpos=part_kpos, mask=mask, P=P)
return feats, parts, part_qpos, mask
class Stage(nn.Module):
def __init__(self, in_ch, out_ch, num_blocks, patch_size=7, num_heads=1, num_enc_heads=1, stride=1, num_parts=0,
last_np=0, last_enc=False, drop_path=0.1, has_mask=None, ffn_exp=3):
super(Stage, self).__init__()
if isinstance(drop_path, float):
drop_path = [drop_path for _ in range(num_blocks)]
self.patch_size = patch_size
self.rpn_qpos = nn.Parameter(torch.Tensor(1, num_parts, 1, out_ch // num_enc_heads))
self.rpn_kpos = nn.Parameter(torch.Tensor(1, num_parts, 1, out_ch // num_heads))
self.proj = PatchEmbed(stride, has_mask=has_mask, in_ch=in_ch, out_ch=out_ch)
self.proj_token = nn.Sequential(
nn.Conv1d(last_np, num_parts, 1, bias=False) if last_np != num_parts else nn.Identity(),
nn.Linear(in_ch, out_ch),
Norm(out_ch)
)
self.proj_norm = Norm(out_ch)
blocks = [
ViPBlock(out_ch,
patch_size=patch_size,
num_heads=num_heads,
num_enc_heads=num_enc_heads,
num_parts=num_parts,
ffn_exp=ffn_exp,
drop_path=drop_path[i])
for i in range(num_blocks)
]
self.blocks = nn.ModuleList(blocks)
self.last_enc = Encoder(dim=out_ch,
num_enc_heads=num_enc_heads,
num_parts=num_parts,
drop_path=drop_path[-1],
has_ffn=False) if last_enc else None
self._init_weights()
def _init_weights(self):
init.kaiming_uniform_(self.rpn_qpos, a=math.sqrt(5))
trunc_normal_(self.rpn_qpos, std=.02)
init.kaiming_uniform_(self.rpn_kpos, a=math.sqrt(5))
trunc_normal_(self.rpn_kpos, std=.02)
def to_patch(self, x, patch_size, H, W, mask=None):
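        # Pad H and W up to multiples of patch_size, then split the feature map
        # (and pad the mask to match) into non-overlapping patch_size x patch_size
        # windows so attention can be computed locally inside each window.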
x = rearrange(x, "b (h w) c -> b h w c", h=H)
pad_l = pad_t = 0
pad_r = int(math.ceil(W / patch_size)) * patch_size - W
pad_b = int(math.ceil(H / patch_size)) * patch_size - H
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
if mask is not None:
mask = F.pad(mask, (pad_l, pad_r, pad_t, pad_b), value=1)
x = rearrange(x, "b (sh kh) (sw kw) c -> b (sh sw) (kh kw) c", kh=patch_size, kw=patch_size)
if mask is not None:
mask = rearrange(mask, "b c (sh kh) (sw kw) -> b c (kh kw) (sh sw)", kh=patch_size, kw=patch_size)
return x, mask, H + pad_b, W + pad_r
def forward(self, x, parts=None, mask=None):
"""
Args:
x: [B, C, H, W]
parts: [B, N, C]
mask: [B, 1, H, W] if exists, else None
Returns:
x: [B, out_C, out_H, out_W]
parts: [B, out_N, out_C]
mask: [B, 1, out_H, out_W] if exists else None
"""
x, H, W, mask = self.proj(x, mask=mask)
x = self.proj_norm(x)
if self.proj_token is not None:
parts = self.proj_token(parts)
rpn_qpos, rpn_kpos = self.rpn_qpos, self.rpn_kpos
rpn_qpos = rpn_qpos.expand(x.shape[0], -1, -1, -1)
rpn_kpos = rpn_kpos.expand(x.shape[0], -1, -1, -1)
ori_H, ori_W = H, W
x, mask, H, W = self.to_patch(x, self.patch_size, H, W, mask)
for blk in self.blocks:
# x: [B, K, P, C]
x, parts, rpn_qpos, mask = blk(x,
parts=parts,
part_qpos=rpn_qpos,
part_kpos=rpn_kpos,
mask=mask)
dec_mask = None if mask is None else rearrange(mask.squeeze(1), "b h w -> b 1 1 (h w)")
if self.last_enc is not None:
x = rearrange(x, "b p k c -> b (p k) c")
rpn_out = self.last_enc(x, parts=parts, qpos=rpn_qpos, mask=dec_mask)
return rpn_out, parts, mask
else:
x = rearrange(x, "b (sh sw) (kh kw) c -> b c (sh kh) (sw kw)", kh=self.patch_size, sh=H // self.patch_size)
x = x[:, :, :ori_H, :ori_W]
return x, parts, mask
class ViP(nn.Module):
def __init__(self,
in_chans=3,
inplanes=64,
num_layers=(3, 4, 6, 3),
num_chs=(256, 512, 1024, 2048),
num_strides=(1, 2, 2, 2),
num_classes=1000,
num_heads=(1, 1, 1, 1),
num_parts=(1, 1, 1, 1),
patch_sizes=(1, 1, 1, 1),
drop_path=0.1,
num_enc_heads=(1, 1, 1, 1),
act=nn.GELU,
ffn_exp=3,
no_pos_wd=False,
has_last_encoder=False,
pretrained=False,
**ret_args):
super(ViP, self).__init__()
self.depth = len(num_layers)
self.no_pos_wd = no_pos_wd
self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, padding=3, stride=2, bias=False)
self.norm1 = nn.BatchNorm2d(inplanes)
self.act = act()
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.rpn_tokens = nn.Parameter(torch.Tensor(1, num_parts[0], inplanes))
drop_path_ratios = torch.linspace(0, drop_path, sum(num_layers))
last_chs = [inplanes, *num_chs[:-1]]
last_nps = [num_parts[0], *num_parts[:-1]]
for i, n_l in enumerate(num_layers):
stage_ratios = [drop_path_ratios[sum(num_layers[:i]) + did] for did in range(n_l)]
setattr(self,
"layer_{}".format(i),
Stage(last_chs[i],
num_chs[i],
n_l,
stride=num_strides[i],
num_heads=num_heads[i],
num_enc_heads=num_enc_heads[i],
patch_size=patch_sizes[i],
drop_path=stage_ratios,
ffn_exp=ffn_exp,
num_parts=num_parts[i],
last_np=last_nps[i],
last_enc=has_last_encoder and i == len(num_layers) - 1)
)
if has_last_encoder:
self.last_fc = nn.Linear(num_chs[-1], num_classes)
else:
self.last_linear = nn.Conv2d(num_chs[-1], num_chs[-1], kernel_size=1, bias=False)
self.last_norm = nn.BatchNorm2d(num_chs[-1])
self.pool2 = nn.AdaptiveAvgPool2d(1)
self.last_fc = nn.Linear(num_chs[-1], num_classes)
self.has_last_encoder = has_last_encoder
self._init_weights(pretrained=pretrained)
@torch.jit.ignore
def no_weight_decay(self):
skip_pattern = ['rel_pos'] if self.no_pos_wd else []
no_wd_layers = set()
for name, param in self.named_parameters():
for skip_name in skip_pattern:
if skip_name in name:
no_wd_layers.add(name)
return no_wd_layers
def _init_weights(self, pretrained=None):
if isinstance(pretrained, str):
state_dict = torch.load(pretrained, map_location=torch.device("cpu"))
if "state_dict" in state_dict.keys():
state_dict = state_dict["state_dict"]
self.load_state_dict(state_dict, strict=True)
return
init.kaiming_uniform_(self.rpn_tokens, a=math.sqrt(5))
trunc_normal_(self.rpn_tokens, std=.02)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv1d):
n = m.kernel_size[0] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
if not torch.sum(m.weight.data == 0).item() == m.num_features: # zero gamma
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
out = self.conv1(x)
out = self.norm1(out)
out = self.act(out)
out = self.pool1(out)
B, _, H, W = out.shape
rpn_tokens, mask = self.rpn_tokens.expand(x.shape[0], -1, -1), None
for i in range(self.depth):
layer = getattr(self, "layer_{}".format(i))
out, rpn_tokens, mask = layer(out, rpn_tokens, mask=mask)
if self.has_last_encoder:
out = self.act(out)
out = out.mean(1)
else:
out = self.last_linear(out)
out = self.last_norm(out)
out = self.act(out)
out = self.pool2(out)
out = out.squeeze()
out = self.last_fc(out).squeeze()
return out.view(out.size(0), -1)
@register_model
def vip_mobile(pretrained=False, **cfg):
model_cfg = dict(inplanes=64, num_chs=(48, 96, 192, 384), patch_sizes=[8, 7, 7, 7], num_heads=[1, 2, 4, 8],
num_enc_heads=[1, 2, 4, 8], num_parts=[16, 16, 16, 32], num_layers=[1, 1, 1, 1], ffn_exp=3,
has_last_encoder=True, drop_path=0., **cfg)
return ViP(pretrained=pretrained, **model_cfg)
@register_model
def vip_tiny(pretrained=False, **cfg):
model_cfg = dict(inplanes=64, num_chs=(64, 128, 256, 512), patch_sizes=[8, 7, 7, 7], num_heads=[1, 2, 4, 8],
num_enc_heads=[1, 2, 4, 8], num_parts=[32, 32, 32, 32], num_layers=[1, 1, 2, 1], ffn_exp=3,
has_last_encoder=True, drop_path=0.1, **cfg)
return ViP(pretrained=pretrained, **model_cfg)
@register_model
def vip_small(pretrained=False, **cfg):
model_cfg = dict(inplanes=64, num_chs=(96, 192, 384, 768), patch_sizes=[8, 7, 7, 7], num_heads=[3, 6, 12, 24],
num_enc_heads=[1, 3, 6, 12], num_parts=[64, 64, 64, 64], num_layers=[1, 1, 3, 1], ffn_exp=3,
has_last_encoder=True, drop_path=0.1, **cfg)
return ViP(pretrained=pretrained, **model_cfg)
@register_model
def vip_medium(pretrained=False, **cfg):
model_cfg = dict(inplanes=64, num_chs=(96, 192, 384, 768), patch_sizes=[8, 7, 7, 7], num_heads=[3, 6, 12, 24],
num_enc_heads=[1, 3, 6, 12], num_parts=[64, 64, 64, 128], num_layers=[1, 1, 8, 1], ffn_exp=3,
has_last_encoder=False, drop_path=0.2, **cfg)
return ViP(pretrained=pretrained, **model_cfg)
@register_model
def vip_base(pretrained=False, **cfg):
model_cfg = dict(inplanes=64, num_chs=(128, 256, 512, 1024), patch_sizes=[8, 7, 7, 7], num_heads=[4, 8, 16, 32],
num_enc_heads=[1, 4, 8, 16], num_parts=[64, 64, 128, 128], num_layers=[1, 1, 8, 1], ffn_exp=3,
has_last_encoder=False, drop_path=0.3, **cfg)
return ViP(pretrained=pretrained, **model_cfg)
|
scripts/specBytesToCode.py | zouhirzz/FreeRDP | 5,941 | 12723303 | <gh_stars>1000+
#!/usr/bin/python
#
# A script to convert a blob from the MS spec into an array of bytes for use in unit tests
#
# 00000000 c7 01 00 01 20 54 e2
# 00000008 c7 01 00 01 20 54 e2
# taken from the spec, will give:
# 0xc7, 0x01, 0x00, 0x01, 0x20, 0x54, 0xe2,
# 0xc7, 0x01, 0x00, 0x01, 0x20, 0x54, 0xe2,
#
# Notes:
# * the script reads the first two lines to detect the number of items per line, so you need a blob with at least 2 lines
# * the script detects if items are hex values by searching for + or -
#
# sample usage:
# $ python scripts/specBytesToCode.py < image.txt > image.c
# then go edit image.c and paste that in your code
import sys
def getOffset(l):
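    # The leading whitespace-separated token of each spec line is the hex offset.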
token = l.split(' ')[0]
return int(token, 16)
def isHex(l):
return l.find('+') == -1 and l.find('-') == -1
if __name__ == '__main__':
lines = []
    itemsPerLine = 16  # fallback; recomputed below from the offsets of the first two lines
doHex = True
# parse the offset to know how many items per line we have
l1 = sys.stdin.readline().strip()
l2 = sys.stdin.readline().strip()
itemsPerLine = getOffset(l2) - getOffset(l1)
#
doHex = isHex(l1)
for l in [l1, l2] + sys.stdin.readlines():
# 00000000 c7 01 00 01 20 54 e2 cc 00 jh.kjkjhkhk
l = l.strip() # in case we have spaces before the offset
pos = l.find(' ')
l = l[pos+1:]
items = []
tokens = l.strip().split(' ')
ntokens = 0
for t in tokens:
if not t: # empty token
continue
            if ntokens == itemsPerLine:
break
item = ''
if doHex:
item += '0x'
item += t
items.append(item)
ntokens += 1
lines.append(', '.join(items))
print(",\n".join(lines))
|
using_tor/loop_check_url.py | DazEB2/SimplePyScripts | 117 | 12723315 | <filename>using_tor/loop_check_url.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install -U requests[socks]
import requests
proxies = {
'http': 'socks5://localhost:9050',
'https': 'socks5://localhost:9050',
}
# NOTE: Для смены ip нужно настроить tor
# Для этого создается torrc файл (\Data\Tor\torrc)
# И в него добавляется содержимое:
# NewCircuitPeriod 5
# MaxCircuitDirtiness 10
url = 'http://httpbin.org/ip'
import sys
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('[%(asctime)s] %(message)s'))
log.addHandler(handler)
while True:
try:
log.debug(requests.get(url, proxies=proxies).json())
except requests.exceptions.ConnectionError:
log.debug('WARN: Not found Tor')
import time
time.sleep(1)
|
leo/modes/factor.py | ATikhonov2/leo-editor | 1,550 | 12723347 | <reponame>ATikhonov2/leo-editor<gh_stars>1000+
# Leo colorizer control file for factor mode.
# This file is in the public domain.
# Properties for factor mode.
properties = {
"commentEnd": ")",
"commentStart": "(",
"doubleBracketIndent": "true",
"indentCloseBrackets": "]",
"indentNextLines": "^(\\*<<|:).*",
"indentOpenBrackets": "[",
"lineComment": "!",
"lineUpClosingBracket": "true",
"noWordSep": "+-*=><;.?/'",
}
# Attributes dict for factor_main ruleset.
factor_main_attributes_dict = {
"default": "null",
"digit_re": "-?\\d+([./]\\d+)?",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "+-*=><;.?/'",
}
# Attributes dict for factor_stack_effect ruleset.
factor_stack_effect_attributes_dict = {
"default": "COMMENT4",
"digit_re": "-?\\d+([./]\\d+)?",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "+-*=><;.?/'",
}
# Dictionary of attributes dictionaries for factor mode.
attributesDictDict = {
"factor_main": factor_main_attributes_dict,
"factor_stack_effect": factor_stack_effect_attributes_dict,
}
# Keywords dict for factor_main ruleset.
factor_main_keywords_dict = {
"#{": "operator",
"--": "label",
";": "markup",
"<": "label",
">": "label",
"[": "operator",
"]": "operator",
"f": "literal4",
"r": "keyword1",
"t": "literal3",
"{": "operator",
"|": "operator",
"}": "operator",
"~": "label",
}
# Keywords dict for factor_stack_effect ruleset.
factor_stack_effect_keywords_dict = {}
# Dictionary of keywords dictionaries for factor mode.
keywordsDictDict = {
"factor_main": factor_main_keywords_dict,
"factor_stack_effect": factor_stack_effect_keywords_dict,
}
# Rules for factor_main ruleset.
def factor_rule0(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment2", seq="#!",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def factor_rule1(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="!",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def factor_rule2(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp=":\\s+(\\S+)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def factor_rule3(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp="IN:\\s+(\\S+)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def factor_rule4(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp="USE:\\s+(\\S+)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def factor_rule5(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp="DEFER:\\s+(\\S+)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def factor_rule6(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="markup", regexp="POSTPONE:\\s+(\\S+)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def factor_rule7(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp="CHAR:\\s+(\\S+)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def factor_rule8(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp="BIN:\\s+(\\S+)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def factor_rule9(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp="OCT:\\s+(\\S+)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def factor_rule10(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp="HEX:\\s+(\\S+)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def factor_rule11(colorer, s, i):
return colorer.match_span(s, i, kind="comment3", begin="(", end=")",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="factor::stack_effect",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def factor_rule12(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def factor_rule13(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for factor_main ruleset.
rulesDict1 = {
"!": [factor_rule1,],
"\"": [factor_rule12,],
"#": [factor_rule0,factor_rule13,],
"(": [factor_rule11,],
"-": [factor_rule13,],
"0": [factor_rule13,],
"1": [factor_rule13,],
"2": [factor_rule13,],
"3": [factor_rule13,],
"4": [factor_rule13,],
"5": [factor_rule13,],
"6": [factor_rule13,],
"7": [factor_rule13,],
"8": [factor_rule13,],
"9": [factor_rule13,],
":": [factor_rule2,],
";": [factor_rule13,],
"<": [factor_rule13,],
">": [factor_rule13,],
"@": [factor_rule13,],
"A": [factor_rule13,],
"B": [factor_rule8,factor_rule13,],
"C": [factor_rule7,factor_rule13,],
"D": [factor_rule5,factor_rule13,],
"E": [factor_rule13,],
"F": [factor_rule13,],
"G": [factor_rule13,],
"H": [factor_rule10,factor_rule13,],
"I": [factor_rule3,factor_rule13,],
"J": [factor_rule13,],
"K": [factor_rule13,],
"L": [factor_rule13,],
"M": [factor_rule13,],
"N": [factor_rule13,],
"O": [factor_rule9,factor_rule13,],
"P": [factor_rule6,factor_rule13,],
"Q": [factor_rule13,],
"R": [factor_rule13,],
"S": [factor_rule13,],
"T": [factor_rule13,],
"U": [factor_rule4,factor_rule13,],
"V": [factor_rule13,],
"W": [factor_rule13,],
"X": [factor_rule13,],
"Y": [factor_rule13,],
"Z": [factor_rule13,],
"[": [factor_rule13,],
"]": [factor_rule13,],
"a": [factor_rule13,],
"b": [factor_rule13,],
"c": [factor_rule13,],
"d": [factor_rule13,],
"e": [factor_rule13,],
"f": [factor_rule13,],
"g": [factor_rule13,],
"h": [factor_rule13,],
"i": [factor_rule13,],
"j": [factor_rule13,],
"k": [factor_rule13,],
"l": [factor_rule13,],
"m": [factor_rule13,],
"n": [factor_rule13,],
"o": [factor_rule13,],
"p": [factor_rule13,],
"q": [factor_rule13,],
"r": [factor_rule13,],
"s": [factor_rule13,],
"t": [factor_rule13,],
"u": [factor_rule13,],
"v": [factor_rule13,],
"w": [factor_rule13,],
"x": [factor_rule13,],
"y": [factor_rule13,],
"z": [factor_rule13,],
"{": [factor_rule13,],
"|": [factor_rule13,],
"}": [factor_rule13,],
"~": [factor_rule13,],
}
# Rules for factor_stack_effect ruleset.
def factor_rule14(colorer, s, i):
return colorer.match_seq(s, i, kind="comment3", seq="--",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
# Rules dict for factor_stack_effect ruleset.
rulesDict2 = {
"-": [factor_rule14,],
}
# x.rulesDictDict for factor mode.
rulesDictDict = {
"factor_main": rulesDict1,
"factor_stack_effect": rulesDict2,
}
# Import dict for factor mode.
importDict = {}
|
tests/test_core.py | abraha2d/PyVirtualDisplay | 409 | 12723352 | from time import sleep
import pytest
from pyvirtualdisplay import Display
from pyvirtualdisplay.abstractdisplay import XStartError
from pyvirtualdisplay.xephyr import XephyrDisplay
from pyvirtualdisplay.xvfb import XvfbDisplay
from pyvirtualdisplay.xvnc import XvncDisplay
from tutil import has_xvnc, rfbport
def test_virt():
vd = Display()
assert vd.return_code is None
assert not vd.is_alive()
vd.start()
assert vd.return_code is None
assert vd.is_alive()
vd.stop()
assert vd.return_code == 0
assert not vd.is_alive()
vd = Display().start().stop()
assert vd.return_code == 0
assert not vd.is_alive()
def test_nest():
vd = Display().start()
assert vd.is_alive()
nd = Display(visible=True).start().stop()
assert nd.return_code == 0
vd.stop()
assert not vd.is_alive()
def test_disp():
vd = Display().start()
assert vd.is_alive()
# d = Display(visible=True).start().sleep(2).stop()
# .assertEquals(d.return_code, 0)
d = Display(visible=False).start().stop()
assert d.return_code == 0
vd.stop()
assert not vd.is_alive()
def test_repr_xvfb():
display = Display()
print(repr(display))
display = Display(visible=False)
print(repr(display))
display = Display(backend="xvfb")
print(repr(display))
display = XvfbDisplay()
print(repr(display))
if has_xvnc():
def test_repr_xvnc():
display = Display(backend="xvnc", rfbport=rfbport())
print(repr(display))
display = XvncDisplay()
print(repr(display))
def test_repr_xephyr():
display = Display(visible=True)
print(repr(display))
display = Display(backend="xephyr")
print(repr(display))
display = XephyrDisplay()
print(repr(display))
def test_stop_nostart():
with pytest.raises(XStartError):
Display().stop()
def test_double_start():
vd = Display()
try:
vd.start()
with pytest.raises(XStartError):
vd.start()
finally:
vd.stop()
def test_double_stop():
vd = Display().start().stop()
assert vd.return_code == 0
assert not vd.is_alive()
vd.stop()
assert vd.return_code == 0
assert not vd.is_alive()
def test_stop_terminated():
vd = Display().start()
assert vd.is_alive()
vd._obj._subproc.terminate()
sleep(0.2)
assert not vd.is_alive()
vd.stop()
assert vd.return_code == 0
assert not vd.is_alive()
def test_no_backend():
with pytest.raises(ValueError):
Display(backend="unknown")
def test_color_xvfb():
with pytest.raises(XStartError):
Display(color_depth=99).start().stop()
Display(color_depth=16).start().stop()
Display(color_depth=24).start().stop()
Display(color_depth=8).start().stop()
def test_color_xephyr():
with Display():
# requested screen depth not supported, setting to match hosts
Display(backend="xephyr", color_depth=99).start().stop()
Display(backend="xephyr", color_depth=16).start().stop()
Display(backend="xephyr", color_depth=24).start().stop()
Display(backend="xephyr", color_depth=8).start().stop()
if has_xvnc():
def test_color_xvnc():
with pytest.raises(XStartError):
with Display(backend="xvnc", color_depth=99, rfbport=rfbport()):
pass
with Display(backend="xvnc", color_depth=16, rfbport=rfbport()):
pass
with Display(backend="xvnc", color_depth=24, rfbport=rfbport()):
pass
# tigervnc no longer works 8-bit pseudocolors, 18.04 is OK
# with Display(backend="xvnc", color_depth=8, rfbport=rfbport()):
# pass
def test_pid():
with Display() as d:
assert d.pid > 0
with XvfbDisplay() as d:
assert d.pid > 0
def test_bgcolor():
Display(bgcolor="black").start().stop()
Display(bgcolor="white").start().stop()
with pytest.raises(KeyError):
Display(bgcolor="green").start().stop()
def test_is_started():
# d = Display()
# assert not d._is_started
# d.start()
# assert d._is_started
# d.stop()
# assert d._is_started
# with Display() as d:
# assert d._is_started
# assert d._is_started
with XvfbDisplay() as d:
assert d._is_started
assert d._is_started
with Display():
with XephyrDisplay() as d:
assert d._is_started
assert d._is_started
# with XvncDisplay() as d:
# assert d._is_started
# assert d._is_started
def test_extra_args():
# Unrecognized option
d = Display(extra_args=["willcrash"])
with pytest.raises(XStartError):
d.start()
with Display():
# -c turns off key-click
with Display(visible=True, extra_args=["-c"]) as d:
assert d.is_alive()
assert not d.is_alive()
with XephyrDisplay(extra_args=["-c"]) as d:
assert d.is_alive()
assert not d.is_alive()
def test_display():
d = Display()
assert d.display is None
d.start()
assert d.display >= 0
d = XvfbDisplay()
assert d.display is None
d.start()
assert d.display >= 0
|
recipes/Python/579024_Simple_FIFO_trading_model/recipe-579024.py | tdiprima/code | 2,023 | 12723375 | <reponame>tdiprima/code
from collections import deque
import random

import numpy as np
# plotting helpers used below; available via matplotlib's pylab interface
from pylab import plot, hist, grid, show
'''
Example below replicates
+75 MSFT 25.10
+50 MSFT 25.12
-100 MSFT 25.22
Realized P&L = 75 * (25.22 - 25.10) + 25 * (25.22 - 25.12) = $ 11.50
A Trade is split into a set of unit positions that are then dequeued on FIFO basis as part of Sell.
'''
number_of_sell_trades = 1000
max_sell_quantity = 5
min_sell_price = 23.00
max_sell_price = 27.00
class TradeManager():
def __init__(self):
# FIFO queue that we can use to enqueue unit buys and
# dequeue unit sells.
self.fifo = deque()
self.profit = []
def __repr__(self):
return 'position size: %d'%(len(self.fifo))
def execute_with_total_pnl(self, direction, quantity, price):
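        # For a sell, realized P&L is the sum of (sell price - buy price) over
        # the unit fills dequeued FIFO; sells larger than the open position
        # return 0, and buys simply enqueue their unit fills.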
#print direction, quantity, price, 'position size', len(self.fifo)
if len(self.fifo) == 0:
return 0
if 'Sell' in (direction):
if len(self.fifo) >= quantity:
                return sum([(price - fill.price) for fill in self.execute(direction, quantity, price)])
else:
return 0
else:
            return [self.execute(direction, quantity, price)]
def execute(self, direction, quantity, price):
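        # Buys appendleft one Fill per unit; sells pop() from the right, i.e.
        # the oldest fill first (FIFO), yielding the matched fills one at a time.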
#print direction, quantity, price, 'position size', len(self.fifo)
if direction in ('Buy'):
for i, fill in Trade(direction, quantity, price):
self.fifo.appendleft(fill)
yield fill
elif direction in ('Sell'):
for i, fill in Trade(direction, quantity, price):
yield self.fifo.pop()
class Fill():
def __init__(self, price):
self.price = price
self.quantity = 1
class Trade():
def __init__(self, direction, quantity, price):
self.direction = direction
self.quantity = quantity
self.price = price
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i < self.quantity:
i = self.i
self.i += 1
return i, Fill(self.price)
else:
raise StopIteration()
# create a TradeManager
tm = TradeManager()
# generate some buys
a = [i for i in tm.execute('Buy', 75, 25.10)]
a = [i for i in tm.execute('Buy', 50, 25.12)]
# generate sell
pnl = np.cumsum(tm.execute_with_total_pnl('Sell', 100, 25.22))
# how much did we make
print 'total pnl', pnl[-1:]
# try something more involved.
tm = TradeManager()
pnl_ending = []
# run n simulations
for step in range(0,50):
a = [i for i in tm.execute('Buy', 75000, 25)]
pnl = np.cumsum([tm.execute_with_total_pnl('Sell', quantity, random.uniform(min_sell_price, max_sell_price)) \
                    for quantity in [random.randint(0,max_sell_quantity) \
for i in range(0,number_of_sell_trades,1)]])
plot(pnl)
pnl_ending.append(pnl[-1:][0])
print 'step', step, 'pnl', pnl[-1:][0], 'avg. pnl', np.mean(pnl_ending), 'diff to mean', pnl[-1:][0]-np.mean(pnl_ending)
print 'avg, total pnl', np.mean(pnl_ending) #pnl[-1:][0]
show()
# bin the results
hist(pnl_ending, 25)
grid(True)
show()
# could lookat fitting and var.
|
zentral/core/probes/migrations/0005_auto_20161104_1343.py | arubdesu/zentral | 634 | 12723414 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-04 13:43
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('probes', '0004_auto_20161103_1728'),
]
operations = [
migrations.AddField(
model_name='probesource',
name='apps',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=255), blank=True, default=[], editable=False, size=None),
preserve_default=False,
),
migrations.AddField(
model_name='probesource',
name='event_types',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=255), blank=True, default=[], editable=False, size=None),
preserve_default=False,
),
migrations.AddField(
model_name='probesource',
name='model',
field=models.CharField(blank=True, editable=False, max_length=255, null=True),
),
migrations.AlterField(
model_name='probesource',
name='body',
field=models.TextField(editable=False),
),
migrations.AlterField(
model_name='probesource',
name='slug',
field=models.SlugField(editable=False, max_length=255, unique=True),
),
]
|
dapper/mods/Lorenz63/extras.py | aperrin66/DAPPER | 225 | 12723415 | <gh_stars>100-1000
"""Extra functionality (not necessary for the EnKF or the particle filter)."""
import numpy as np
import dapper.mods.Lorenz63 as core
import dapper.tools.liveplotting as LP
from dapper.mods.integration import integrate_TLM
def d2x_dtdx(x):
"""Tangent linear model (TLM). I.e. the Jacobian of dxdt(x)."""
x, y, z = x
sig, rho, beta = core.sig, core.rho, core.beta
A = np.array(
[[-sig, sig, 0],
[rho-z, -1, -x],
[y, x, -beta]])
return A
def dstep_dx(x, t, dt):
"""Compute resolvent (propagator) of the TLM. I.e. the Jacobian of `step(x)`."""
return integrate_TLM(d2x_dtdx(x), dt, method='approx')
# Add some non-default liveplotters
params = dict(labels='xyz', Tplot=1)
def LPs(jj=None, params=params): return [
(1, LP.correlations),
(1, LP.sliding_marginals(jj, zoomy=0.8, **params)),
(1, LP.phase_particles(is_3d=True, obs_inds=jj, **params)),
]
|
keepassc/control.py | thorkill/keepassc | 238 | 12723419 | # -*- coding: utf-8 -*-
import curses as cur
import logging
from curses.ascii import NL, DEL, SP
from datetime import date, datetime
from os import chdir, getcwd, getenv, geteuid, makedirs, remove
from os.path import expanduser, isfile, isdir, realpath, join
from pwd import getpwuid
from random import sample
from socket import gethostname, socket, AF_INET, SOCK_STREAM, SHUT_RDWR
from sys import exit
from kppy.database import KPDBv1
from kppy.exceptions import KPError
from keepassc.conn import *
from keepassc.client import Client
from keepassc.editor import Editor
from keepassc.helper import parse_config, write_config
from keepassc.filebrowser import FileBrowser
from keepassc.dbbrowser import DBBrowser
class Control(object):
'''This class represents the whole application.'''
def __init__(self):
'''The __init__-method.
It just initializes some variables and settings and changes
the working directory to /var/empty to prevent coredumps as
normal user.
'''
try:
self.config_home = realpath(expanduser(getenv('XDG_CONFIG_HOME')))
except:
self.config_home = realpath(expanduser('~/.config'))
finally:
self.config_home = join(self.config_home, 'keepassc', 'config')
try:
self.data_home = realpath(expanduser(getenv('XDG_DATA_HOME')))
except:
self.data_home = realpath(expanduser('~/.local/share/'))
finally:
self.data_home = join(self.data_home, 'keepassc')
self.last_home = join(self.data_home, 'last')
self.remote_home = join(self.data_home, 'remote')
self.key_home = join(self.data_home, 'key')
self.config = parse_config(self)
if self.config['rem_key'] is False and isfile(self.key_home):
remove(self.key_home)
self.initialize_cur()
self.last_file = None
self.last_key = None
self.loginname = getpwuid(geteuid())[0]
self.hostname = gethostname()
self.cur_dir = getcwd()
chdir('/var/empty')
self.db = None
def initialize_cur(self):
'''Method to initialize curses functionality'''
self.stdscr = cur.initscr()
try:
cur.curs_set(0)
except:
print('Invisible cursor not supported')
cur.cbreak()
cur.noecho()
self.stdscr.keypad(1)
cur.start_color()
cur.use_default_colors()
cur.init_pair(1, -1, -1)
cur.init_pair(2, 2, -1)
cur.init_pair(3, -1, 1)
cur.init_pair(4, 6, -1)
cur.init_pair(5, 0, 6)
cur.init_pair(6, 0, 7)
cur.init_pair(7, 1, -1)
self.stdscr.bkgd(1)
self.ysize, self.xsize = self.stdscr.getmaxyx()
self.group_win = cur.newwin(self.ysize - 1, int(self.xsize / 3),
1, 0)
# 11 is the y size of info_win
self.entry_win = cur.newwin((self.ysize - 1) - 11,
int(2 * self.xsize / 3),
1, int(self.xsize / 3))
self.info_win = cur.newwin(11,
int(2 * self.xsize / 3),
(self.ysize - 1) - 11,
int(self.xsize / 3))
self.group_win.keypad(1)
self.entry_win.keypad(1)
self.group_win.bkgd(1)
self.entry_win.bkgd(1)
self.info_win.bkgd(1)
def resize_all(self):
'''Method to resize windows'''
self.ysize, self.xsize = self.stdscr.getmaxyx()
self.group_win.resize(self.ysize - 1, int(self.xsize / 3))
self.entry_win.resize(
self.ysize - 1 - 11, int(2 * self.xsize / 3))
self.info_win.resize(11, int(2 * self.xsize / 3))
self.group_win.mvwin(1, 0)
self.entry_win.mvwin(1, int(self.xsize / 3))
self.info_win.mvwin((self.ysize - 1) - 11, int(self.xsize / 3))
def any_key(self):
'''If any key is needed.'''
while True:
try:
e = self.stdscr.getch()
except KeyboardInterrupt:
e = 4
if e == 4:
return -1
elif e == cur.KEY_RESIZE:
self.resize_all()
else:
return e
def draw_text(self, changed, *misc):
'''This method is a wrapper to display some text on stdscr.
misc is a list that should consist of 3-tuples which holds
text to display.
(1st element: y-coordinate, 2nd: x-coordinate, 3rd: text)
'''
if changed is True:
cur_dir = self.cur_dir + '*'
else:
cur_dir = self.cur_dir
try:
self.stdscr.clear()
self.stdscr.addstr(
0, 0, self.loginname + '@' + self.hostname + ':',
cur.color_pair(2))
self.stdscr.addstr(
0, len(self.loginname + '@' + self.hostname + ':'),
cur_dir)
for i, j, k in misc:
self.stdscr.addstr(i, j, k)
except: # to prevent a crash if screen is small
pass
finally:
self.stdscr.refresh()
def draw_help(self, *text):
"""Draw a help
*text are arbitary string
"""
if len(text) > self.ysize -1:
length = self.ysize - 1
offset = 0
spill = len(text) - self.ysize + 2
else:
length = len(text)
offset = 0
spill = 0
while True:
try:
self.draw_text(False)
for i in range(length):
self.stdscr.addstr(
i + 1, 0, text[(i + offset)])
except:
pass
finally:
self.stdscr.refresh()
try:
e = self.stdscr.getch()
except KeyboardInterrupt:
e = 4
if e == cur.KEY_DOWN:
if offset < spill:
offset += 1
elif e == cur.KEY_UP:
if offset > 0:
offset -= 1
elif e == NL:
return
elif e == cur.KEY_RESIZE:
self.resize_all()
if len(text) > self.ysize -1:
length = self.ysize - 1
offset = 0
spill = len(text) - self.ysize + 2
else:
length = len(text)
offset = 0
spill = 0
elif e == 4:
if self.db is not None:
self.db.close()
self.close()
def get_password(self, std, needed=True):
'''This method is used to get a password.
The pasword will not be displayed during typing.
std is a string that should be displayed. If needed is True it
is not possible to return an emptry string.
'''
password = Editor(self.stdscr, max_text_size=1, win_location=(0, 1),
win_size=(1, self.xsize), title=std, pw_mode=True)()
if needed is True and not password:
return False
else:
return password
def get_authentication(self):
"""Get authentication credentials"""
while True:
if (self.config['skip_menu'] is False or
(self.config['rem_db'] is False and
self.config['rem_key'] is False)):
auth = self.gen_menu(1, (
(1, 0, 'Use a password (1)'),
(2, 0, 'Use a keyfile (2)'),
(3, 0, 'Use both (3)')),
(5, 0, 'Press \'F5\' to go back to main '
'menu'))
else:
self.draw_text(False)
auth = 3
if auth is False:
return False
elif auth == -1:
self.close()
if auth == 1 or auth == 3:
if self.config['skip_menu'] is True:
needed = False
else:
needed = True
password = self.get_password('Password: ', needed = needed)
if password is False:
self.config['skip_menu'] = False
continue
elif password == -1:
self.close()
# happens only if self.config['skip_menu'] is True
elif password == "":
                    password = None  # assumption: empty input means no password (original value was sanitized)
if auth != 3:
keyfile = None
if auth == 2 or auth == 3:
# Ugly construct but works
# "if keyfile is False" stuff is needed to implement the
# return to previous screen stuff
# Use similar constructs elsewhere
while True:
self.get_last_key()
if (self.last_key is None or
self.config['rem_key'] is False):
ask_for_lf = False
else:
ask_for_lf = True
keyfile = FileBrowser(self, ask_for_lf, True,
self.last_key)()
if keyfile is False:
break
elif keyfile == -1:
self.close()
elif not isfile(keyfile):
self.draw_text(False,
(1, 0, 'That\'s not a file'),
(3, 0, 'Press any key.'))
if self.any_key() == -1:
self.close()
continue
break
if keyfile is False:
continue
if auth != 3:
password = None
if self.config['rem_key'] is True:
if not isdir(self.key_home[:-4]):
if isfile(self.key_home[:-4]):
remove(self.key_home[:-4])
makedirs(self.key_home[:-4])
handler = open(self.key_home, 'w')
handler.write(keyfile)
handler.close()
break
return (password, keyfile)
def get_last_db(self):
if isfile(self.last_home) and self.config['rem_db'] is False:
remove(self.last_home)
self.last_file = None
elif isfile(self.last_home):
try:
handler = open(self.last_home, 'r')
except Exception as err:
self.last_file = None
print(err.__str__())
else:
self.last_file = handler.readline()
handler.close()
else:
self.last_file = None
def get_last_key(self):
if isfile(self.key_home) and self.config['rem_key'] is False:
remove(self.key_home)
self.last_key = None
elif isfile(self.key_home):
try:
handler = open(self.key_home, 'r')
except Exception as err:
self.last_key = None
print(err.__str__())
else:
self.last_key = handler.readline()
handler.close()
else:
self.last_key = None
def gen_pass(self):
'''Method to generate a password'''
while True:
items = self.gen_check_menu(((1, 0, 'Include numbers'),
(2, 0,
'Include capitalized letters'),
(3, 0, 'Include special symbols'),
(4, 0, 'Include space')),
(6, 0, 'Press space to un-/check'),
(7, 0,
'Press return to enter options'))
if items is False or items == -1:
return items
length = self.get_num('Password length: ')
if length is False:
continue
elif length == -1:
return -1
char_set = 'abcdefghijklmnopqrstuvwxyz'
if items[0] == 1:
char_set += '1234567890'
if items[1] == 1:
char_set += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if items[2] == 1:
char_set += '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~$'
if items[3] == 1:
char_set += ' '
password = ''
for _ in range(length):
password += sample(char_set, 1)[0]
return password
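    # Illustrative sketch (an assumption, not part of the original keepassc
    # code): gen_pass() above draws every character with random.sample(); a
    # cryptographically stronger variant could use the standard library's
    # secrets module.  The helper below is never called anywhere.
    def _gen_pass_sketch(self, char_set, length):
        '''Return a password of the given length drawn from char_set.'''
        from secrets import choice
        return ''.join(choice(char_set) for _ in range(length))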
def get_exp_date(self, *exp):
nav = self.gen_menu(1,
((1, 0, 'Expires never (1)'),
(2, 0, 'Set expire date manual (2)'),
(3, 0, 'Expires tomorrow (3)'),
(4, 0, 'Expires in 1 week (4)'),
(5, 0, 'Expires in 2 weeks (5)'),
(6, 0, 'Expires in 1 month (6)'),
(7, 0, 'Expires in 3 months (7)'),
(8, 0, 'Expires in 6 months (8)'),
(9, 0, 'Expires in 1 year (9)')))
tmp_date = datetime.now()
d = tmp_date.day
m = tmp_date.month
y = tmp_date.year
if nav == 1:
exp_date = (2999, 12, 28)
elif nav == 2:
if exp:
exp_date = self.get_manual_exp_date(exp[0], exp[1], exp[2])
else:
exp_date = self.get_manual_exp_date()
elif nav == 3:
if (((m == 1 or m == 3 or m == 5 or m == 7 or m == 8 or m == 10) and d == 31) or
((m == 4 or m == 6 or m == 9 or m == 11) and d == 30)):
exp_date = (y, m + 1, 1)
elif (m == 2 and ((y % 4 == 0 and (y % 100 != 0 or y % 400 == 0) and d == 29) or d == 28)):
exp_date = (y, 3, 1)
elif m == 12 and d == 31:
exp_date = (y + 1, 1, 1)
else:
exp_date = (y, m, d + 1)
elif nav == 4:
if ((m == 1 or m == 3 or m == 5 or m == 7 or m == 8 or m == 10) and d + 7 > 31):
exp_date = (y, m + 1, (d + 7) % 31)
elif ((m == 4 or m == 6 or m == 9 or m == 11) and d + 7 > 30):
exp_date = (y, m + 1, (d + 7) % 30)
elif (m == 2 and (y % 4 == 0 and (y % 100 != 0 or y % 400 == 0) and d + 7 > 29)):
exp_date = (y, 3, (d + 7) % 29)
elif (m == 2 and d + 7 > 28):
exp_date = (y, 3, (d + 7) % 28)
elif m == 12 and d + 7 > 31:
exp_date = (y + 1, 1, (d + 7) % 31)
else:
exp_date = (y, m, d + 7)
elif nav == 5:
if ((m == 1 or m == 3 or m == 5 or m == 7 or m == 8 or m == 10) and d + 14 > 31):
exp_date = (y, m + 1, (d + 14) % 31)
elif ((m == 4 or m == 6 or m == 9 or m == 11) and d + 14 > 30):
exp_date = (y, m + 1, (d + 14) % 30)
elif (m == 2 and (y % 4 == 0 and (y % 100 != 0 or y % 400 == 0) and d + 14 > 29)):
exp_date = (y, 3, (d + 14) % 29)
elif (m == 2 and d + 14 > 28):
exp_date = (y, 3, (d + 14) % 28)
elif m == 12 and d + 14 > 31:
exp_date = (y + 1, 1, (d + 14) % 31)
else:
exp_date = (y, m, d + 14)
elif nav == 6:
if m == 1 and (y % 4 == 0 and (y % 100 != 0 or y % 400 == 0) and d > 29):
exp_date = (y, 3, d % 29)
elif (m == 1 and d > 28):
exp_date = (y, 3, d % 28)
elif (m == 3 or m == 5 or m == 7 or m == 8 or m == 10) and d == 31:
exp_date = (y, m + 2, 1)
elif m == 12:
exp_date = (y + 1, 1, d)
else:
exp_date = (y, m + 1, d)
elif nav == 7:
if (m == 1 or m == 3 or m == 8) and d == 31:
exp_date = (y, m + 4, 1)
elif m == 10 or m == 12:
exp_date = (y + 1, (m + 3) % 12, d)
elif m == 11 and ((y + 1) % 4 == 0 and ((y + 1) % 100 != 0 or (y + 1) % 400 == 0)) and d > 29:
exp_date = (y + 1, 3, d % 29)
elif m == 11 and d > 28:
exp_date = (y + 1, 3, d % 28)
else:
exp_date = (y, m + 3, d)
elif nav == 8:
if (m == 3 or m == 5) and d == 31:
exp_date = (y, m + 7, 1)
elif (m == 7 or m == 9 or m == 11):
exp_date = (y + 1, (m + 6) % 12, d)
elif m == 8 and ((y + 1) % 4 == 0 and ((y + 1) % 100 != 0 or (y + 1) % 400 == 0)) and d > 29:
exp_date = (y + 1, 3, d % 29)
elif m == 8 and d > 28:
exp_date = (y + 1, 3, d % 28)
elif (m == 10 or m == 12) and d == 31:
exp_date = (y + 1, (m + 7) % 12, 1)
elif (m == 8 or m == 10 or m == 12):
exp_date = (y + 1, (m + 6) % 12, d)
else:
exp_date = (y, m + 6, d)
elif nav == 9:
if m == 2 and d == 29:
exp_date = (y + 1, 3, 1)
else:
exp_date = (y + 1, m, d)
elif nav == -1:
return -1
elif nav is False:
return False
return exp_date
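    # Illustrative sketch (an assumption, not part of the original keepassc
    # code): the month/leap-year case analysis in get_exp_date() could be
    # expressed with datetime and calendar.  Unlike get_exp_date(), which rolls
    # an overflowing day into the next month, this helper clamps the day to the
    # last day of the target month (e.g. Jan 31 + 1 month -> Feb 28/29).  It is
    # never called anywhere.
    def _shift_date_sketch(self, days=0, months=0, years=0):
        '''Return (y, m, d) shifted from today by days, months and years.'''
        from calendar import monthrange
        from datetime import date, timedelta
        base = date.today() + timedelta(days=days)
        y = base.year + years + (base.month - 1 + months) // 12
        m = (base.month - 1 + months) % 12 + 1
        d = min(base.day, monthrange(y, m)[1])
        return (y, m, d)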
def get_manual_exp_date(self, *exp):
'''This method is used to get an expiration date for entries.
exp is used to display an actual expiration date.
'''
pass_y = False
pass_mon = False
goto_last = False
while True:
if pass_y is False:
edit = ''
e = cur.KEY_BACKSPACE
while e != NL:
if (e == cur.KEY_BACKSPACE or e == DEL) and len(edit) != 0:
edit = edit[:-1]
elif e == cur.KEY_BACKSPACE or e == DEL:
pass
elif e == 4:
return -1
elif e == cur.KEY_RESIZE:
self.resize_all()
elif e == cur.KEY_F5:
return False
elif len(edit) < 4 and e >= 48 and e <= 57:
edit += chr(e)
self.draw_text(False,
(1, 0, 'Special date 2999-12-28 means that '
                                   'the entry never expires.'),
(3, 0, 'Year: ' + edit))
if exp:
try:
self.stdscr.addstr(2, 0,
'Actual expiration date: ' +
str(exp[0]) + '-' +
str(exp[1]) + '-' +
str(exp[2]))
except:
pass
finally:
self.stdscr.refresh()
try:
e = self.stdscr.getch()
except KeyboardInterrupt:
e = 4
if e == NL and edit == '':
e = cur.KEY_BACKSPACE
continue
y = int(edit)
pass_y = True
if pass_mon is False:
edit = ''
e = cur.KEY_BACKSPACE
while e != NL:
if (e == cur.KEY_BACKSPACE or e == DEL) and len(edit) != 0:
edit = edit[:-1]
elif e == cur.KEY_BACKSPACE or e == DEL:
pass
elif e == 4:
return -1
elif e == cur.KEY_RESIZE:
self.resize_all()
elif e == cur.KEY_F5:
pass_y = False
goto_last = True
break
elif len(edit) < 2 and e >= 48 and e <= 57:
edit += chr(e)
self.draw_text(False,
(1, 0, 'Special date 2999-12-28 means that '
                                   'the entry never expires.'),
(3, 0, 'Year: ' + str(y)),
(4, 0, 'Month: ' + edit))
if exp:
try:
self.stdscr.addstr(2, 0,
'Actual expiration date: ' +
str(exp[0]) + '-' +
str(exp[1]) + '-' +
str(exp[2]))
except:
pass
finally:
self.stdscr.refresh()
try:
e = self.stdscr.getch()
except KeyboardInterrupt:
e = 4
if e == NL and edit == '':
e = cur.KEY_BACKSPACE
continue
elif e == NL and (int(edit) > 12 or int(edit) < 1):
self.draw_text(False,
(1, 0,
'Month must be between 1 and 12. '
'Press any key.'))
if self.any_key() == -1:
return -1
e = ''
if goto_last is True:
goto_last = False
continue
mon = int(edit)
pass_mon = True
edit = ''
e = cur.KEY_BACKSPACE
while e != NL:
if (e == cur.KEY_BACKSPACE or e == DEL) and len(edit) != 0:
edit = edit[:-1]
elif e == cur.KEY_BACKSPACE or e == DEL:
pass
elif e == 4:
return -1
elif e == cur.KEY_RESIZE:
self.resize_all()
elif e == cur.KEY_F5:
pass_mon = False
goto_last = True
break
elif len(edit) < 2 and e >= 48 and e <= 57:
edit += chr(e)
self.draw_text(False,
(1, 0, 'Special date 2999-12-28 means that the '
                               'entry never expires.'),
(3, 0, 'Year: ' + str(y)),
(4, 0, 'Month: ' + str(mon)),
(5, 0, 'Day: ' + edit))
if exp:
try:
self.stdscr.addstr(2, 0, 'Actual expiration date: ' +
str(exp[0]) + '-' +
str(exp[1]) + '-' +
str(exp[2]))
except:
pass
finally:
self.stdscr.refresh()
try:
e = self.stdscr.getch()
except KeyboardInterrupt:
e = 4
if e == NL and edit == '':
e = cur.KEY_BACKSPACE
continue
elif (e == NL and (mon == 1 or mon == 3 or mon == 5 or
mon == 7 or mon == 8 or mon == 10 or
mon == 12) and
                      (int(edit) > 31 or int(edit) < 1)):
self.draw_text(False,
(1, 0,
'Day must be between 1 and 31. Press '
'any key.'))
if self.any_key() == -1:
return -1
e = ''
elif (e == NL and mon == 2 and (int(edit) > 28 or
                                                int(edit) < 1)):
self.draw_text(False,
(1, 0,
'Day must be between 1 and 28. Press '
'any key.'))
if self.any_key() == -1:
return -1
e = ''
elif (e == NL and (mon == 4 or mon == 6 or mon == 9 or
                                   mon == 11) and (int(edit) > 30 or int(edit) < 1)):
self.draw_text(False,
(1, 0,
'Day must be between 1 and 30. Press '
'any key.'))
if self.any_key() == -1:
return -1
e = ''
if goto_last is True:
goto_last = False
pass_mon = False
continue
d = int(edit)
break
return (y, mon, d)
def get_num(self, std='', edit='', length=4):
'''Method to get a number'''
edit = edit
e = 60 # just an unrecognized letter
while e != NL:
if (e == cur.KEY_BACKSPACE or e == DEL) and len(edit) != 0:
edit = edit[:-1]
elif e == cur.KEY_BACKSPACE or e == DEL:
pass
elif e == 4:
return -1
elif e == cur.KEY_RESIZE:
self.resize_all()
elif e == cur.KEY_F5:
return False
elif len(edit) < length and e >= 48 and e <= 57:
edit += chr(e)
self.draw_text(False,
(1, 0, std + edit))
try:
e = self.stdscr.getch()
except KeyboardInterrupt:
e = 4
if e == NL and edit == '':
e = cur.KEY_BACKSPACE
continue
return int(edit)
def gen_menu(self, highlight, misc, *add):
'''A universal method to generate a menu.
        misc is a tuple of triples (y, x, 'text')
        add are more tuples, but their content should not be accessible
'''
if len(misc) == 0:
return False
h_color = 6
n_color = 1
e = ''
while e != NL:
try:
self.stdscr.clear()
self.stdscr.addstr(
0, 0, self.loginname + '@' + self.hostname + ':',
cur.color_pair(2))
self.stdscr.addstr(0,
len(self.loginname +
'@' + self.hostname + ':'),
self.cur_dir)
for i, j, k in misc:
if i == highlight:
self.stdscr.addstr(i, j, k, cur.color_pair(h_color))
else:
self.stdscr.addstr(i, j, k, cur.color_pair(n_color))
for i, j, k in add:
self.stdscr.addstr(i, j, k)
except:
pass
finally:
self.stdscr.refresh()
try:
e = self.stdscr.getch()
except KeyboardInterrupt:
e = 4
if e == 4:
return -1
elif e == cur.KEY_RESIZE:
self.resize_all()
elif e == cur.KEY_F5:
return False
elif e == NL:
return highlight
elif (e == cur.KEY_DOWN or e == ord('j')) and highlight < len(misc):
highlight += 1
elif (e == cur.KEY_UP or e == ord('k')) and highlight > 1:
highlight -= 1
elif 49 <= e <= 48 + len(misc): # ASCII(49) = 1 ...
return e - 48
def gen_check_menu(self, misc, *add):
'''Print a menu with checkable entries'''
if len(misc) == 0:
return False
items = []
for i in range(len(misc)):
items.append(0)
highlight = 1
h_color = 6
n_color = 1
e = ''
while e != NL:
try:
self.stdscr.clear()
self.stdscr.addstr(
0, 0, self.loginname + '@' + self.hostname + ':',
cur.color_pair(2))
self.stdscr.addstr(0,
len(self.loginname +
'@' + self.hostname + ':'),
self.cur_dir)
for i, j, k in misc:
if items[i - 1] == 0:
check = '[ ]'
else:
check = '[X]'
if i == highlight:
self.stdscr.addstr(
i, j, check + k, cur.color_pair(h_color))
else:
self.stdscr.addstr(
i, j, check + k, cur.color_pair(n_color))
for i, j, k in add:
self.stdscr.addstr(i, j, k)
except:
pass
finally:
self.stdscr.refresh()
try:
e = self.stdscr.getch()
except KeyboardInterrupt:
e = 4
if e == 4:
return -1
elif e == cur.KEY_RESIZE:
self.resize_all()
elif e == cur.KEY_F5:
return False
elif e == SP:
if items[highlight - 1] == 0:
items[highlight - 1] = 1
else:
items[highlight - 1] = 0
elif (e == cur.KEY_DOWN or e == ord('j')) and highlight < len(misc):
highlight += 1
elif (e == cur.KEY_UP or e == ord('k')) and highlight > 1:
highlight -= 1
elif e == NL:
return items
def gen_config_menu(self):
'''The configuration menu'''
self.config = parse_config(self)
menu = 1
while True:
menu = self.gen_menu(menu,
((1, 0, 'Delete clipboard automatically: ' +
str(self.config['del_clip'])),
(2, 0, 'Waiting time (seconds): ' +
str(self.config['clip_delay'])),
(3, 0, 'Lock database automatically: ' +
str(self.config['lock_db'])),
(4, 0, 'Waiting time (seconds): ' +
str(self.config['lock_delay'])),
(5, 0, 'Remember last database: ' +
str(self.config['rem_db'])),
(6, 0, 'Remember last keyfile: ' +
str(self.config['rem_key'])),
(7, 0, 'Use directly password and key if one of the two '
'above is True: ' +
str(self.config['skip_menu'])),
(8, 0, 'Pin server certificate: ' + str(self.config['pin'])),
(9, 0, 'Generate default configuration'),
(10, 0, 'Write config')),
(12, 0, 'Automatic locking works only for saved databases!'))
if menu == 1:
if self.config['del_clip'] is True:
self.config['del_clip'] = False
elif self.config['del_clip'] is False:
self.config['del_clip'] = True
elif menu == 2:
delay = self.get_num('Waiting time: ',
str(self.config['clip_delay']))
if delay is False:
continue
elif delay == -1:
self.close()
else:
self.config['clip_delay'] = delay
elif menu == 3:
if self.config['lock_db'] is True:
self.config['lock_db'] = False
elif self.config['lock_db'] is False:
self.config['lock_db'] = True
elif menu == 4:
delay = self.get_num('Waiting time: ',
str(self.config['lock_delay']))
if delay is False:
continue
elif delay == -1:
self.close()
else:
self.config['lock_delay'] = delay
elif menu == 5:
if self.config['rem_db'] is True:
self.config['rem_db'] = False
elif self.config['rem_db'] is False:
self.config['rem_db'] = True
elif menu == 6:
if self.config['rem_key'] is True:
self.config['rem_key'] = False
elif self.config['rem_key'] is False:
self.config['rem_key'] = True
elif menu == 7:
if self.config['skip_menu'] is True:
self.config['skip_menu'] = False
elif self.config['skip_menu'] is False:
self.config['skip_menu'] = True
elif menu == 8:
if self.config['pin'] is True:
self.config['pin'] = False
elif self.config['pin'] is False:
self.config['pin'] = True
elif menu == 9:
self.config = {'del_clip': True, # standard config
'clip_delay': 20,
'lock_db': True,
'lock_delay': 60,
'rem_db': True,
'rem_key': False,
'skip_menu': False,
'pin': True}
elif menu == 10:
write_config(self, self.config)
return True
elif menu is False:
return False
elif menu == -1:
self.close()
def draw_lock_menu(self, changed, highlight, *misc):
'''Draw menu for locked database'''
h_color = 6
n_color = 1
if changed is True:
cur_dir = self.cur_dir + '*'
else:
cur_dir = self.cur_dir
try:
self.stdscr.clear()
self.stdscr.addstr(
0, 0, self.loginname + '@' + self.hostname + ':',
cur.color_pair(2))
self.stdscr.addstr(
0, len(self.loginname + '@' + self.hostname + ':'),
cur_dir)
for i, j, k in misc:
if i == highlight:
self.stdscr.addstr(i, j, k, cur.color_pair(h_color))
else:
self.stdscr.addstr(i, j, k, cur.color_pair(n_color))
        except:  # to prevent a crash if screen is small
            pass
finally:
self.stdscr.refresh()
def main_loop(self, kdb_file=None, remote = False):
        '''The main loop. The program always returns to this method.'''
if remote is True:
self.remote_interface()
else:
# This is needed to remember last database and open it directly
self.get_last_db()
if kdb_file is not None:
self.cur_dir = kdb_file
if self.open_db(True) is True:
db = DBBrowser(self)
del db
last = self.cur_dir.split('/')[-1]
self.cur_dir = self.cur_dir[:-len(last) - 1]
elif self.last_file is not None and self.config['rem_db'] is True:
self.cur_dir = self.last_file
if self.open_db(True) is True:
db = DBBrowser(self)
del db
last = self.cur_dir.split('/')[-1]
self.cur_dir = self.cur_dir[:-len(last) - 1]
while True:
self.get_last_db()
menu = self.gen_menu(1, ((1, 0, 'Open existing database (1)'),
(2, 0, 'Create new database (2)'),
                                     (3, 0, 'Connect to a remote database (3)'),
(4, 0, 'Configuration (4)'),
(5, 0, 'Quit (5)')),
(7, 0, 'Type \'F1\' for help inside the editor, '
'file or database browser.'),
(8, 0, 'Type \'F5\' to return to the previous'
' dialog at any time.'))
if menu == 1:
if self.open_db() is False:
continue
db = DBBrowser(self)
del db
last = self.cur_dir.split('/')[-1]
self.cur_dir = self.cur_dir[:-len(last) - 1]
elif menu == 2:
while True:
auth = self.gen_menu(1, (
(1, 0, 'Use a password (1)'),
(2, 0, 'Use a keyfile (2)'),
(3, 0, 'Use both (3)')))
password = None
confirm = None
filepath = None
self.db = KPDBv1(new=True)
if auth is False:
break
elif auth == -1:
self.db = None
self.close()
if auth == 1 or auth == 3:
while True:
password = self.get_password('Password: ')
if password is False:
break
elif password == -1:
self.db = None
self.close()
confirm = self.get_password('Confirm: ')
if confirm is False:
break
elif confirm == -1:
self.db = None
self.close()
if password == confirm:
self.db.password = password
break
else:
self.draw_text(False,
(1, 0,
                                                'Passwords didn\'t match!'),
(3, 0, 'Press any key'))
if self.any_key() == -1:
self.db = None
self.close()
if auth != 3:
self.db.keyfile = None
if password is False or confirm is False:
continue
if auth == 2 or auth == 3:
while True:
filepath = FileBrowser(self, False, True, None)()
if filepath is False:
break
elif filepath == -1:
self.close()
elif not isfile(filepath):
self.draw_text(False,
                                           (1, 0, 'That\'s not a file!'),
(3, 0, 'Press any key'))
if self.any_key() == -1:
self.db = None
self.close()
continue
break
if filepath is False:
continue
self.db.keyfile = filepath
if auth != 3:
self.db.password = None
if auth is not False:
db = DBBrowser(self)
del db
last = self.cur_dir.split('/')[-1]
self.cur_dir = self.cur_dir[:-len(last) - 1]
else:
self.db = None
break
elif menu == 3:
self.remote_interface()
elif menu == 4:
self.gen_config_menu()
elif menu == 5 or menu is False or menu == -1:
self.close()
def open_db(self, skip_fb=False):
''' This method opens a database.'''
if skip_fb is False:
filepath = FileBrowser(self, True, False, self.last_file)()
if filepath is False:
return False
elif filepath == -1:
self.close()
else:
self.cur_dir = filepath
ret = self.get_authentication()
if ret is False:
return False
password, keyfile = ret
try:
if isfile(self.cur_dir + '.lock'):
self.draw_text(False,
(1, 0, 'Database seems to be opened.'
' Open file in read-only mode?'
' [(y)/n]'))
while True:
try:
e = self.stdscr.getch()
except KeyboardInterrupt:
e = 4
if e == ord('n'):
read_only = False
break
elif e == 4:
self.close()
elif e == cur.KEY_RESIZE:
self.resize_all()
elif e == cur.KEY_F5:
return False
else:
read_only = True
break
else:
read_only = False
self.db = KPDBv1(self.cur_dir, password, keyfile, read_only)
self.db.load()
return True
except KPError as err:
self.draw_text(False,
(1, 0, err.__str__()),
(4, 0, 'Press any key.'))
if self.any_key() == -1:
self.close()
last = self.cur_dir.split('/')[-1]
self.cur_dir = self.cur_dir[:-len(last) - 1]
return False
def remote_interface(self, ask_for_agent = True, agent = False):
if ask_for_agent is True and agent is False:
use_agent = self.gen_menu(1, ((1, 0, 'Use agent (1)'),
(2, 0, 'Use no agent (2)')))
elif agent is True:
use_agent = 1
else:
use_agent = 2
if use_agent == 1:
port = self.get_num("Agent port: ", "50001", 5)
if port is False:
return False
elif port == -1:
self.close()
sock = socket(AF_INET, SOCK_STREAM)
sock.settimeout(60)
try:
sock.connect(('localhost', port))
sendmsg(sock, build_message((b'GET',)))
except OSError as err:
self.draw_text(False, (1, 0, err.__str__()),
(3, 0, "Press any key."))
if self.any_key() == -1:
self.close()
return False
db_buf = receive(sock)
if db_buf[:4] == b'FAIL' or db_buf[:4] == b'[Err':
self.draw_text(False,
(1, 0, db_buf),
(3, 0, 'Press any key.'))
if self.any_key() == -1:
self.close()
return False
sock.shutdown(SHUT_RDWR)
sock.close()
sock = socket(AF_INET, SOCK_STREAM)
sock.settimeout(60)
try:
sock.connect(('localhost', port))
sendmsg(sock, build_message((b'GETC',)))
except OSError as err:
self.draw_text(False, (1, 0, err.__str__()),
(3, 0, "Press any key."))
if self.any_key() == -1:
self.close()
return False
answer = receive(sock)
parts = answer.split(b'\xB2\xEA\xC0')
password = parts.pop(0).decode()
keyfile_cont = parts.pop(0).decode()
if keyfile_cont == '':
keyfile = None
else:
if not isdir('/tmp/keepassc'):
makedirs('/tmp/keepassc')
with open('/tmp/keepassc/tmp_keyfile', 'w') as handler:
handler.write(parts.pop(0).decode())
keyfile = '/tmp/keepassc/tmp_keyfile'
server = parts.pop(0).decode()
port = int(parts.pop(0))
if parts.pop(0) == b'True':
ssl = True
else:
ssl = False
tls_dir = parts.pop(0).decode()
elif use_agent is False:
return False
elif use_agent == -1:
self.close()
else:
if isfile(self.remote_home):
with open(self.remote_home, 'r') as handler:
last_address = handler.readline()
last_port = handler.readline()
else:
last_address = '127.0.0.1'
last_port = None
pass_auth = False
pass_ssl = False
while True:
if pass_auth is False:
ret = self.get_authentication()
if ret is False:
return False
elif ret == -1:
self.close()
password, keyfile = ret
pass_auth = True
if pass_ssl is False:
ssl = self.gen_menu(1, ((1, 0, 'Use SSL/TLS (1)'),
(2, 0, 'Plain text (2)')))
if ssl is False:
pass_auth = False
continue
elif ssl == -1:
self.close()
pass_ssl = True
server = Editor(self.stdscr, max_text_size=1,
inittext=last_address,
win_location=(0, 1),
win_size=(1, self.xsize),
title="Server address")()
if server is False:
pass_ssl = False
continue
elif server == -1:
self.close()
if last_port is None:
if ssl == 1:
ssl = True # for later use
std_port = "50003"
else:
ssl = False
std_port = "50000"
else:
if ssl == 1:
ssl = True # for later use
else:
ssl = False
std_port = last_port
port = self.get_num("Server port: ", std_port, 5)
if port is False:
                    pass_auth = True
                    pass_ssl = True
continue
elif port == -1:
self.close()
break
if ssl is True:
try:
datapath = realpath(expanduser(getenv('XDG_DATA_HOME')))
except:
datapath = realpath(expanduser('~/.local/share'))
finally:
tls_dir = join(datapath, 'keepassc')
else:
tls_dir = None
client = Client(logging.INFO, 'client.log', server, port,
password, keyfile, ssl, tls_dir)
db_buf = client.get_db()
if db_buf[:4] == 'FAIL' or db_buf[:4] == "[Err":
self.draw_text(False,
(1, 0, db_buf),
(3, 0, 'Press any key.'))
if self.any_key() == -1:
self.close()
return False
self.db = KPDBv1(None, password, keyfile)
self.db.load(db_buf)
db = DBBrowser(self, True, server, port, ssl, tls_dir)
del db
return True
def browser_help(self, mode_new):
'''Print help for filebrowser'''
if mode_new:
self.draw_help(
'Navigate with arrow keys.',
'\'o\' - choose directory',
'\'e\' - abort',
'\'H\' - show/hide hidden files',
'\'ngg\' - move to line n',
'\'G\' - move to last line',
'/text - go to \'text\' (like in vim/ranger)',
'\n',
'Press return.')
else:
self.draw_help(
'Navigate with arrow keys.',
'\'q\' - close program',
'\'e\' - abort',
'\'H\' - show/hide hidden files',
'\'ngg\' - move to line n',
'\'G\' - move to last line',
'/text - go to \'text\' (like in vim/ranger)',
'\n',
'Press return.')
def dbbrowser_help(self):
self.draw_help(
'\'e\' - go to main menu',
'\'q\' - close program',
'\'CTRL+D\' or \'CTRL+C\' - close program at any time',
'\'x\' - save db and close program',
'\'s\' - save db',
'\'S\' - save db with alternative filepath',
'\'c\' - copy password of current entry',
'\'b\' - copy username of current entry',
'\'H\' - show password of current entry',
'\'o\' - open URL of entry in standard webbrowser',
'\'P\' - edit db password',
'\'g\' - create group',
'\'G\' - create subgroup',
'\'y\' - create entry',
'\'d\' - delete group or entry (depends on what is marked)',
'\'t\' - edit title of selected group or entry',
'\'u\' - edit username',
'\'p\' - edit password',
'\'U\' - edit URL',
'\'C\' - edit comment',
'\'E\' - edit expiration date',
'\'f\' or \'/\' - find entry by title',
'\'L\' - lock db',
'\'m\' - enter move mode for marked group or entry',
'\'r\' - reload remote database (no function if not remote)',
'Navigate with arrow keys or h/j/k/l like in vim',
'Type \'return\' to enter subgroups',
'Type \'backspace\' to go back to parent',
'Type \'F5\' in a dialog to return to the previous one',
'\n',
'Press return.')
def move_help(self):
self.draw_help(
'\'e\' - go to main menu',
'\'q\' - close program',
'\'CTRL+D\' or \'CTRL+C\' - close program at any time',
'Navigate up or down with arrow keys or k and j',
'Navigate to subgroup with right arrow key or h',
'Navigate to parent with left arrow key or l',
'Type \'return\' to move the group to marked parent or the entry',
'\tto the marked group',
'Type \'backspace\' to move a group to the root',
'Type \'ESC\' to abort moving',
'\n',
'Press return.')
def show_dir(self, highlight, dir_cont):
'''List a directory with highlighting.'''
self.draw_text(changed=False)
for i in range(len(dir_cont)):
if i == highlight:
if isdir(self.cur_dir + '/' + dir_cont[i]):
try:
self.stdscr.addstr(
i + 1, 0, dir_cont[i], cur.color_pair(5))
except:
pass
else:
try:
self.stdscr.addstr(
i + 1, 0, dir_cont[i], cur.color_pair(3))
except:
pass
else:
if isdir(self.cur_dir + '/' + dir_cont[i]):
try:
self.stdscr.addstr(
i + 1, 0, dir_cont[i], cur.color_pair(4))
except:
pass
else:
try:
self.stdscr.addstr(i + 1, 0, dir_cont[i])
except:
pass
self.stdscr.refresh()
def close(self):
'''Close the program correctly.'''
if self.config['rem_key'] is False and isfile(self.key_home):
remove(self.key_home)
cur.nocbreak()
self.stdscr.keypad(0)
cur.endwin()
exit()
def show_groups(self, highlight, groups, cur_win, offset, changed, parent):
'''Just print all groups in a column'''
self.draw_text(changed)
self.group_win.clear()
if parent is self.db.root_group:
root_title = 'Parent: _ROOT_'
else:
root_title = 'Parent: ' + parent.title
if cur_win == 0:
h_color = 5
n_color = 4
else:
h_color = 6
n_color = 1
try:
ysize = self.group_win.getmaxyx()[0]
self.group_win.addstr(0, 0, root_title,
cur.color_pair(n_color))
if groups:
if len(groups) <= ysize - 3:
num = len(groups)
else:
num = ysize - 3
for i in range(num):
if highlight == i + offset:
if groups[i + offset].children:
title = '+' + groups[i + offset].title
else:
title = ' ' + groups[i + offset].title
self.group_win.addstr(i + 1, 0, title,
cur.color_pair(h_color))
else:
if groups[i + offset].children:
title = '+' + groups[i + offset].title
else:
title = ' ' + groups[i + offset].title
self.group_win.addstr(i + 1, 0, title,
cur.color_pair(n_color))
x_of_n = str(highlight + 1) + ' of ' + str(len(groups))
self.group_win.addstr(ysize - 2, 0, x_of_n)
except:
pass
finally:
self.group_win.refresh()
def show_entries(self, e_highlight, entries, cur_win, offset):
'''Just print all entries in a column'''
self.info_win.clear()
try:
self.entry_win.clear()
if entries:
if cur_win == 1:
h_color = 5
n_color = 4
else:
h_color = 6
n_color = 1
ysize = self.entry_win.getmaxyx()[0]
if len(entries) <= ysize - 3:
num = len(entries)
else:
num = ysize - 3
for i in range(num):
title = entries[i + offset].title
if date.today() > entries[i + offset].expire.date():
expired = True
else:
expired = False
if e_highlight == i + offset:
if expired is True:
self.entry_win.addstr(i, 2, title,
cur.color_pair(3))
else:
self.entry_win.addstr(i, 2, title,
cur.color_pair(h_color))
else:
if expired is True:
self.entry_win.addstr(i, 2, title,
cur.color_pair(7))
else:
self.entry_win.addstr(i, 2, title,
cur.color_pair(n_color))
self.entry_win.addstr(ysize - 2, 2, (str(e_highlight + 1) +
' of ' +
str(len(entries))))
except:
pass
finally:
self.entry_win.noutrefresh()
try:
if entries:
xsize = self.entry_win.getmaxyx()[1]
entry = entries[e_highlight]
if entry.title is None:
title = ""
elif len(entry.title) > xsize:
title = entry.title[:xsize - 2] + '\\'
else:
title = entry.title
if entry.group.title is None:
group_title = ""
elif len(entry.group.title) > xsize:
group_title = entry.group.title[:xsize - 9] + '\\'
else:
group_title = entry.group.title
if entry.username is None:
username = ""
elif len(entry.username) > xsize:
username = entry.username[:xsize - 12] + '\\'
else:
username = entry.username
if entry.url is None:
url = ""
elif len(entry.url) > xsize:
                    url = entry.url[:xsize - 7] + '\\'
else:
url = entry.url
if entry.creation is None:
creation = ""
else:
creation = entry.creation.__str__()[:10]
if entry.last_access is None:
last_access = ""
else:
last_access = entry.last_access.__str__()[:10]
if entry.last_mod is None:
last_mod = ""
else:
last_mod = entry.last_mod.__str__()[:10]
if entry.expire is None:
expire = ""
else:
if entry.expire.__str__()[:19] == '2999-12-28 23:59:59':
expire = "Expires: Never"
else:
expire = "Expires: " + entry.expire.__str__()[:10]
if entry.comment is None:
comment = ""
else:
comment = entry.comment
self.info_win.addstr(2, 0, title, cur.A_BOLD)
self.info_win.addstr(3, 0, "Group: " + group_title)
self.info_win.addstr(4, 0, "Username: " + username)
self.info_win.addstr(5, 0, "URL: " + url)
self.info_win.addstr(6, 0, "Creation: " + creation)
self.info_win.addstr(7, 0, "Access: " + last_access)
self.info_win.addstr(8, 0, "Modification: " + last_mod)
self.info_win.addstr(9, 0, expire)
if date.today() > entry.expire.date():
self.info_win.addstr(9, 22, ' (expired)')
if '\n' in comment:
comment = comment.split('\n')[0]
dots = ' ...'
else:
dots = ''
self.info_win.addstr(10, 0, "Comment: " + comment + dots)
except:
pass
finally:
self.info_win.noutrefresh()
cur.doupdate()
|
wechat_django/admin/utils.py | UltraVacuum/wechat-django | 166 | 12723434 | <reponame>UltraVacuum/wechat-django
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from six.moves import reduce
from six.moves.urllib.parse import parse_qsl
def anchor(text, href, **kwargs):
"""转化为a标签"""
@mark_safe
def wrapper(modeladmin, obj):
kwargs.update(
href=href,
text=text
)
for key, value in kwargs.items():
if callable(value):
kwargs[key] = value(modeladmin, obj)
return kwargs["text"] and '<a href="{href}">{text}</a>'.format(**kwargs)
return wrapper
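# Illustrative usage sketch (an assumption, not part of the original module):
# the callable returned by anchor() takes (modeladmin, obj), so it is meant to
# be attached to a ModelAdmin and referenced from list_display; "text" and
# "href" may be literals or callables with the same signature.  The admin
# class and field names below are hypothetical.
#
#     class ArticleAdmin(admin.ModelAdmin):
#         list_display = ("title", "link")
#         link = anchor(text=lambda modeladmin, obj: obj.title,
#                       href=lambda modeladmin, obj: obj.url)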
def foreignkey(field_name):
"""
    Converts a foreign key value into a clickable link.

    If field_name is 'parent', the link text will be str(obj.parent) and the
    link points to the admin change view for obj.parent.
"""
@mark_safe
def _linkify(obj):
app_label = obj._meta.app_label
linked_obj = getattr(obj, field_name)
model_name = linked_obj._meta.model_name
view_name = "admin:{app_label}_{model_name}_change".format(
app_label=app_label,
model_name=model_name
)
link_url = reverse(view_name, kwargs=dict(
object_id=linked_obj.id,
wechat_app_id=obj.app_id
))
return '<a href="{0}">{1}</a>'.format(link_url, linked_obj)
_linkify.short_description = _(field_name)
_linkify.admin_order_field = field_name
return _linkify
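# Illustrative usage sketch (an assumption, not part of the original module):
# foreignkey() builds a list_display column whose cell links to the related
# object's admin change page; note that the reversed URL also expects the row
# to carry an app_id.  The admin class and the "app" field are hypothetical.
#
#     class MessageAdmin(admin.ModelAdmin):
#         list_display = ("content", foreignkey("app"))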
def list_property(field_name, **kwargs):
def _from_property(obj):
rv = reduce(getattr, field_name.split("."), obj)
return rv() if callable(rv) else rv
for key, value in kwargs.items():
setattr(_from_property, key, value)
return _from_property
def field_property(field_name, **kwargs):
def _from_property(admin, obj=None):
if not obj:
return None
rv = reduce(getattr, field_name.split("."), obj)
return rv() if callable(rv) else rv
for key, value in kwargs.items():
setattr(_from_property, key, value)
return _from_property
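# Illustrative usage sketch (an assumption, not part of the original module):
# list_property() resolves a dotted attribute path per row for list_display,
# while field_property() does the same as a ModelAdmin attribute usable in
# readonly_fields.  The attribute paths and names below are hypothetical.
#
#     class MessageAdmin(admin.ModelAdmin):
#         list_display = ("content",
#                         list_property("app.name", short_description="app"))
#         readonly_fields = ("created_at",)
#         created_at = field_property("message.create_time",
#                                     short_description="created")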
def get_request_params(request, param):
"""从请求信息中获取想要的信息"""
if not hasattr(request, param):
preserved_filters_str = request.GET.get('_changelist_filters')
if preserved_filters_str:
preserved_filters = dict(parse_qsl(preserved_filters_str))
else:
preserved_filters = dict()
value = (request.GET.get(param)
or preserved_filters.get(param))
setattr(request, param, value)
return getattr(request, param)
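# Illustrative usage sketch (an assumption, not part of the original module):
# get_request_params() reads the parameter from the query string, falls back
# to the admin's preserved "_changelist_filters", and caches the value on the
# request.  A hypothetical ModelAdmin hook could use it like this:
#
#     def get_queryset(self, request):
#         qs = super().get_queryset(request)
#         app_id = get_request_params(request, "app_id")
#         return qs.filter(app_id=app_id) if app_id else qs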
|