filename (stringlengths 13–19) | text (stringlengths 134–1.04M)
---|---
the-stack_0_27205
|
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
from model import *
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--max_epoch', type=int, default=50, help='Epoch to run [default: 50]')
parser.add_argument('--batch_size', type=int, default=24, help='Batch Size during training [default: 24]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for momentum optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
parser.add_argument('--test_area', type=int, default=6, help='Which area to use for test, option: 1-6 [default: 6]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp model.py %s' % (LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 4096
NUM_CLASSES = 6
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
# BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
ALL_FILES = provider.getDataFiles('indoor3d_sem_seg_hdf5_data(6class)/all_files.txt')
room_filelist = [line.rstrip() for line in open('indoor3d_sem_seg_hdf5_data(6class)/room_filelist.txt')]
# Load ALL data
data_batch_list = []
label_batch_list = []
for h5_filename in ALL_FILES:
data_batch, label_batch = provider.loadDataFile(h5_filename)
data_batch_list.append(data_batch)
label_batch_list.append(label_batch)
data_batches = np.concatenate(data_batch_list, 0)
label_batches = np.concatenate(label_batch_list, 0)
print(data_batches.shape)
print(label_batches.shape)
test_area = 'Area_'+str(FLAGS.test_area)
train_idxs = []
test_idxs = []
for i, room_name in enumerate(room_filelist):
if test_area in room_name:
test_idxs.append(i)
else:
train_idxs.append(i)
train_data = data_batches[train_idxs, ...]
train_label = label_batches[train_idxs]
test_data = data_batches[test_idxs, ...]
test_label = label_batches[test_idxs]
print(train_data.shape, train_label.shape)
print(test_data.shape, test_label.shape)
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
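# Illustrative helper (not called by the training script): the staircase decay configured above is
# equivalent to the closed form below. Default values mirror the FLAGS defaults; the helper itself is
# only a sketch for reasoning about the schedule.
def _example_decayed_lr(batch, batch_size=24, base_lr=0.001, decay_step=300000, decay_rate=0.5):
    num_decays = (batch * batch_size) // decay_step
    return max(base_lr * (decay_rate ** num_decays), 0.00001)
# e.g. _example_decayed_lr(0) -> 0.001; _example_decayed_lr(12500) -> 0.0005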
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred = get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
loss = get_loss(pred, labels_pl)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = True
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
sess.run(init, {is_training_pl:True})
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
eval_one_epoch(sess, ops, test_writer)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
log_string('----')
current_data, current_label, _ = provider.shuffle_data(train_data[:, 0:NUM_POINT, :], train_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
if batch_idx % 100 == 0:
print('Current batch/total batch num: %d/%d'%(batch_idx, num_batches))
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += (BATCH_SIZE*NUM_POINT)
loss_sum += loss_val
log_string('mean loss: %f' % (loss_sum / float(num_batches)))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
log_string('----')
current_data = test_data[:, 0:NUM_POINT, :]
current_label = np.squeeze(test_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
# print('start_idx: ', start_idx)
# print('end_idx: ', end_idx)
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred']],
feed_dict=feed_dict)
test_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
# print('current label:', current_label)
# print('correct: ', correct)
total_correct += correct
total_seen += (BATCH_SIZE*NUM_POINT)
loss_sum += (loss_val*BATCH_SIZE)
for i in range(start_idx, end_idx):
for j in range(NUM_POINT):
l = current_label[i, j]
# print('current_label[i,j]:', l)
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i-start_idx, j] == l)
log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT)))
log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class, dtype=float))))
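# Worked example with made-up counts: for NUM_CLASSES = 3, total_correct_class = [90, 40, 10] and
# total_seen_class = [100, 50, 50], the per-class accuracies are [0.9, 0.8, 0.2] and the reported
# 'eval avg class acc' is their unweighted mean, ~0.633.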
if __name__ == "__main__":
train()
LOG_FOUT.close()
|
the-stack_0_27206
|
# coding=utf-8
# pynput
# Copyright (C) 2015-2019 Moses Palmér
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
The module containing mouse classes.
See the documentation for more information.
"""
# pylint: disable=C0103
# Button, Controller and Listener are not constants
import os
import sys
if os.environ.get('__PYNPUT_GENERATE_DOCUMENTATION') == 'yes':
from ._base import Button, Controller, Listener
else:
Button = None
Controller = None
Listener = None
from pynput._util import Events
if sys.platform == 'darwin':
if not Button and not Controller and not Listener:
from ._darwin import Button, Controller, Listener
elif sys.platform == 'win32':
if not Button and not Controller and not Listener:
from ._win32 import Button, Controller, Listener
else:
if not Button and not Controller and not Listener:
try:
from ._xorg import Button, Controller, Listener
except ImportError:
# For now, since we only support Xlib anyway, we re-raise these
# errors to allow users to determine the cause of failures to import
raise
if not Button or not Controller or not Listener:
raise ImportError('this platform is not supported')
class Events(Events):
"""A mouse event listener supporting synchronous iteration over the events.
Possible events are:
:class:`Events.Move`
The mouse was moved.
:class:`Events.Click`
A mouse button was pressed or released.
:class:`Events.Scroll`
The device was scrolled.
"""
_Listener = Listener
class Move(Events.Event):
"""A move event.
"""
def __init__(self, x, y):
#: The X screen coordinate.
self.x = x
#: The Y screen coordinate.
self.y = y
class Click(Events.Event):
"""A click event.
"""
def __init__(self, x, y, button, pressed):
#: The X screen coordinate.
self.x = x
#: The Y screen coordinate.
self.y = y
#: The button.
self.button = button
#: Whether the button was pressed.
self.pressed = pressed
class Scroll(Events.Event):
"""A scoll event.
"""
def __init__(self, x, y, dx, dy):
#: The X screen coordinate.
self.x = x
#: The Y screen coordinate.
self.y = y
#: The number of horizontal steps.
self.dx = dx
#: The number of vertical steps.
self.dy = dy
def __init__(self):
super(Events, self).__init__(
on_move=self.Move,
on_click=self.Click,
on_scroll=self.Scroll)
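# Hedged usage sketch of the synchronous-iteration API this class provides; it follows pynput's
# documented pattern, with the break condition chosen only for illustration:
#
#     from pynput import mouse
#
#     with mouse.Events() as events:
#         for event in events:
#             if isinstance(event, mouse.Events.Click) and not event.pressed:
#                 print('Released', event.button, 'at', (event.x, event.y))
#                 break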
|
the-stack_0_27207
|
import os
import warnings
import joblib
import yaml
import numpy as np
from matplotlib import pyplot as plt
from skimage import morphology, segmentation, measure, color, img_as_float
from mathtools import utils
from visiontools import imageprocessing
def removeTargetModel(seg_image, num):
""" Find and remove the target model from a segment image. """
seg_centroids = np.row_stack(tuple(
np.column_stack(np.nonzero(seg_image == i)).mean(axis=0)
for i in range(1, num + 1)
))
direction = np.array([3, 4])
seg_scores = seg_centroids @ direction
# Segment labels are one-indexed
best_idx = np.array(seg_scores).argmax() + 1
seg_image[seg_image == best_idx] = 0
seg_image = segmentation.relabel_sequential(seg_image)[0]
return seg_image, num - 1
def makeCoarseSegmentLabels(mask, min_size=100):
mask = morphology.remove_small_objects(mask, min_size=min_size, connectivity=1)
labels, num = measure.label(mask.astype(int), return_num=True)
if num < 2:
return labels
labels, num = removeTargetModel(labels, num)
return labels
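# Minimal sketch (not used by the pipeline): how the coarse labelling above behaves on a toy boolean
# mask; the shapes and min_size are made up for illustration.
def _coarse_label_demo():
    toy_mask = np.zeros((8, 8), dtype=bool)
    toy_mask[1:3, 1:3] = True   # 4-pixel blob, removed because it is smaller than min_size
    toy_mask[4:8, 4:8] = True   # 16-pixel blob, kept and labelled 1
    return makeCoarseSegmentLabels(toy_mask, min_size=10)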
def makeFineSegmentLabels(coarse_seg_labels, bg_mask_sat, min_size=100):
labels, num = measure.label(coarse_seg_labels, return_num=True)
for i in range(1, num + 1):
in_seg = labels == i
bg_vals = bg_mask_sat[in_seg]
class_counts = np.hstack((np.sum(bg_vals == 0), np.sum(bg_vals == 1)))
is_bg = class_counts.argmax().astype(bool)
if is_bg:
labels[in_seg] = 0
fg_mask = morphology.remove_small_objects(labels != 0, min_size=min_size, connectivity=1)
labels, num = measure.label(fg_mask.astype(int), return_num=True)
for i in range(1, num + 1):
in_seg = labels == i
labels[in_seg] = coarse_seg_labels[in_seg]
labels = segmentation.relabel_sequential(labels)[0]
return labels
def makeHsvFrame(rgb_image):
rgb_image = img_as_float(rgb_image)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "divide by zero")
hsv_image = color.rgb2hsv(rgb_image)
return hsv_image
def plotHsvHist(hsv_frame_seq, seg_labels_seq, file_path=None):
fg = hsv_frame_seq[seg_labels_seq != 0]
names = ('hue', 'sat', 'val')
fig, axes = plt.subplots(3)
for i in range(3):
axes[i].hist(fg[:, i], bins=100)
axes[i].set_ylabel(names[i])
plt.tight_layout()
plt.savefig(file_path)
plt.close()
def main(
out_dir=None, data_dir=None, person_masks_dir=None, bg_masks_dir=None,
sat_thresh=1, start_from=None, stop_at=None, num_disp_imgs=None):
out_dir = os.path.expanduser(out_dir)
data_dir = os.path.expanduser(data_dir)
person_masks_dir = os.path.expanduser(person_masks_dir)
bg_masks_dir = os.path.expanduser(bg_masks_dir)
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_data_dir = os.path.join(out_dir, 'data')
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
def loadFromDir(var_name, dir_name):
return joblib.load(os.path.join(dir_name, f"{var_name}.pkl"))
def saveToWorkingDir(var, var_name):
joblib.dump(var, os.path.join(out_data_dir, f"{var_name}.pkl"))
trial_ids = utils.getUniqueIds(data_dir, prefix='trial=', to_array=True)
for seq_idx, trial_id in enumerate(trial_ids):
if start_from is not None and seq_idx < start_from:
continue
if stop_at is not None and seq_idx > stop_at:
break
trial_str = f"trial={trial_id}"
logger.info(f"Processing video {seq_idx + 1} / {len(trial_ids)} (trial {trial_id})")
logger.info(" Loading data...")
rgb_frame_seq = loadFromDir(f'{trial_str}_rgb-frame-seq', data_dir)
person_mask_seq = loadFromDir(f'{trial_str}_person-mask-seq', person_masks_dir)
bg_mask_seq_depth = loadFromDir(f'{trial_str}_bg-mask-seq-depth', bg_masks_dir)
# bg_mask_seq_rgb = loadFromDir(f'{trial_str}_bg-mask-seq-rgb', bg_masks_dir)
logger.info(" Making segment labels...")
fg_mask_seq = ~bg_mask_seq_depth
seg_labels_seq = np.stack(tuple(map(makeCoarseSegmentLabels, fg_mask_seq)), axis=0)
hsv_frame_seq = np.stack(tuple(map(makeHsvFrame, rgb_frame_seq)), axis=0)
sat_frame_seq = hsv_frame_seq[..., 1]
bg_mask_seq_sat = sat_frame_seq < sat_thresh
seg_labels_seq[person_mask_seq] = 0
seg_labels_seq = np.stack(
tuple(
makeFineSegmentLabels(segs, sat)
for segs, sat in zip(seg_labels_seq, bg_mask_seq_sat)
),
axis=0
)
logger.info(" Saving output...")
saveToWorkingDir(seg_labels_seq.astype(np.uint8), f'{trial_str}_seg-labels-seq')
plotHsvHist(
hsv_frame_seq, seg_labels_seq,
file_path=os.path.join(fig_dir, f'{trial_str}_hsv-hists.png')
)
if num_disp_imgs is not None:
if rgb_frame_seq.shape[0] > num_disp_imgs:
idxs = np.arange(rgb_frame_seq.shape[0])
np.random.shuffle(idxs)
idxs = idxs[:num_disp_imgs]
else:
idxs = slice(None, None, None)
imageprocessing.displayImages(
*(rgb_frame_seq[idxs]),
*(bg_mask_seq_sat[idxs]),
*(bg_mask_seq_depth[idxs]),
*(person_mask_seq[idxs]),
*(seg_labels_seq[idxs]),
num_rows=5, file_path=os.path.join(fig_dir, f'{trial_str}_best-frames.png')
)
if __name__ == "__main__":
# Parse command-line args and config file
cl_args = utils.parse_args(main)
config, config_fn = utils.parse_config(cl_args, script_name=__file__)
# Create output directory, instantiate log file and write config options
out_dir = os.path.expanduser(config['out_dir'])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(os.path.join(out_dir, config_fn), 'w') as outfile:
yaml.dump(config, outfile)
utils.copyFile(__file__, out_dir)
main(**config)
|
the-stack_0_27208
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, Fraunhofer FKIE/CMS, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function, unicode_literals
import rospy
from python_qt_binding.QtCore import Signal
import fkie_node_manager_daemon.remote as remote
import fkie_node_manager_daemon.settings_stub as scstub
from fkie_node_manager_daemon import url as nmdurl
from .channel_interface import ChannelInterface
class SettingsChannel(ChannelInterface):
yaml_config_signal = Signal(str, str)
'''
:ivar str,str yaml_config_signal: signal emit YAML configuration from daemon {YAML string, grpc_url}.
'''
def clear_cache(self, grpc_url=''):
pass
def get_settings_manager(self, uri='localhost:12321'):
channel = remote.get_insecure_channel(uri)
if channel is not None:
return scstub.SettingsStub(channel)
raise Exception("Node manager daemon '%s' not reachable" % uri)
def get_config_threaded(self, grpc_url='grpc://localhost:12321'):
self._threads.start_thread("gcfgt_%s" % grpc_url, target=self.get_config, args=(grpc_url, True))
def get_config(self, grpc_url='grpc://localhost:12321', threaded=False):
rospy.logdebug("get config from %s" % (grpc_url))
uri, _ = nmdurl.split(grpc_url)
sm = self.get_settings_manager(uri)
try:
yaml_cfg = sm.get_config()
if threaded:
self.yaml_config_signal.emit(yaml_cfg, grpc_url)
self._threads.finished("gcfgt_%s" % grpc_url)
return yaml_cfg
except Exception as e:
self.error.emit("get_config", "grpc://%s" % uri, "", e)
def set_config(self, grpc_url='grpc://localhost:12321', data=''):
rospy.logdebug("set config to %s" % (grpc_url))
uri, _ = nmdurl.split(grpc_url)
sm = self.get_settings_manager(uri)
sm.set_config(data)
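# Hypothetical sketch of the URL handling this class relies on: nmdurl.split() is used above to turn
# 'grpc://host:port' into the bare 'host:port' URI handed to the stub. The helper below only mirrors
# that observed usage and is not the real nmdurl implementation.
def _split_grpc_url_example(grpc_url='grpc://localhost:12321'):
    uri = grpc_url[len('grpc://'):] if grpc_url.startswith('grpc://') else grpc_url
    return uri, ''  # e.g. ('localhost:12321', '')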
|
the-stack_0_27210
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import textwrap
import unittest
from pants.option.config import Config
from pants.util.contextutil import temporary_file
class ConfigTest(unittest.TestCase):
def setUp(self):
with temporary_file() as ini1:
ini1.write(textwrap.dedent(
"""
[DEFAULT]
name: foo
answer: 42
scale: 1.2
path: /a/b/%(answer)s
embed: %(path)s::foo
disclaimer:
Let it be known
that.
blank_section:
[a]
list: [1, 2, 3, %(answer)s]
listappend: +[7, 8, 9]
[b]
preempt: True
dict: {
'a': 1,
'b': %(answer)s,
'c': ['%(answer)s', %(answer)s]
}
"""))
ini1.close()
with temporary_file() as ini2:
ini2.write(textwrap.dedent(
"""
[a]
fast: True
[b]
preempt: False
[defined_section]
"""))
ini2.close()
self.config = Config.load(configpaths=[ini1.name, ini2.name])
def test_getstring(self):
self.assertEquals('/a/b/42', self.config.get('a', 'path'))
self.assertEquals('/a/b/42::foo', self.config.get('a', 'embed'))
self.assertEquals('[1, 2, 3, 42]', self.config.get('a', 'list'))
self.assertEquals('+[7, 8, 9]', self.config.get('a', 'listappend'))
self.assertEquals(
"""
Let it be known
that.""",
self.config.get('b', 'disclaimer'))
self._check_defaults(self.config.get, '')
self._check_defaults(self.config.get, '42')
def test_default_section_fallback(self):
self.assertEquals('foo', self.config.get('defined_section', 'name'))
self.assertEquals('foo', self.config.get('not_a_defined_section', 'name'))
def test_sections(self):
self.assertEquals(['a', 'b', 'defined_section'], self.config.sections())
def test_empty(self):
config = Config.load([])
self.assertEquals([], config.sections())
def _check_defaults(self, accessor, default):
self.assertEquals(None, accessor('c', 'fast'))
self.assertEquals(None, accessor('c', 'preempt', None))
self.assertEquals(default, accessor('c', 'jake', default=default))
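# Side note (illustrative, Python 3 stdlib analogue): the '%(answer)s' substitutions the assertions
# above rely on are plain ConfigParser-style basic interpolation, e.g.:
#
#     import configparser
#     cp = configparser.ConfigParser()
#     cp.read_string("[DEFAULT]\nanswer: 42\npath: /a/b/%(answer)s\n[a]\n")
#     assert cp.get('a', 'path') == '/a/b/42'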
|
the-stack_0_27211
|
import torch
import os
from network.base_net import RNN
from network.vdn_net import VDNNet
class VDN:
def __init__(self, args):
self.n_actions = args.n_actions
self.n_agents = args.n_agents
self.state_shape = args.state_shape
self.obs_shape = args.obs_shape
input_shape = self.obs_shape
# Decide the RNN input dimension based on the arguments
if args.last_action:
input_shape += self.n_actions
if args.reuse_network:
input_shape += self.n_agents
# Neural networks
self.eval_rnn = RNN(input_shape, args)  # network each agent uses to choose actions
self.target_rnn = RNN(input_shape, args)
self.eval_vdn_net = VDNNet()  # network that sums up the agents' Q-values
self.target_vdn_net = VDNNet()
self.args = args
if self.args.cuda:
self.eval_rnn.cuda()
self.target_rnn.cuda()
self.eval_vdn_net.cuda()
self.target_vdn_net.cuda()
self.model_dir = args.model_dir + '/' + args.alg + '/' + args.map
# Load the model if one already exists
if self.args.load_model:
if os.path.exists(self.model_dir + '/rnn_net_params.pkl'):
path_rnn = self.model_dir + '/rnn_net_params.pkl'
path_vdn = self.model_dir + '/vdn_net_params.pkl'
map_location = 'cuda:0' if self.args.cuda else 'cpu'
self.eval_rnn.load_state_dict(torch.load(path_rnn, map_location=map_location))
self.eval_vdn_net.load_state_dict(torch.load(path_vdn, map_location=map_location))
print('Successfully load the model: {} and {}'.format(path_rnn, path_vdn))
else:
raise Exception("No model!")
# Make the target nets start with the same parameters as the eval nets
self.target_rnn.load_state_dict(self.eval_rnn.state_dict())
self.target_vdn_net.load_state_dict(self.eval_vdn_net.state_dict())
self.eval_parameters = list(self.eval_vdn_net.parameters()) + list(self.eval_rnn.parameters())
if args.optimizer == "RMS":
self.optimizer = torch.optim.RMSprop(self.eval_parameters, lr=args.lr)
# During execution, an eval_hidden has to be maintained for every agent
# During learning, an eval_hidden and a target_hidden have to be maintained for every agent in every episode
self.eval_hidden = None
self.target_hidden = None
print('Init alg VDN')
def learn(self, batch, max_episode_len, train_step, epsilon=None):  # train_step is the index of this learning step, used to decide when to update the target nets' parameters
'''
The data sampled for learning is four-dimensional: 1) which episode, 2) which transition within the
episode, 3) which agent, 4) the obs dimension itself. When choosing actions the network needs not only
the current inputs but also a hidden_state, and the hidden_state depends on earlier experience, so
transitions cannot be sampled at random. Instead, several whole episodes are sampled at once and the
transitions at the same position of every episode are fed to the network together.
'''
episode_num = batch['o'].shape[0]
self.init_hidden(episode_num)
for key in batch.keys():  # convert the data in the batch to tensors
if key == 'u':
batch[key] = torch.tensor(batch[key], dtype=torch.long)
else:
batch[key] = torch.tensor(batch[key], dtype=torch.float32)
# TODO pymarl does not use the last transition when sampling experience; find out why
u, r, avail_u, avail_u_next, terminated = batch['u'], batch['r'], batch['avail_u'], \
batch['avail_u_next'], batch['terminated']
mask = 1 - batch["padded"].float() # 用来把那些填充的经验的TD-error置0,从而不让它们影响到学习
if self.args.cuda:
u = u.cuda()
r = r.cuda()
mask = mask.cuda()
terminated = terminated.cuda()
# Get each agent's Q-values, shaped (num_episodes, max_episode_len, n_agents, n_actions)
q_evals, q_targets = self.get_q_values(batch, max_episode_len)
# Take the Q-value of each agent's chosen action and squeeze away the last dim, which now has size 1
q_evals = torch.gather(q_evals, dim=3, index=u).squeeze(3)
# Compute target_q
q_targets[avail_u_next == 0.0] = - 9999999
q_targets = q_targets.max(dim=3)[0]
q_total_eval = self.eval_vdn_net(q_evals)
q_total_target = self.target_vdn_net(q_targets)
targets = r + self.args.gamma * q_total_target * (1 - terminated)
td_error = targets.detach() - q_total_eval
masked_td_error = mask * td_error  # wipe out the td_error of padded experiences
# loss = masked_td_error.pow(2).mean()
# mean() cannot be used directly because many experiences are padding; summing and dividing by the number of real experiences gives the true mean
loss = (masked_td_error ** 2).sum() / mask.sum()
# print('Loss is ', loss)
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.eval_parameters, self.args.grad_norm_clip)
self.optimizer.step()
if train_step > 0 and train_step % self.args.target_update_cycle == 0:
self.target_rnn.load_state_dict(self.eval_rnn.state_dict())
self.target_vdn_net.load_state_dict(self.eval_vdn_net.state_dict())
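# Tiny illustrative example (made-up tensors) of the masked TD loss computed above; padded
# transitions get mask 0 and therefore contribute nothing to the mean:
#
#     td_error = torch.tensor([[0.5], [1.0], [2.0]])  # one episode, three transitions
#     mask     = torch.tensor([[1.0], [1.0], [0.0]])  # the last transition is padding
#     loss = ((mask * td_error) ** 2).sum() / mask.sum()  # (0.25 + 1.0) / 2 = 0.625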
def _get_inputs(self, batch, transition_idx):
# Take the experience at transition_idx from every episode; take all of u_onehot because the previous action is needed
obs, obs_next, u_onehot = batch['o'][:, transition_idx], \
batch['o_next'][:, transition_idx], batch['u_onehot'][:]
episode_num = obs.shape[0]
inputs, inputs_next = [], []
inputs.append(obs)
inputs_next.append(obs_next)
# Append the previous action and the agent id to the obs
if self.args.last_action:
if transition_idx == 0:  # for the first transition, use a zero vector as the previous action
inputs.append(torch.zeros_like(u_onehot[:, transition_idx]))
else:
inputs.append(u_onehot[:, transition_idx - 1])
inputs_next.append(u_onehot[:, transition_idx])
if self.args.reuse_network:
# The current obs is 3-D, with dims (episode, agent, obs dim), so the id vector can be appended directly along dim 1.
# For example, appending (1, 0, 0, 0, 0) to agent_0 marks it as agent 0 of 5. Since agent_0's data sits in row 0,
# the agent ids to append form exactly an identity matrix: ones on the diagonal, zeros elsewhere.
inputs.append(torch.eye(self.args.n_agents).unsqueeze(0).expand(episode_num, -1, -1))
inputs_next.append(torch.eye(self.args.n_agents).unsqueeze(0).expand(episode_num, -1, -1))
# Concatenate the three obs parts and reshape the data of episode_num episodes and n_agents agents into episode_num*n_agents rows.
# All agents share one network here, and each row carries its own agent id, so each row is still that agent's own data.
inputs = torch.cat([x.reshape(episode_num * self.args.n_agents, -1) for x in inputs], dim=1)
inputs_next = torch.cat([x.reshape(episode_num * self.args.n_agents, -1) for x in inputs_next], dim=1)
return inputs, inputs_next
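# Illustrative shapes only: with n_agents = 3 and episode_num = 2, the identity block appended above
# expands to (2, 3, 3); row k of each episode is agent k's one-hot id, e.g. (1, 0, 0) for agent 0.
#
#     torch.eye(3).unsqueeze(0).expand(2, -1, -1).shape  # -> torch.Size([2, 3, 3])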
def get_q_values(self, batch, max_episode_len):
episode_num = batch['o'].shape[0]
q_evals, q_targets = [], []
for transition_idx in range(max_episode_len):
inputs, inputs_next = self._get_inputs(batch, transition_idx)  # add last_action and agent_id to the obs
if self.args.cuda:
inputs = inputs.cuda()
inputs_next = inputs_next.cuda()
self.eval_hidden = self.eval_hidden.cuda()
self.target_hidden = self.target_hidden.cuda()
q_eval, self.eval_hidden = self.eval_rnn(inputs, self.eval_hidden)  # q_eval has shape (episode_num*n_agents, n_actions)
q_target, self.target_hidden = self.target_rnn(inputs_next, self.target_hidden)
# Reshape q_eval back to (episode_num, n_agents, n_actions)
q_eval = q_eval.view(episode_num, self.n_agents, -1)
q_target = q_target.view(episode_num, self.n_agents, -1)
q_evals.append(q_eval)
q_targets.append(q_target)
# q_evals and q_targets are lists holding max_episode_len arrays, each shaped (num_episodes, n_agents, n_actions);
# stack the lists into arrays shaped (num_episodes, max_episode_len, n_agents, n_actions)
q_evals = torch.stack(q_evals, dim=1)
q_targets = torch.stack(q_targets, dim=1)
return q_evals, q_targets
def init_hidden(self, episode_num):
# Initialize an eval_hidden and a target_hidden for every agent in every episode
self.eval_hidden = torch.zeros((episode_num, self.n_agents, self.args.rnn_hidden_dim))
self.target_hidden = torch.zeros((episode_num, self.n_agents, self.args.rnn_hidden_dim))
def save_model(self, train_step):
num = str(train_step // self.args.save_cycle)
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
torch.save(self.eval_vdn_net.state_dict(), self.model_dir + '/' + num + '_vdn_net_params.pkl')
torch.save(self.eval_rnn.state_dict(), self.model_dir + '/' + num + '_rnn_net_params.pkl')
|
the-stack_0_27213
|
from typing import List, Optional, Tuple
from math import floor
from PySide6.QtCore import QPoint, QRect, QSize, Qt
from PySide6.QtWidgets import QLayout, QSizePolicy, QStyle, QLayoutItem, QWidget
class FlowLayout(QLayout):
def __init__(self, parent: Optional[QWidget] = None, spacing: Optional[QSize] = None) -> None:
super().__init__(parent)
self._spacing = spacing if spacing else QSize(-1, -1)
self._items: List[QLayoutItem] = []
def __del__(self) -> None:
del self._items[:]
def addItem(self, item: QLayoutItem) -> None:
self._items.append(item)
def horizontalSpacing(self) -> int:
if self._spacing.width() >= 0:
return self._spacing.width()
else:
return self.smartSpacing(QStyle.PM_LayoutHorizontalSpacing)
def verticalSpacing(self) -> int:
if self._spacing.height() >= 0:
return self._spacing.height()
else:
return self.smartSpacing(QStyle.PM_LayoutVerticalSpacing)
def smartSpacing(self, metric: QStyle) -> int:
parent = self.parent()
if parent is None:
return -1
elif parent.isWidgetType():
return parent.style().pixelMetric(metric, None, parent)
else:
return parent.spacing()
def count(self) -> int:
return len(self._items)
def itemAt(self, index: int) -> QLayoutItem:
if 0 <= index < len(self._items):
return self._items[index]
return None # type: ignore
def takeAt(self, index: int) -> QLayoutItem:
if 0 <= index < len(self._items):
return self._items.pop(index)
return None # type: ignore
def expandingDirections(self) -> Qt.Orientations:
return Qt.Horizontal
def hasHeightForWidth(self) -> bool:
return True
def heightForWidth(self, width: int) -> int:
return self.doLayout(QRect(0, 0, width, 0), True)
def setGeometry(self, rect: QRect) -> None:
super().setGeometry(rect)
self.doLayout(rect, False)
def sizeHint(self) -> QSize:
return self.minimumSize()
def minimumSize(self) -> QSize:
size = QSize()
for item in self._items:
size = size.expandedTo(item.minimumSize())
left, top, right, bottom = self.getContentsMargins()
size += QSize(left + right, top + bottom)
return size
def doLayout(self, rect: QRect, testonly: bool) -> int:
left, top, right, bottom = self.getContentsMargins()
effective = rect.adjusted(+left, +top, -right, -bottom)
x = effective.x()
y = effective.y()
lines: List[Tuple[int, List[Tuple[QLayoutItem, QPoint]]]] = []
line = 0
lineheight = 0
for item in self._items:
widget = item.widget()
hspace = self.horizontalSpacing()
if hspace == -1:
hspace = widget.style().layoutSpacing(
QSizePolicy.Preferred,
QSizePolicy.Preferred,
Qt.Horizontal
)
vspace = self.verticalSpacing()
if vspace == -1:
vspace = widget.style().layoutSpacing(
QSizePolicy.Preferred,
QSizePolicy.Preferred,
Qt.Vertical
)
nextX = x + item.sizeHint().width() + hspace
if nextX - hspace > effective.right() and lineheight > 0:
x = effective.x()
y = y + lineheight + vspace
nextX = x + item.sizeHint().width() + hspace
lineheight = 0
line += 1
lineheight = max(lineheight, item.sizeHint().height())
if not testonly:
if len(lines) <= line:
lines.append((lineheight, []))
else:
lines[line] = (max(lines[line][0], lineheight), lines[line][1])
lines[line][1].append((item, QPoint(x, y)))
x = nextX
if not testonly:
for maxlineheight, current in lines:
if len(current) > 1:
linewidth = sum(line[0].sizeHint().width() for line in current)
spacing = floor((effective.right() - linewidth) / (len(current) - 1)) - hspace
else:
spacing = 0
for i, (item, point) in enumerate(current):
item.setGeometry(QRect(
QPoint(
point.x() + (spacing if i > 0 else 0),
point.y() + floor((maxlineheight - item.sizeHint().height()) / 2)
),
item.sizeHint() if len(current) > 1
else QSize(effective.width(), item.sizeHint().height()))
)
return y + lineheight - rect.y() + bottom
|
the-stack_0_27216
|
from mps_database.mps_config import MPSConfig, models
class MpsName:
"""
This class helps build PV names for MPS componets.
"""
def __init__(self, session):
self.session = session
def getDeviceInputNameFromId(self, deviceInputId):
"""
Builds the PV base name for the specified DeviceInput ID (see getDeviceInputBaseName())
:type deviceInputId: int
:rtype :string
"""
deviceInput = self.session.query(models.DeviceInput).filter(models.DeviceInput.id==deviceInputId).one()
return self.getDeviceInputName(deviceInput)
def getDeviceInputBaseName(self, deviceInput):
"""
Builds the PV base name for the specified DeviceInput. The PV
name of the DeviceInput is composed of
<DeviceType.name> : <Device.area> : <Device.position>
Example: PROF:GUNB:753
The full PV name for the DeviceInput requires the fourth field, which
is given by the Channel associated with the DeviceInput.
:type deviceInput: models.DeviceInput
:rtype :string
"""
digitalChannel = self.session.query(models.DigitalChannel).filter(models.DigitalChannel.id==deviceInput.channel_id).one()
device = self.session.query(models.DigitalDevice).filter(models.DigitalDevice.id==deviceInput.digital_device_id).one()
if device.measured_device_type_id == None:
deviceType = self.session.query(models.DeviceType).filter(models.DeviceType.id==device.device_type_id).one()
else:
deviceType = self.session.query(models.DeviceType).filter(models.DeviceType.id==device.measured_device_type_id).one()
return deviceType.name + ":" + device.area + ":" + str(device.position)
def getDeviceInputName(self, deviceInput):
"""
Builds the full DeviceInput PV name.
name of the DeviceInput is composed of
<DeviceType.name> : <Device.area> : <Device.position> : <Channel.name>
Example: PROF:GUNB:753:IN_SWITCH
:type deviceInput: models.DeviceInput
:rtype :string
"""
digitalChannel = self.session.query(models.DigitalChannel).filter(models.DigitalChannel.id==deviceInput.channel_id).one()
device = self.session.query(models.DigitalDevice).filter(models.DigitalDevice.id==deviceInput.digital_device_id).one()
if device.measured_device_type_id == None:
deviceType = self.session.query(models.DeviceType).filter(models.DeviceType.id==device.device_type_id).one()
else:
deviceType = self.session.query(models.DeviceType).filter(models.DeviceType.id==device.measured_device_type_id).one()
return deviceType.name + ":" + device.area + ":" + str(device.position) + ":" + digitalChannel.name
def getAnalogDeviceNameFromId(self, analogDeviceId):
"""
Builds the PV for an AnalogDevice.
:type analogDeviceId: int
:rtype :string
"""
analogDevice = self.session.query(models.AnalogDevice).filter(models.AnalogDevice.id==analogDeviceId).one()
return self.getAnalogDeviceName(analogDevice)
def getAnalogDeviceName(self, analogDevice):
"""
Builds the PV for an AnalogDevice in the format:
<DeviceType.name> : <AnalogDevice.area> : <AnalogDevice.position>
:type analogDevice: models.AnalogDevice
:rtype :string
"""
deviceType = self.session.query(models.DeviceType).filter(models.DeviceType.id==analogDevice.device_type_id).one()
return deviceType.name + ":" + analogDevice.area + ":" + str(analogDevice.position)
def getBypassPv(self):
return ''
def getThresholdPv(self, base, table, threshold, integrator, value_type, is_bpm=False):
"""
Builds the threshold PV for a given combination of table, threshold,
integrator and type, where:
* table: 'lc2' for LCLS-II tables
'alt' for LCLS-II ALT tables
'lc1' for LCLS-I tables
'idl' for idle tables (no beam)
* threshold: 't<0..7>' (for lc1 and idl the only threshold is t0)
* integrator: 'i<0..4>', if the device is a BPM (is_bpm=True) then
'i0'=='x', 'i1'=='y', 'i2'=='tmit'
* value_type: 'l' or 'h'
"""
if (is_bpm):
if (integrator == 'i0'):
integrator = 'x'
elif (integrator == 'i1'):
integrator = 'y'
elif (integrator == 'i2'):
integrator = 'tmit'
else:
return None
if (table == 'lc2'):
pv_name = base + ':' + integrator + '_' + threshold + '_' + value_type
else:
pv_name = base + ':' + integrator + '_' + threshold + '_' + table + '_' + value_type
return pv_name.upper()
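# Worked examples (mps_name is an MpsName instance; the device base names are illustrative only):
#
#     mps_name.getThresholdPv('BPMS:GUNB:314', 'lc2', 't0', 'i0', 'l', is_bpm=True)  # 'BPMS:GUNB:314:X_T0_L'
#     mps_name.getThresholdPv('SOLN:GUNB:212', 'lc1', 't0', 'i1', 'h')               # 'SOLN:GUNB:212:I1_T0_LC1_H'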
def getBeamDestinationNameFromId(self, beamDestinationId):
beamDestination = self.session.query(models.BeamDestination).filter(models.BeamDestination.id==beamDestinationId).one()
return self.getBeamDestinationName(beamDestination)
def getBeamDestinationName(self, beamDestination):
return "$(BASE):" + beamDestination.name.upper() + "_PC"
def getFaultNameFromId(self, faultId):
fault = self.session.query(models.Fault).filter(models.Fault.id==faultId).one()
return self.getFaultName(fault)
def getBaseFaultName(self, fault):
is_digital = False
if len(fault.inputs) <= 0:
print(('ERROR: Fault {0} (id={1}) has no inputs, please fix this error!'.format(fault.name, fault.id)))
return None
# print 'len: {0}'.format(len(fault.inputs))
for fault_input in fault.inputs:
# print 'id={0} bit={1} devid={2}'.format(fault_input.id,fault_input.bit_position, fault_input.device_id)
if fault_input.bit_position == 0:
try:
device = self.session.query(models.DigitalDevice).filter(models.DigitalDevice.id==fault_input.device_id).one()
# print 'Inputs: {0}'.format(len(device.inputs))
for input in device.inputs:
if input.bit_position == 0:
device_input = input
is_digital = True
except:
is_digital = False
if not is_digital:
try:
device = self.session.query(models.AnalogDevice).filter(models.AnalogDevice.id==fault_input.device_id).one()
except:
print(("Bonkers, device " + str(fault_input.device_id) + " is not digital nor analog - what?!?"))
#print "Bonkers, device " + str(fault_input.name) + " is not digital nor analog - what?!?"
if is_digital:
base = self.getDeviceInputBaseName(device_input)
else:
base = self.getAnalogDeviceName(device)
return base + ":" + fault.name
def getFaultName(self, fault):
base = self.getBaseFaultName(fault)
if base != None:
return base + "_FLT"
else:
return None
def getConditionName(self, condition):
return "$(BASE):" + condition.name.upper() + "_COND"
def getFaultStateName(self, faultState):
# print 'name for {0} {1} {2}'.format(faultState.id, faultState.device_state.name, faultState.fault.name)
return self.getBaseFaultName(faultState.fault) + ":" + faultState.device_state.name
#
# Figure out the PV base name for the Link Node, given a crate_id. There is
# one Link Node IOC per ATCA crate. The PV base name is:
#
# MPLN:<LOCA>:MP<NUM>
#
# where:
# LOCA: is the sector where the crate is installed (e.g. LI00, LI10, LTU...)
# NUM: index of the Link Node within LOCA (following LCLS-I convention)
# example, for LI01 sector there are four crates:
# L2KG01-1925 -> MPLN:LI01:MP01 (lowest elevation within rack)
# L2KG01-1931 -> MPLN:LI01:MP02
# L2KG01-1937 -> MPLN:LI01:MP03 (highest elevation within rack)
# L2KG01-2037 -> MPLN:LI01:MP11
#
def getLinkNodePv(self, crate_id):
return "MPLN:LI00:MP01"
|
the-stack_0_27217
|
import asyncio
import json
import ssl
from requests.exceptions import ConnectTimeout, ReadTimeout
import websockets
from websockets.client import WebSocketClientProtocol
from websockets.typing import Data
from .ws_message import WSMessage
from .ws_response import WSResponse
from .ws_timeout_error import WSTimeoutError
from .rest_request import RestRequest
class WSTest: # noqa: pylint - too-many-instance-attributes
'''
A class representing a websocket test runner
Attributes:
uri (str)
parameters (dict)
messages (list)
requests (list)
sent_messages (list)
sent_requests (list)
expected_responses (list)
received_responses (list)
received_json (list)
received_request_responses (list)
response_timeout (float)
message_timeout (float)
request_timeout (float)
test_timeout (float)
Methods:
with_parameter(key, value):
Adds a parameter and returns the WSTest
with_response(response: WSResponse):
Adds an expected response and returns the WSTest
with_message(message: WSMessage):
Adds a message to send and returns the WSTest
with_response_timeout(timeout: float):
Sets the response timeout in seconds and returns the WSTest
with_message_timeout(timeout: float):
Sets the message timeout in seconds and returns the WSTest
with_request_timeout(timeout: float):
Sets the request timeout in seconds and returns the WSTest
with_test_timeout(timeout: float):
Sets the overall test timeout in seconds and returns the WSTest
with_received_response_logging():
Enables websocket received response logging and returns the WSTest
with_request(request: RestRequest):
Adds a rest request and returns the WSTest
async run():
Runs the websocket tester with the current configuration
is_complete():
Checks whether the test has completed and returns the result as a bool
Usage:
ws_tester = (
WSTest('wss://example.com')
.with_parameter('Authorization', 'eyJh...')
.with_response(
WSResponse()
)
.with_response(
WSResponse()
)
.with_message(
WSMessage()
)
)
await ws_tester.run()
assert ws_tester.is_complete()
'''
def __init__(self, uri: str) -> None:
'''
Parameters:
uri (str): The uri of the websocket api
'''
self.uri: str = uri
self.parameters: dict = {}
self.headers: dict = {}
self.messages: list = []
self.requests: list = []
self.sent_messages: list = []
self.sent_requests: list = []
self.expected_responses: list = []
self.received_responses: list = []
self.received_json: list = []
self.received_request_responses: list = []
self.response_timeout: float = 10.0
self.message_timeout: float = 10.0
self.request_timeout: float = 10.0
self.test_timeout: float = 60.0
self.log_responses_on_error: bool = False
def with_parameter(self, key: str, value: object) -> 'WSTest':
'''
Adds a key/value pair to the parameters dictionary
Parameters are query parameters used to connect to the websocket
Parameters:
key (str): The key of the parameter
value (obj, optional): The value of the parameter
Returns:
(WSTest): The WSTest instance with_parameter was called on
'''
self.parameters[key] = value
return self
def with_header(self, key: str, value: object) -> 'WSTest':
'''
Adds a key/value pair to the headers dictionary
Headers are passed to the websockets connect method
Parameters:
key (str): The key/name of the header
value (obj): The value of the header
Returns:
(WSTest): The WSTest instance with_header was called on
'''
self.headers[key] = value
return self
def with_response(self, response: WSResponse) -> 'WSTest':
'''
Adds a response to the expected responses list
Parameters:
response (WSResponse): An expected response
Returns:
(WSTest): The WSTest instance with_response was called on
'''
self.expected_responses.append(response)
return self
def with_message(self, message: WSMessage) -> 'WSTest':
'''
Adds a message to the messages list
Parameters:
message (WSMessage): A message to send to the websocket
Returns:
(WSTest): The WSTest instance with_message was called on
'''
self.messages.append(message)
return self
def with_response_timeout(self, timeout: float) -> 'WSTest':
'''
Sets the response timeout in seconds
Parameters:
timeout (float): The time to wait for a response in seconds
Returns:
(WSTest): The WSTest instance with_response_timeout was called on
'''
self.response_timeout = timeout
return self
def with_message_timeout(self, timeout: float) -> 'WSTest':
'''
Sets the message timeout in seconds
Parameters:
timeout (float): The time to wait for a message to send in seconds
Returns:
(WSTest): The WSTest instance with_message_timeout was called on
'''
self.message_timeout = timeout
return self
def with_request_timeout(self, timeout: float) -> 'WSTest':
'''
Sets the rest request timeout in seconds
Parameters:
timeout (float): The time to wait for a request response in seconds
Returns:
(WSTest): The WSTest instance with_request_timeout was called on
'''
self.request_timeout = timeout
return self
def with_test_timeout(self, timeout: float) -> 'WSTest':
'''
Sets the test timeout in seconds
Parameters:
timeout (float): The time to wait for the test to finish in seconds
Returns:
(WSTest): The WSTest instance with_test_timeout was called on
'''
self.test_timeout = timeout
return self
def with_received_response_logging(self) -> 'WSTest':
'''
Enables received response logging when an exception is thrown
Returns:
(WSTest): The WSTest instance set_log_responses_on_error was called on
'''
self.log_responses_on_error = True
return self
def with_request(self, request: RestRequest) -> 'WSTest':
'''
Adds a rest request to the list of requests to execute during the test
Parameters:
request (RestRequest): The request object with all relevant data for rest request execution
Returns:
(WSTest): The WSTest instance with_request was called on
'''
self.requests.append(request)
return self
async def run(self) -> None:
'''
Runs the integration tests
Sends any messages to the websocket
Receives any responses from the websocket
Raises:
WSTimeoutError: If the test/sending/receiving fails to finish within the time limit
'''
kwargs: dict = {}
connection_string = self._get_connection_string()
# add ssl if using wss
if connection_string.startswith('wss://'):
kwargs['ssl'] = ssl.SSLContext()
# add headers if headers are set
if self.headers:
kwargs['extra_headers'] = self.headers
websocket = await websockets.connect(connection_string, **kwargs)
try:
# Run the receive and send methods async with a timeout
await asyncio.wait_for(self._runner(websocket), timeout=self.test_timeout)
except asyncio.TimeoutError as ex:
raise WSTimeoutError('Timed out waiting for test to finish') from ex
finally:
await websocket.close()
async def _runner(self, websocket: WebSocketClientProtocol) -> None:
await asyncio.gather(self._receive(websocket), self._send(websocket), self._request())
async def _receive(self, websocket: WebSocketClientProtocol) -> None:
# iterate while there are still expected responses that haven't been received yet
while self.expected_responses:
try:
response = await asyncio.wait_for(websocket.recv(), timeout=self.response_timeout)
await self._receive_handler(websocket, response)
except asyncio.TimeoutError as ex:
error_message = self._get_receive_error_message()
raise WSTimeoutError(error_message) from ex
async def _receive_handler(self, websocket: WebSocketClientProtocol, response: Data) -> None:
self.received_json.append(response)
parsed_response = json.loads(response)
for expected_response in self.expected_responses:
if expected_response.is_match(parsed_response):
self.received_responses.append(expected_response)
self.expected_responses.remove(expected_response)
await self._trigger_handler(websocket, expected_response, parsed_response)
break
async def _trigger_handler(self, websocket: WebSocketClientProtocol, response: WSResponse,
raw_response: dict) -> None:
for message in response.triggers:
message = message.resolve(raw_response)
await self._send_handler(websocket, message)
async def _send(self, websocket: WebSocketClientProtocol) -> None:
while self.messages:
message = self.messages.pop(0)
await self._send_handler(websocket, message)
async def _send_handler(self, websocket: WebSocketClientProtocol, message: WSMessage) -> None:
try:
if message.delay:
await asyncio.sleep(message.delay)
await asyncio.wait_for(websocket.send(str(message)), timeout=self.message_timeout)
self.sent_messages.append(message)
except asyncio.TimeoutError as ex:
error_message = 'Timed out trying to send message:\n' + str(message)
raise WSTimeoutError(error_message) from ex
async def _request(self) -> None:
while self.requests:
request = self.requests.pop(0)
await self._request_handler(request)
async def _request_handler(self, request: RestRequest) -> None:
try:
if request.delay:
await asyncio.sleep(request.delay)
response = request.send(self.request_timeout)
self.received_request_responses.append(response)
self.sent_requests.append(request)
except (ConnectTimeout, ReadTimeout) as ex:
error_message = 'Timed out trying to send request:\n' + str(request)
raise WSTimeoutError(error_message) from ex
def _get_connection_string(self) -> str:
# wss://example.com?first=123&second=456
connection_string = self.uri.strip()
if self.parameters:
params = '&'.join(f'{str(key).strip()}={str(value).strip()}' for key, value in self.parameters.items())
connection_string += f'?{params}'
return connection_string
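# Example (same values as the comment above): uri 'wss://example.com' with parameters
# {'first': 123, 'second': 456} yields 'wss://example.com?first=123&second=456'.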
def _get_receive_error_message(self) -> str:
error_message = 'Timed out waiting for responses:'
for response in self.expected_responses:
error_message += '\n' + str(response)
if self.log_responses_on_error:
error_message += '\nReceived responses:'
for json_response in self.received_json:
error_message += '\n' + str(json_response)
return error_message
def is_complete(self) -> bool:
'''
Checks whether the test has finished running
Returns:
(bool): Value to indicate whether the test has finished
'''
return not self.expected_responses and not self.messages and not self.requests
|
the-stack_0_27218
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import discord
import itertools
import inspect
import bisect
import logging
import re
from collections import OrderedDict, namedtuple
# Needed for the setup.py script
__version__ = '1.0.0-a'
# consistency with the `discord` namespaced logging
log = logging.getLogger(__name__)
class MenuError(Exception):
pass
class CannotEmbedLinks(MenuError):
def __init__(self):
super().__init__('Bot does not have embed links permission in this channel.')
class CannotSendMessages(MenuError):
def __init__(self):
super().__init__('Bot cannot send messages in this channel.')
class CannotAddReactions(MenuError):
def __init__(self):
super().__init__('Bot cannot add reactions in this channel.')
class CannotReadMessageHistory(MenuError):
def __init__(self):
super().__init__('Bot does not have Read Message History permissions in this channel.')
class Position:
__slots__ = ('number', 'bucket')
def __init__(self, number, *, bucket=1):
self.bucket = bucket
self.number = number
def __lt__(self, other):
if not isinstance(other, Position) or not isinstance(self, Position):
return NotImplemented
return (self.bucket, self.number) < (other.bucket, other.number)
def __eq__(self, other):
return isinstance(other, Position) and other.bucket == self.bucket and other.number == self.number
def __le__(self, other):
r = Position.__lt__(other, self)
if r is NotImplemented:
return NotImplemented
return not r
def __gt__(self, other):
return Position.__lt__(other, self)
def __ge__(self, other):
r = Position.__lt__(self, other)
if r is NotImplemented:
return NotImplemented
return not r
def __repr__(self):
return '<{0.__class__.__name__}: {0.number}>'.format(self)
class Last(Position):
__slots__ = ()
def __init__(self, number=0):
super().__init__(number, bucket=2)
class First(Position):
__slots__ = ()
def __init__(self, number=0):
super().__init__(number, bucket=0)
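# Ordering sketch: the bucket dominates the comparison, so for any numbers
# First(5) < Position(0) < Position(3) < Last(0). This is what keeps "first"/"last" style buttons
# pinned to the ends when buttons are sorted by position, e.g.:
#
#     sorted([Last(), Position(1), First()])  # -> [<First: 0>, <Position: 1>, <Last: 0>]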
_custom_emoji = re.compile(r'<?(?P<animated>a)?:?(?P<name>[A-Za-z0-9\_]+):(?P<id>[0-9]{13,20})>?')
def _cast_emoji(obj, *, _custom_emoji=_custom_emoji):
if isinstance(obj, discord.PartialEmoji):
return obj
obj = str(obj)
match = _custom_emoji.match(obj)
if match is not None:
groups = match.groupdict()
animated = bool(groups['animated'])
emoji_id = int(groups['id'])
name = groups['name']
return discord.PartialEmoji(name=name, animated=animated, id=emoji_id)
return discord.PartialEmoji(name=obj, id=None, animated=False)
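# Illustrative behaviour (the emoji id below is made up):
#
#     _cast_emoji('<a:party:123456789012345678>')  # PartialEmoji(name='party', animated=True, id=123456789012345678)
#     _cast_emoji('\N{THUMBS UP SIGN}')            # no regex match, so PartialEmoji(name='👍', id=None, animated=False)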
class Button:
"""Represents a reaction-style button for the :class:`Menu`.
There are two ways to create this, the first being through explicitly
creating this class and the second being through the decorator interface,
:func:`button`.
The action must have both a ``self`` and a ``payload`` parameter
of type :class:`discord.RawReactionActionEvent`.
Attributes
------------
emoji: :class:`discord.PartialEmoji`
The emoji to use as the button. Note that passing a string will
transform it into a :class:`discord.PartialEmoji`.
action
A coroutine that is called when the button is pressed.
skip_if: Optional[Callable[[:class:`Menu`], :class:`bool`]]
A callable that detects whether it should be skipped.
A skipped button does not show up in the reaction list
and will not be processed.
position: :class:`Position`
The position the button should have in the initial order.
Note that since Discord does not actually maintain reaction
order, this is a best effort attempt to have an order until
the user restarts their client. Defaults to ``Position(0)``.
lock: :class:`bool`
Whether the button should lock all other buttons from being processed
until this button is done. Defaults to ``True``.
"""
__slots__ = ('emoji', '_action', '_skip_if', 'position', 'lock')
def __init__(self, emoji, action, *, skip_if=None, position=None, lock=True):
self.emoji = _cast_emoji(emoji)
self.action = action
self.skip_if = skip_if
self.position = position or Position(0)
self.lock = lock
@property
def skip_if(self):
return self._skip_if
@skip_if.setter
def skip_if(self, value):
if value is None:
self._skip_if = lambda x: False
return
try:
menu_self = value.__self__
except AttributeError:
self._skip_if = value
else:
# Unfurl the method to not be bound
if not isinstance(menu_self, Menu):
raise TypeError('skip_if bound method must be from Menu not %r' % menu_self)
self._skip_if = value.__func__
@property
def action(self):
return self._action
@action.setter
def action(self, value):
try:
menu_self = value.__self__
except AttributeError:
pass
else:
# Unfurl the method to not be bound
if not isinstance(menu_self, Menu):
raise TypeError('action bound method must be from Menu not %r' % menu_self)
value = value.__func__
if not inspect.iscoroutinefunction(value):
raise TypeError('action must be a coroutine not %r' % value)
self._action = value
def __call__(self, menu, payload):
if self.skip_if(menu):
return
return self._action(menu, payload)
def __str__(self):
return str(self.emoji)
def is_valid(self, menu):
return not self.skip_if(menu)
def button(emoji, **kwargs):
"""Denotes a method to be button for the :class:`Menu`.
The methods being wrapped must have both a ``self`` and a ``payload``
parameter of type :class:`discord.RawReactionActionEvent`.
The keyword arguments are forwarded to the :class:`Button` constructor.
Example
---------
.. code-block:: python3
class MyMenu(Menu):
async def send_initial_message(self, ctx, channel):
return await channel.send(f'Hello {ctx.author}')
@button('\\N{THUMBS UP SIGN}')
async def on_thumbs_up(self, payload):
await self.message.edit(content=f'Thanks {self.ctx.author}!')
@button('\\N{THUMBS DOWN SIGN}')
async def on_thumbs_down(self, payload):
await self.message.edit(content=f"That's not nice {self.ctx.author}...")
Parameters
------------
emoji: Union[:class:`str`, :class:`discord.PartialEmoji`]
The emoji to use for the button.
"""
def decorator(func):
func.__menu_button__ = _cast_emoji(emoji)
func.__menu_button_kwargs__ = kwargs
return func
return decorator
class _MenuMeta(type):
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# This is needed to maintain member order for the buttons
return OrderedDict()
def __new__(cls, name, bases, attrs, **kwargs):
buttons = []
new_cls = super().__new__(cls, name, bases, attrs)
inherit_buttons = kwargs.pop('inherit_buttons', True)
if inherit_buttons:
# walk MRO to get all buttons even in subclasses
for base in reversed(new_cls.__mro__):
for elem, value in base.__dict__.items():
try:
value.__menu_button__
except AttributeError:
continue
else:
buttons.append(value)
else:
for elem, value in attrs.items():
try:
value.__menu_button__
except AttributeError:
continue
else:
buttons.append(value)
new_cls.__menu_buttons__ = buttons
return new_cls
def get_buttons(cls):
buttons = OrderedDict()
for func in cls.__menu_buttons__:
emoji = func.__menu_button__
buttons[emoji] = Button(emoji, func, **func.__menu_button_kwargs__)
return buttons
class Menu(metaclass=_MenuMeta):
r"""An interface that allows handling menus by using reactions as buttons.
Buttons should be marked with the :func:`button` decorator. Please note that
this expects the methods to have a single parameter, the ``payload``. This
``payload`` is of type :class:`discord.RawReactionActionEvent`.
Attributes
------------
timeout: :class:`float`
The timeout to wait between button inputs.
delete_message_after: :class:`bool`
Whether to delete the message after the menu interaction is done.
clear_reactions_after: :class:`bool`
Whether to clear reactions after the menu interaction is done.
Note that :attr:`delete_message_after` takes priority over this attribute.
If the bot does not have permissions to clear the reactions then it will
delete the reactions one by one.
check_embeds: :class:`bool`
Whether to verify embed permissions as well.
ctx: Optional[:class:`commands.Context`]
The context that started this pagination session or ``None`` if it hasn't
been started yet.
bot: Optional[:class:`commands.Bot`]
The bot that is running this pagination session or ``None`` if it hasn't
been started yet.
message: Optional[:class:`discord.Message`]
The message that has been sent for handling the menu. This is the returned
message of :meth:`send_initial_message`. You can set it in order to avoid
calling :meth:`send_initial_message`\, if for example you have a pre-existing
message you want to attach a menu to.
"""
def __init__(self, *, timeout=180.0, delete_message_after=False,
clear_reactions_after=False, check_embeds=False, message=None):
self.timeout = timeout
self.delete_message_after = delete_message_after
self.clear_reactions_after = clear_reactions_after
self.check_embeds = check_embeds
self._can_remove_reactions = False
self.__tasks = []
self._running = True
self.message = message
self.ctx = None
self.bot = None
self._author_id = None
self._buttons = self.__class__.get_buttons()
self._lock = asyncio.Lock()
self._event = asyncio.Event()
@discord.utils.cached_property
def buttons(self):
"""Retrieves the buttons that are to be used for this menu session.
Skipped buttons are not in the resulting dictionary.
Returns
---------
Mapping[:class:`str`, :class:`Button`]
A mapping of button emoji to the actual button class.
"""
buttons = sorted(self._buttons.values(), key=lambda b: b.position)
return {
button.emoji: button
for button in buttons
if button.is_valid(self)
}
def add_button(self, button, *, react=False):
"""|maybecoro|
Adds a button to the list of buttons.
If the menu has already been started then the button will
not be added unless the ``react`` keyword-only argument is
set to ``True``. Note that when this happens this function
will need to be awaited.
If a button with the same emoji is added then it is overridden.
.. warning::
If the menu has started and the reaction is added, the order
property of the newly added button is ignored due to an API
limitation with Discord and the fact that reaction ordering
is not guaranteed.
Parameters
------------
button: :class:`Button`
The button to add.
react: :class:`bool`
Whether to add a reaction if the menu has been started.
Note this turns the method into a coroutine.
Raises
---------
MenuError
Tried to use ``react`` when the menu had not been started.
discord.HTTPException
Adding the reaction failed.
"""
self._buttons[button.emoji] = button
if react:
if self.__tasks:
async def wrapped():
# Add the reaction
try:
await self.message.add_reaction(button.emoji)
except discord.HTTPException:
raise
else:
# Update the cache to have the value
self.buttons[button.emoji] = button
return wrapped()
async def dummy():
raise MenuError('Menu has not been started yet')
return dummy()
def remove_button(self, emoji, *, react=False):
"""|maybecoro|
Removes a button from the list of buttons.
This operates similar to :meth:`add_button`.
Parameters
------------
emoji: Union[:class:`Button`, :class:`str`]
The emoji or the button to remove.
react: :class:`bool`
Whether to remove the reaction if the menu has been started.
Note this turns the method into a coroutine.
Raises
---------
MenuError
Tried to use ``react`` when the menu had not been started.
discord.HTTPException
Removing the reaction failed.
"""
if isinstance(emoji, Button):
emoji = emoji.emoji
else:
emoji = _cast_emoji(emoji)
self._buttons.pop(emoji, None)
if react:
if self.__tasks:
async def wrapped():
# Remove the reaction from being processable
# Removing it from the cache first makes it so the check
# doesn't get triggered.
self.buttons.pop(emoji, None)
await self.message.remove_reaction(emoji, self.__me)
return wrapped()
async def dummy():
raise MenuError('Menu has not been started yet')
return dummy()
def clear_buttons(self, *, react=False):
"""|maybecoro|
Removes all buttons from the list of buttons.
If the menu has already been started then the buttons will
not be removed unless the ``react`` keyword-only argument is
set to ``True``. Note that when this happens this function
will need to be awaited.
Parameters
------------
react: :class:`bool`
Whether to clear the reactions if the menu has been started.
Note this turns the method into a coroutine.
Raises
---------
MenuError
Tried to use ``react`` when the menu had not been started.
discord.HTTPException
Clearing the reactions failed.
"""
self._buttons.clear()
if react:
if self.__tasks:
async def wrapped():
# A fast path if we have permissions
if self._can_remove_reactions:
try:
del self.buttons
except AttributeError:
pass
finally:
await self.message.clear_reactions()
return
# Remove the cache (the next call will have the updated buttons)
reactions = list(self.buttons.keys())
try:
del self.buttons
except AttributeError:
pass
for reaction in reactions:
await self.message.remove_reaction(reaction, self.__me)
return wrapped()
async def dummy():
raise MenuError('Menu has not been started yet')
return dummy()
def should_add_reactions(self):
""":class:`bool`: Whether to add reactions to this menu session."""
return len(self.buttons)
def _verify_permissions(self, ctx, channel, permissions):
if not permissions.send_messages:
raise CannotSendMessages()
if self.check_embeds and not permissions.embed_links:
raise CannotEmbedLinks()
self._can_remove_reactions = permissions.manage_messages
if self.should_add_reactions():
if not permissions.add_reactions:
raise CannotAddReactions()
if not permissions.read_message_history:
raise CannotReadMessageHistory()
def reaction_check(self, payload):
"""The function that is used to check whether the payload should be processed.
This is passed to :meth:`discord.ext.commands.Bot.wait_for <Bot.wait_for>`.
There should be no reason to override this function for most users.
Parameters
------------
payload: :class:`discord.RawReactionActionEvent`
The payload to check.
Returns
---------
:class:`bool`
Whether the payload should be processed.
"""
if payload.message_id != self.message.id:
return False
if payload.user_id not in {self.bot.owner_id, self._author_id, *self.bot.owner_ids}:
return False
return payload.emoji in self.buttons
async def _internal_loop(self):
try:
self.__timed_out = False
loop = self.bot.loop
# Ensure the name exists for the cancellation handling
tasks = []
while self._running:
tasks = [
asyncio.ensure_future(self.bot.wait_for('raw_reaction_add', check=self.reaction_check)),
]
done, pending = await asyncio.wait(tasks, timeout=self.timeout, return_when=asyncio.FIRST_COMPLETED)
for task in pending:
task.cancel()
if len(done) == 0:
raise asyncio.TimeoutError()
# Exception will propagate if e.g. cancelled or timed out
payload = done.pop().result()
loop.create_task(self.update(payload))
# NOTE: Removing the reaction ourselves after it's been done when
# mixed with the checks above is incredibly racy.
# There is no guarantee when the MESSAGE_REACTION_REMOVE event will
# be called, and chances are when it does happen it'll always be
# after the remove_reaction HTTP call has returned back to the caller
# which means that the stuff above will catch the reaction that we
# just removed.
# For the future sake of myself and to save myself the hours in the future
# consider this my warning.
except asyncio.TimeoutError:
self.__timed_out = True
finally:
self._event.set()
# Cancel any outstanding tasks (if any)
for task in tasks:
task.cancel()
try:
await self.finalize(self.__timed_out)
except Exception:
pass
finally:
self.__timed_out = False
# Can't do any requests if the bot is closed
if self.bot.is_closed():
return
# Wrap it in another block anyway just to ensure
# nothing leaks out during clean-up
try:
if self.delete_message_after:
return await self.message.delete()
if self.clear_reactions_after:
if self._can_remove_reactions:
return await self.message.clear_reactions()
for button_emoji in self.buttons:
try:
await self.message.remove_reaction(button_emoji, self.__me)
except discord.HTTPException:
continue
except Exception:
pass
async def update(self, payload):
"""|coro|
Updates the menu after an event has been received.
Parameters
-----------
payload: :class:`discord.RawReactionActionEvent`
The reaction event that triggered this update.
"""
button = self.buttons[payload.emoji]
if not self._running:
return
try:
if button.lock:
async with self._lock:
if self._running:
await button(self, payload)
else:
await button(self, payload)
except Exception as exc:
await self.on_menu_button_error(exc)
async def on_menu_button_error(self, exc):
"""|coro|
Handles reporting of errors while updating the menu from events.
The default behaviour is to log the exception.
This may be overriden by subclasses.
Parameters
----------
exc: :class:`Exception`
The exception which was raised during a menu update.
"""
# some users may wish to take other actions during or beyond logging
# which would require awaiting, such as stopping an erroring menu.
log.exception("Unhandled exception during menu update.", exc_info=exc)
async def start(self, ctx, *, channel=None, wait=False):
"""|coro|
Starts the interactive menu session.
Parameters
-----------
ctx: :class:`Context`
The invocation context to use.
channel: :class:`discord.abc.Messageable`
The messageable to send the message to. If not given
then it defaults to the channel in the context.
wait: :class:`bool`
Whether to wait until the menu is completed before
returning back to the caller.
Raises
-------
MenuError
An error happened when verifying permissions.
discord.HTTPException
Adding a reaction failed.
"""
# Clear the buttons cache and re-compute if possible.
try:
del self.buttons
except AttributeError:
pass
self.bot = bot = ctx.bot
self.ctx = ctx
self._author_id = ctx.author.id
channel = channel or ctx.channel
is_guild = isinstance(channel, discord.abc.GuildChannel)
me = channel.guild.me if is_guild else ctx.bot.user
permissions = channel.permissions_for(me)
self.__me = discord.Object(id=me.id)
self._verify_permissions(ctx, channel, permissions)
self._event.clear()
msg = self.message
if msg is None:
self.message = msg = await self.send_initial_message(ctx, channel)
if self.should_add_reactions():
# Start the task first so we can listen to reactions before doing anything
for task in self.__tasks:
task.cancel()
self.__tasks.clear()
self._running = True
self.__tasks.append(bot.loop.create_task(self._internal_loop()))
async def add_reactions_task():
for emoji in self.buttons:
await msg.add_reaction(emoji)
self.__tasks.append(bot.loop.create_task(add_reactions_task()))
if wait:
await self._event.wait()
async def finalize(self, timed_out):
"""|coro|
A coroutine that is called when the menu loop has completed
its run. This is useful if some asynchronous clean-up is
required after the fact.
Parameters
--------------
timed_out: :class:`bool`
Whether the menu completed due to timing out.
"""
return
async def send_initial_message(self, ctx, channel):
"""|coro|
Sends the initial message for the menu session.
This is internally assigned to the :attr:`message` attribute.
Subclasses must implement this if they don't set the
:attr:`message` attribute themselves before starting the
menu via :meth:`start`.
Parameters
------------
ctx: :class:`Context`
The invocation context to use.
channel: :class:`discord.abc.Messageable`
The messageable to send the message to.
Returns
--------
:class:`discord.Message`
The message that has been sent.
"""
raise NotImplementedError
def stop(self):
"""Stops the internal loop."""
self._running = False
for task in self.__tasks:
task.cancel()
self.__tasks.clear()
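# Illustrative sketch, not part of the original module: a minimal confirm-style
# Menu and how a command coroutine might start it. It relies on the module-level
# imports of this file and on an invocation context ``ctx`` supplied by
# discord.ext.commands; the class and method names below are hypothetical.
class _ExampleConfirm(Menu):
    def __init__(self, msg):
        super().__init__(timeout=30.0, delete_message_after=True)
        self.msg = msg
        self.result = None

    async def send_initial_message(self, ctx, channel):
        return await channel.send(self.msg)

    @button('\N{WHITE HEAVY CHECK MARK}')
    async def do_confirm(self, payload):
        self.result = True
        self.stop()

    @button('\N{CROSS MARK}')
    async def do_deny(self, payload):
        self.result = False
        self.stop()

# Hypothetical usage inside a command:
#     confirm = _ExampleConfirm('Are you sure?')
#     await confirm.start(ctx, wait=True)
#     if confirm.result:
#         ...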
class PageSource:
"""An interface representing a menu page's data source for the actual menu page.
Subclasses must implement the backing resource along with the following methods:
- :meth:`get_page`
- :meth:`is_paginating`
- :meth:`format_page`
"""
async def _prepare_once(self):
try:
# Don't feel like formatting hasattr with
# the proper mangling
# read this as follows:
# if hasattr(self, '__prepare')
# except that it works as you expect
self.__prepare
except AttributeError:
await self.prepare()
self.__prepare = True
async def prepare(self):
"""|coro|
A coroutine that is called after initialisation
but before anything else to do some asynchronous set up
as well as the one provided in ``__init__``.
By default this does nothing.
This coroutine will only be called once.
"""
return
def is_paginating(self):
"""An abstract method that notifies the :class:`MenuPages` whether or not
to start paginating. This signals whether to add reactions or not.
Subclasses must implement this.
Returns
--------
:class:`bool`
Whether to trigger pagination.
"""
raise NotImplementedError
def get_max_pages(self):
"""An optional abstract method that retrieves the maximum number of pages
this page source has. Useful for UX purposes.
The default implementation returns ``None``.
Returns
--------
Optional[:class:`int`]
The maximum number of pages required to properly
paginate the elements, if given.
"""
return None
async def get_page(self, page_number):
"""|coro|
An abstract method that retrieves an object representing the object to format.
Subclasses must implement this.
.. note::
The page_number is zero-indexed between [0, :meth:`get_max_pages`),
if there is a maximum number of pages.
Parameters
-----------
page_number: :class:`int`
The page number to access.
Returns
---------
Any
The object represented by that page.
This is passed into :meth:`format_page`.
"""
raise NotImplementedError
async def format_page(self, menu, page):
"""|maybecoro|
An abstract method to format the page.
This method must return one of the following types.
If this method returns a ``str`` then it is interpreted as returning
the ``content`` keyword argument in :meth:`discord.Message.edit`
and :meth:`discord.abc.Messageable.send`.
If this method returns a :class:`discord.Embed` then it is interpreted
as returning the ``embed`` keyword argument in :meth:`discord.Message.edit`
and :meth:`discord.abc.Messageable.send`.
If this method returns a ``dict`` then it is interpreted as the
keyword-arguments that are used in both :meth:`discord.Message.edit`
and :meth:`discord.abc.Messageable.send`. The two of interest are
``embed`` and ``content``.
Parameters
------------
menu: :class:`Menu`
The menu that wants to format this page.
page: Any
The page returned by :meth:`PageSource.get_page`.
Returns
---------
Union[:class:`str`, :class:`discord.Embed`, :class:`dict`]
See above.
"""
raise NotImplementedError
class MenuPages(Menu):
"""A special type of Menu dedicated to pagination.
Attributes
------------
current_page: :class:`int`
The current page that we are in. Zero-indexed
between [0, :attr:`PageSource.max_pages`).
"""
def __init__(self, source, **kwargs):
self._source = source
self.current_page = 0
super().__init__(**kwargs)
@property
def source(self):
""":class:`PageSource`: The source where the data comes from."""
return self._source
async def change_source(self, source):
"""|coro|
Changes the :class:`PageSource` to a different one at runtime.
Once the change has been set, the menu is moved to the first
page of the new source if it was started. This effectively
changes the :attr:`current_page` to 0.
Raises
--------
TypeError
A :class:`PageSource` was not passed.
"""
if not isinstance(source, PageSource):
raise TypeError('Expected {0!r} not {1.__class__!r}.'.format(PageSource, source))
self._source = source
self.current_page = 0
if self.message is not None:
await source._prepare_once()
await self.show_page(0)
def should_add_reactions(self):
return self._source.is_paginating()
async def _get_kwargs_from_page(self, page):
value = await discord.utils.maybe_coroutine(self._source.format_page, self, page)
if isinstance(value, dict):
return value
elif isinstance(value, str):
return { 'content': value, 'embed': None }
elif isinstance(value, discord.Embed):
return { 'embed': value, 'content': None }
async def show_page(self, page_number):
page = await self._source.get_page(page_number)
self.current_page = page_number
kwargs = await self._get_kwargs_from_page(page)
await self.message.edit(**kwargs)
async def send_initial_message(self, ctx, channel):
"""|coro|
The default implementation of :meth:`Menu.send_initial_message`
for the interactive pagination session.
This implementation shows the first page of the source.
"""
page = await self._source.get_page(0)
kwargs = await self._get_kwargs_from_page(page)
return await channel.send(**kwargs)
async def start(self, ctx, *, channel=None, wait=False):
await self._source._prepare_once()
await super().start(ctx, channel=channel, wait=wait)
async def show_checked_page(self, page_number):
max_pages = self._source.get_max_pages()
try:
if max_pages is None:
# If it doesn't give maximum pages, it cannot be checked
await self.show_page(page_number)
elif max_pages > page_number >= 0:
await self.show_page(page_number)
except IndexError:
# An error happened that can be handled, so ignore it.
pass
async def show_current_page(self):
if self._source.is_paginating():
await self.show_page(self.current_page)
def _skip_double_triangle_buttons(self):
max_pages = self._source.get_max_pages()
if max_pages is None:
return True
return max_pages <= 2
@button('\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f',
position=First(0), skip_if=_skip_double_triangle_buttons)
async def go_to_first_page(self, payload):
"""go to the first page"""
await self.show_page(0)
@button('\N{BLACK LEFT-POINTING TRIANGLE}\ufe0f', position=First(1))
async def go_to_previous_page(self, payload):
"""go to the previous page"""
await self.show_checked_page(self.current_page - 1)
@button('\N{BLACK RIGHT-POINTING TRIANGLE}\ufe0f', position=Last(0))
async def go_to_next_page(self, payload):
"""go to the next page"""
await self.show_checked_page(self.current_page + 1)
@button('\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f',
position=Last(1), skip_if=_skip_double_triangle_buttons)
async def go_to_last_page(self, payload):
"""go to the last page"""
# The call here is safe because it's guarded by skip_if
await self.show_page(self._source.get_max_pages() - 1)
@button('\N{BLACK SQUARE FOR STOP}\ufe0f', position=Last(2))
async def stop_pages(self, payload):
"""stops the pagination session."""
self.stop()
class ListPageSource(PageSource):
"""A data source for a sequence of items.
This page source does not handle any sort of formatting, leaving it up
to the user. To do so, implement the :meth:`format_page` method.
Attributes
------------
entries: Sequence[Any]
The sequence of items to paginate.
per_page: :class:`int`
How many elements are in a page.
"""
def __init__(self, entries, *, per_page):
self.entries = entries
self.per_page = per_page
pages, left_over = divmod(len(entries), per_page)
if left_over:
pages += 1
self._max_pages = pages
def is_paginating(self):
""":class:`bool`: Whether pagination is required."""
return len(self.entries) > self.per_page
def get_max_pages(self):
""":class:`int`: The maximum number of pages required to paginate this sequence."""
return self._max_pages
async def get_page(self, page_number):
"""Returns either a single element of the sequence or
a slice of the sequence.
If :attr:`per_page` is set to ``1`` then this returns a single
element. Otherwise it returns at most :attr:`per_page` elements.
Returns
---------
Union[Any, List[Any]]
The data returned.
"""
if self.per_page == 1:
return self.entries[page_number]
else:
base = page_number * self.per_page
return self.entries[base:base + self.per_page]
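# Illustrative sketch, not part of the original module: formatting is left to the
# user, so the usual pattern is to subclass ListPageSource, implement format_page,
# and hand the source to MenuPages. The names below are hypothetical.
class _ExampleListSource(ListPageSource):
    def __init__(self, data):
        super().__init__(data, per_page=4)

    async def format_page(self, menu, entries):
        offset = menu.current_page * self.per_page
        return '\n'.join(f'{i + 1}. {entry}' for i, entry in enumerate(entries, start=offset))

# Hypothetical usage inside a command:
#     pages = MenuPages(source=_ExampleListSource(items), clear_reactions_after=True)
#     await pages.start(ctx)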
_GroupByEntry = namedtuple('_GroupByEntry', 'key items')
class GroupByPageSource(ListPageSource):
"""A data source for grouped by sequence of items.
This inherits from :class:`ListPageSource`.
This page source does not handle any sort of formatting, leaving it up
to the user. To do so, implement the :meth:`format_page` method.
Parameters
------------
entries: Sequence[Any]
The sequence of items to paginate and group.
key: Callable[[Any], Any]
A key function to do the grouping with.
sort: :class:`bool`
Whether to sort the sequence before grouping it.
The elements are sorted according to the ``key`` function passed.
per_page: :class:`int`
How many elements to have per page of the group.
"""
def __init__(self, entries, *, key, per_page, sort=True):
self.__entries = entries if not sort else sorted(entries, key=key)
nested = []
self.nested_per_page = per_page
for k, g in itertools.groupby(self.__entries, key=key):
g = list(g)
if not g:
continue
size = len(g)
# Chunk the nested pages
nested.extend(_GroupByEntry(key=k, items=g[i:i+per_page]) for i in range(0, size, per_page))
super().__init__(nested, per_page=1)
async def get_page(self, page_number):
return self.entries[page_number]
async def format_page(self, menu, entry):
"""An abstract method to format the page.
This works similar to the :meth:`ListPageSource.format_page` except
the return type of the ``entry`` parameter is documented.
Parameters
------------
menu: :class:`Menu`
The menu that wants to format this page.
entry
A namedtuple with ``(key, items)`` representing the key of the
group by function and a sequence of paginated items within that
group.
Returns
---------
:class:`dict`
A dictionary representing keyword-arguments to pass to
the message related calls.
"""
raise NotImplementedError
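# Illustrative sketch, not part of the original module: grouping a flat sequence
# by a key and rendering each grouped page. The names below are hypothetical.
class _ExampleGroupedSource(GroupByPageSource):
    async def format_page(self, menu, entry):
        joined = '\n'.join(str(item) for item in entry.items)
        return {'content': f'**{entry.key}**\n{joined}', 'embed': None}

# Hypothetical usage inside a command:
#     source = _ExampleGroupedSource(words, key=lambda w: w[0], per_page=10, sort=True)
#     await MenuPages(source, clear_reactions_after=True).start(ctx)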
def _aiter(obj, *, _isasync=inspect.iscoroutinefunction):
cls = obj.__class__
try:
async_iter = cls.__aiter__
except AttributeError:
raise TypeError('{0.__name__!r} object is not an async iterable'.format(cls))
async_iter = async_iter(obj)
if _isasync(async_iter):
raise TypeError('{0.__name__!r} object is not an async iterable'.format(cls))
return async_iter
class AsyncIteratorPageSource(PageSource):
"""A data source for data backed by an asynchronous iterator.
This page source does not handle any sort of formatting, leaving it up
to the user. To do so, implement the :meth:`format_page` method.
Parameters
------------
    iterator: AsyncIterator[Any]
The asynchronous iterator to paginate.
per_page: :class:`int`
How many elements to have per page.
"""
def __init__(self, iterator, *, per_page):
self.iterator = _aiter(iterator)
self.per_page = per_page
self._exhausted = False
self._cache = []
async def _iterate(self, n):
it = self.iterator
cache = self._cache
for i in range(0, n):
try:
elem = await it.__anext__()
except StopAsyncIteration:
self._exhausted = True
break
else:
cache.append(elem)
async def prepare(self, *, _aiter=_aiter):
        # Iterate until we have at least a bit more than a single page
await self._iterate(self.per_page + 1)
def is_paginating(self):
""":class:`bool`: Whether pagination is required."""
return len(self._cache) > self.per_page
async def _get_single_page(self, page_number):
if page_number < 0:
raise IndexError('Negative page number.')
if not self._exhausted and len(self._cache) <= page_number:
await self._iterate((page_number + 1) - len(self._cache))
return self._cache[page_number]
async def _get_page_range(self, page_number):
if page_number < 0:
raise IndexError('Negative page number.')
base = page_number * self.per_page
max_base = base + self.per_page
if not self._exhausted and len(self._cache) <= max_base:
await self._iterate((max_base + 1) - len(self._cache))
entries = self._cache[base:max_base]
if not entries and max_base > len(self._cache):
raise IndexError('Went too far')
return entries
async def get_page(self, page_number):
"""Returns either a single element of the sequence or
a slice of the sequence.
If :attr:`per_page` is set to ``1`` then this returns a single
element. Otherwise it returns at most :attr:`per_page` elements.
Returns
---------
Union[Any, List[Any]]
The data returned.
"""
if self.per_page == 1:
return await self._get_single_page(page_number)
else:
return await self._get_page_range(page_number)
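# Illustrative sketch, not part of the original module: wrapping an async
# generator with AsyncIteratorPageSource. The names below are hypothetical.
class _ExampleAsyncSource(AsyncIteratorPageSource):
    def __init__(self):
        super().__init__(self._generate(), per_page=5)

    async def _generate(self):
        # Stand-in for e.g. an API or database cursor that yields items lazily.
        for i in range(50):
            yield f'entry {i}'

    async def format_page(self, menu, entries):
        return '\n'.join(entries)

# Hypothetical usage inside a command:
#     await MenuPages(_ExampleAsyncSource(), clear_reactions_after=True).start(ctx)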
|
the-stack_0_27220
|
import logging
from contextlib import contextmanager
from django.contrib.admin.models import LogEntry
from django.core.management.base import CommandError
from django.db import transaction
from django.db.models import Q
from morango.models import Buffer
from morango.models import Certificate
from morango.models import DatabaseMaxCounter
from morango.models import RecordMaxCounter
from morango.models import RecordMaxCounterBuffer
from morango.models import Store
from morango.models import SyncSession
from morango.models import TransferSession
from kolibri.core.analytics.models import PingbackNotificationDismissed
from kolibri.core.auth.management.utils import DisablePostDeleteSignal
from kolibri.core.auth.management.utils import get_facility
from kolibri.core.auth.management.utils import GroupDeletion
from kolibri.core.auth.models import AdHocGroup
from kolibri.core.auth.models import Classroom
from kolibri.core.auth.models import Collection
from kolibri.core.auth.models import dataset_cache
from kolibri.core.auth.models import FacilityDataset
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.models import LearnerGroup
from kolibri.core.auth.models import Membership
from kolibri.core.auth.models import Role
from kolibri.core.auth.utils import confirm_or_exit
from kolibri.core.device.models import DevicePermissions
from kolibri.core.exams.models import Exam
from kolibri.core.exams.models import ExamAssignment
from kolibri.core.lessons.models import Lesson
from kolibri.core.lessons.models import LessonAssignment
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import ExamAttemptLog
from kolibri.core.logger.models import ExamLog
from kolibri.core.logger.models import MasteryLog
from kolibri.core.logger.models import UserSessionLog
from kolibri.core.tasks.management.commands.base import AsyncCommand
logger = logging.getLogger(__name__)
def chunk(things, size):
"""
Chunk generator
:type things: list
:type size: int
"""
for i in range(0, len(things), size):
yield things[i : i + size]
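# For reference (hypothetical values): chunk([1, 2, 3, 4, 5], 2) yields
# [1, 2], then [3, 4], and finally [5].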
class Command(AsyncCommand):
help = "This command initiates the deletion process for a facility and all of it's related data."
def add_arguments(self, parser):
parser.add_argument(
"--facility",
action="store",
type=str,
help="The ID of the facility to delete",
)
parser.add_argument(
"--strict",
action="store_true",
help="Enforce that deletion count matches expected count",
)
parser.add_argument("--noninteractive", action="store_true")
def handle_async(self, *args, **options):
noninteractive = options["noninteractive"]
strict = options["strict"]
facility = get_facility(
facility_id=options["facility"], noninteractive=noninteractive
)
dataset_id = facility.dataset_id
logger.info(
"Found facility {} <{}> for deletion".format(facility.id, dataset_id)
)
if not noninteractive:
# ensure the user REALLY wants to do this!
confirm_or_exit(
"Are you sure you wish to permanently delete this facility? This will DELETE ALL DATA FOR THE FACILITY."
)
confirm_or_exit(
"ARE YOU SURE? If you do this, there is no way to recover the facility data on this device."
)
# everything should get cascade deleted from the facility, but we'll check anyway
delete_group = GroupDeletion(
"Main",
groups=[
self._get_morango_models(dataset_id),
self._get_log_models(dataset_id),
self._get_class_models(dataset_id),
self._get_users(dataset_id),
self._get_facility_dataset(dataset_id),
],
)
logger.info(
"Proceeding with facility deletion. Deleting all data for facility <{}>".format(
dataset_id
)
)
with self._delete_context():
total_deleted = 0
# run the counting step
with self.start_progress(
total=delete_group.group_count()
) as update_progress:
update_progress(increment=0, message="Counting database objects")
total_count = delete_group.count(update_progress)
            # now the deleting step
with self.start_progress(total=total_count) as update_progress:
update_progress(increment=0, message="Deleting database objects")
count, stats = delete_group.delete(update_progress)
total_deleted += count
# clear related cache
dataset_cache.clear()
# if count doesn't match, something doesn't seem right
if total_count != total_deleted:
msg = "Deleted count does not match total ({} != {})".format(
total_count, total_deleted
)
if strict:
raise CommandError("{}, aborting!".format(msg))
else:
logger.warning(msg)
logger.info("Deletion complete.")
@contextmanager
def _delete_context(self):
with DisablePostDeleteSignal(), transaction.atomic():
yield
def _get_facility_dataset(self, dataset_id):
return FacilityDataset.objects.filter(id=dataset_id)
def _get_certificates(self, dataset_id):
return (
Certificate.objects.filter(id=dataset_id)
.get_descendants(include_self=True)
.exclude(_private_key=None)
)
def _get_users(self, dataset_id):
user_id_filter = Q(
user_id__in=FacilityUser.objects.filter(dataset_id=dataset_id).values_list(
"pk", flat=True
)
)
dataset_id_filter = Q(dataset_id=dataset_id)
return GroupDeletion(
"User models",
querysets=[
LogEntry.objects.filter(user_id_filter),
DevicePermissions.objects.filter(user_id_filter),
PingbackNotificationDismissed.objects.filter(user_id_filter),
Collection.objects.filter(
Q(parent_id__isnull=True) & dataset_id_filter
),
Role.objects.filter(dataset_id_filter),
Membership.objects.filter(dataset_id_filter),
FacilityUser.objects.filter(dataset_id_filter),
],
)
def _get_class_models(self, dataset_id):
dataset_id_filter = Q(dataset_id=dataset_id)
return GroupDeletion(
"Class models",
querysets=[
ExamAssignment.objects.filter(dataset_id_filter),
Exam.objects.filter(dataset_id_filter),
LessonAssignment.objects.filter(dataset_id_filter),
Lesson.objects.filter(dataset_id_filter),
AdHocGroup.objects.filter(dataset_id_filter),
LearnerGroup.objects.filter(dataset_id_filter),
Classroom.objects.filter(dataset_id_filter),
],
)
def _get_log_models(self, dataset_id):
dataset_id_filter = Q(dataset_id=dataset_id)
return GroupDeletion(
"Log models",
querysets=[
ContentSessionLog.objects.filter(dataset_id_filter),
ContentSummaryLog.objects.filter(dataset_id_filter),
AttemptLog.objects.filter(dataset_id_filter),
ExamAttemptLog.objects.filter(dataset_id_filter),
ExamLog.objects.filter(dataset_id_filter),
MasteryLog.objects.filter(dataset_id_filter),
UserSessionLog.objects.filter(dataset_id_filter),
],
)
def _get_morango_models(self, dataset_id):
querysets = [
DatabaseMaxCounter.objects.filter(partition__startswith=dataset_id)
]
stores = Store.objects.filter(partition__startswith=dataset_id)
store_ids = stores.values_list("pk", flat=True)
for store_ids_chunk in chunk(list(store_ids), 300):
querysets.append(
RecordMaxCounter.objects.filter(store_model_id__in=store_ids_chunk)
)
# append after RecordMaxCounter
querysets.append(stores)
certificates = self._get_certificates(dataset_id)
certificate_ids = certificates.distinct().values_list("pk", flat=True)
for certificate_id_chunk in chunk(certificate_ids, 300):
sync_sessions = SyncSession.objects.filter(
Q(client_certificate_id__in=certificate_id_chunk)
| Q(server_certificate_id__in=certificate_id_chunk)
)
sync_session_ids = sync_sessions.distinct().values_list("pk", flat=True)
transfer_sessions = TransferSession.objects.filter(
sync_session_id__in=sync_session_ids
)
transfer_session_filter = Q(
transfer_session_id__in=transfer_sessions.values_list("pk", flat=True)
)
querysets.extend(
[
RecordMaxCounterBuffer.objects.filter(transfer_session_filter),
Buffer.objects.filter(transfer_session_filter),
transfer_sessions,
sync_sessions,
certificates,
]
)
return GroupDeletion("Morango models", groups=querysets)
|
the-stack_0_27222
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import textwrap
from sqlalchemy.engine.url import make_url
from superset import app, db
from superset.models.core import Database
from superset.utils import get_main_database
from .base_tests import SupersetTestCase
class DatabaseModelTestCase(SupersetTestCase):
def test_database_schema_presto(self):
sqlalchemy_uri = 'presto://presto.airbnb.io:8080/hive/default'
model = Database(sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEquals('hive/default', db)
db = make_url(model.get_sqla_engine(schema='core_db').url).database
self.assertEquals('hive/core_db', db)
sqlalchemy_uri = 'presto://presto.airbnb.io:8080/hive'
model = Database(sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEquals('hive', db)
db = make_url(model.get_sqla_engine(schema='core_db').url).database
self.assertEquals('hive/core_db', db)
def test_database_schema_postgres(self):
sqlalchemy_uri = 'postgresql+psycopg2://postgres.airbnb.io:5439/prod'
model = Database(sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEquals('prod', db)
db = make_url(model.get_sqla_engine(schema='foo').url).database
self.assertEquals('prod', db)
def test_database_schema_hive(self):
sqlalchemy_uri = 'hive://[email protected]:10000/default?auth=NOSASL'
model = Database(sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEquals('default', db)
db = make_url(model.get_sqla_engine(schema='core_db').url).database
self.assertEquals('core_db', db)
def test_database_schema_mysql(self):
sqlalchemy_uri = 'mysql://root@localhost/superset'
model = Database(sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEquals('superset', db)
db = make_url(model.get_sqla_engine(schema='staging').url).database
self.assertEquals('staging', db)
def test_database_impersonate_user(self):
uri = 'mysql://root@localhost'
example_user = 'giuseppe'
model = Database(sqlalchemy_uri=uri)
model.impersonate_user = True
user_name = make_url(model.get_sqla_engine(user_name=example_user).url).username
self.assertEquals(example_user, user_name)
model.impersonate_user = False
user_name = make_url(model.get_sqla_engine(user_name=example_user).url).username
self.assertNotEquals(example_user, user_name)
def test_select_star(self):
main_db = get_main_database(db.session)
table_name = 'bart_lines'
sql = main_db.select_star(
table_name, show_cols=False, latest_partition=False)
expected = textwrap.dedent("""\
SELECT *
FROM {table_name}
LIMIT 100""".format(**locals()))
assert sql.startswith(expected)
sql = main_db.select_star(
table_name, show_cols=True, latest_partition=False)
expected = textwrap.dedent("""\
SELECT color,
name,
path_json,
polyline
FROM bart_lines
LIMIT 100""".format(**locals()))
assert sql.startswith(expected)
def test_grains_dict(self):
uri = 'mysql://root@localhost'
database = Database(sqlalchemy_uri=uri)
d = database.grains_dict()
self.assertEquals(d.get('day').function, 'DATE({col})')
self.assertEquals(d.get('P1D').function, 'DATE({col})')
self.assertEquals(d.get('Time Column').function, '{col}')
def test_single_statement(self):
main_db = get_main_database(db.session)
if main_db.backend == 'mysql':
df = main_db.get_df('SELECT 1', None)
self.assertEquals(df.iat[0, 0], 1)
df = main_db.get_df('SELECT 1;', None)
self.assertEquals(df.iat[0, 0], 1)
def test_multi_statement(self):
main_db = get_main_database(db.session)
if main_db.backend == 'mysql':
df = main_db.get_df('USE superset; SELECT 1', None)
self.assertEquals(df.iat[0, 0], 1)
df = main_db.get_df("USE superset; SELECT ';';", None)
self.assertEquals(df.iat[0, 0], ';')
class SqlaTableModelTestCase(SupersetTestCase):
def test_get_timestamp_expression(self):
tbl = self.get_table_by_name('birth_names')
ds_col = tbl.get_column('ds')
sqla_literal = ds_col.get_timestamp_expression(None)
self.assertEquals(str(sqla_literal.compile()), 'ds')
sqla_literal = ds_col.get_timestamp_expression('P1D')
compiled = '{}'.format(sqla_literal.compile())
if tbl.database.backend == 'mysql':
self.assertEquals(compiled, 'DATE(ds)')
ds_col.expression = 'DATE_ADD(ds, 1)'
sqla_literal = ds_col.get_timestamp_expression('P1D')
compiled = '{}'.format(sqla_literal.compile())
if tbl.database.backend == 'mysql':
self.assertEquals(compiled, 'DATE(DATE_ADD(ds, 1))')
def test_get_timestamp_expression_epoch(self):
tbl = self.get_table_by_name('birth_names')
ds_col = tbl.get_column('ds')
ds_col.expression = None
ds_col.python_date_format = 'epoch_s'
sqla_literal = ds_col.get_timestamp_expression(None)
compiled = '{}'.format(sqla_literal.compile())
if tbl.database.backend == 'mysql':
self.assertEquals(compiled, 'from_unixtime(ds)')
ds_col.python_date_format = 'epoch_s'
sqla_literal = ds_col.get_timestamp_expression('P1D')
compiled = '{}'.format(sqla_literal.compile())
if tbl.database.backend == 'mysql':
self.assertEquals(compiled, 'DATE(from_unixtime(ds))')
ds_col.expression = 'DATE_ADD(ds, 1)'
sqla_literal = ds_col.get_timestamp_expression('P1D')
compiled = '{}'.format(sqla_literal.compile())
if tbl.database.backend == 'mysql':
self.assertEquals(compiled, 'DATE(from_unixtime(DATE_ADD(ds, 1)))')
def test_get_timestamp_expression_backward(self):
tbl = self.get_table_by_name('birth_names')
ds_col = tbl.get_column('ds')
ds_col.expression = None
ds_col.python_date_format = None
sqla_literal = ds_col.get_timestamp_expression('day')
compiled = '{}'.format(sqla_literal.compile())
if tbl.database.backend == 'mysql':
self.assertEquals(compiled, 'DATE(ds)')
ds_col.expression = None
ds_col.python_date_format = None
sqla_literal = ds_col.get_timestamp_expression('Time Column')
compiled = '{}'.format(sqla_literal.compile())
if tbl.database.backend == 'mysql':
self.assertEquals(compiled, 'ds')
def test_sql_mutator(self):
tbl = self.get_table_by_name('birth_names')
query_obj = dict(
groupby=[],
metrics=[],
filter=[],
is_timeseries=False,
columns=['name'],
granularity=None,
from_dttm=None, to_dttm=None,
is_prequery=False,
extras={},
)
sql = tbl.get_query_str(query_obj)
self.assertNotIn('--COMMENT', sql)
def mutator(*args):
return '--COMMENT\n' + args[0]
app.config['SQL_QUERY_MUTATOR'] = mutator
sql = tbl.get_query_str(query_obj)
self.assertIn('--COMMENT', sql)
app.config['SQL_QUERY_MUTATOR'] = None
|
the-stack_0_27224
|
import lyricsgenius
import pandas as pd
import os
api_key = "key_here"
genius_token = "token_here"
genius = lyricsgenius.Genius(genius_token)
def get_id(song):
for platform in song.media:
if platform['provider'] == "youtube":
video_id = platform['url'].split("?v=")[-1]
return video_id
def get_data_from_genius(artist_names, count=5, filename="dataset"):
columns = ["title", "artist", "album", "year",
"yt_url", "yt_id", "genius_url", "yt_id",
"featured_artists", "writer_artists", "producer_artists",
"lyrics"]
df = pd.DataFrame(columns=columns)
if not os.path.isfile(f"lyrics/{filename}.csv"):
df.to_csv(f"lyrics/{filename}.csv")
for artist_name in artist_names:
artist = genius.search_artist(artist_name, max_songs=count)
songs = [song.title for song in artist.songs]
for title in songs:
try:
song = genius.search_song(title, artist_name)
title = song.title
artist = song.artist
album = song.album
year = song.year
yt_url = [item["url"] for item in song.media]
yt_id = get_id(song)
genius_url = song.url
featured_artist = [item['name'] for item in song.featured_artists]
writer_artist = [artist['name'] for artist in song.writer_artists]
producer_artist = [producers['name'].strip(u'\u200b') for producers in song.producer_artists]
lyrics = song.lyrics.replace('\n', ' ')
            except Exception:
                print("Problem with song, skipping to the next one...")
                continue
data_dict = {
"title": title,
"artist": artist,
"album": album,
"year": year,
"yt_url": yt_url,
"yt_id": yt_id,
"genius_url": genius_url,
"featured_artists": featured_artist,
"writer_artists": writer_artist,
"producer_artists": producer_artist,
"lyrics": lyrics}
df = df.append(data_dict, ignore_index=True)
df.to_csv(f"lyrics/{filename}.csv", mode="a", index=False,
header=False)
df = pd.DataFrame(columns=columns)
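# Hypothetical example run: the artist names are placeholders, a valid
# genius_token must be set above, and a local "lyrics/" directory must exist.
if __name__ == "__main__":
    get_data_from_genius(["Radiohead", "Daft Punk"], count=3, filename="dataset")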
|
the-stack_0_27226
|
import json
import os
import stat
import subprocess
import fnmatch
import re
import glob
import git
import sys
import shutil
import yaml
import google.auth
from google.cloud import storage
import base64
import urllib.parse
import logging
import warnings
from distutils.util import strtobool
from distutils.version import LooseVersion
from datetime import datetime, timedelta
from zipfile import ZipFile, ZIP_DEFLATED
from typing import Tuple, Any, Union
from Tests.Marketplace.marketplace_constants import PackFolders, Metadata, GCPConfig, BucketUploadFlow, PACKS_FOLDER, \
PackTags, PackIgnored, Changelog
import Tests.Marketplace.marketplace_statistics as mp_statistics
from Utils.release_notes_generator import aggregate_release_notes_for_marketplace
class Pack(object):
""" Class that manipulates and manages the upload of pack's artifact and metadata to cloud storage.
Args:
pack_name (str): Pack root folder name.
pack_path (str): Full path to pack folder.
Attributes:
PACK_INITIAL_VERSION (str): pack initial version that will be used as default.
CHANGELOG_JSON (str): changelog json full name, may be changed in the future.
README (str): pack's readme file name.
METADATA (str): pack's metadata file name, the one that will be deployed to cloud storage.
        USER_METADATA (str): user metadata file name, the one that is located in the content repo.
        EXCLUDE_DIRECTORIES (list): list of directories to exclude before uploading the pack zip to storage.
AUTHOR_IMAGE_NAME (str): author image file name.
RELEASE_NOTES (str): release notes folder name.
"""
PACK_INITIAL_VERSION = "1.0.0"
CHANGELOG_JSON = "changelog.json"
README = "README.md"
USER_METADATA = "pack_metadata.json"
METADATA = "metadata.json"
AUTHOR_IMAGE_NAME = "Author_image.png"
EXCLUDE_DIRECTORIES = [PackFolders.TEST_PLAYBOOKS.value]
RELEASE_NOTES = "ReleaseNotes"
def __init__(self, pack_name, pack_path):
self._pack_name = pack_name
self._pack_path = pack_path
self._status = None
self._public_storage_path = ""
self._remove_files_list = [] # tracking temporary files, in order to delete in later step
self._server_min_version = "99.99.99" # initialized min version
self._latest_version = None # pack latest version found in changelog
self._support_type = None # initialized in load_user_metadata function
self._current_version = None # initialized in load_user_metadata function
self._hidden = False # initialized in load_user_metadata function
self._description = None # initialized in load_user_metadata function
self._display_name = None # initialized in load_user_metadata function
self._user_metadata = None # initialized in load_user_metadata function
self.eula_link = None # initialized in load_user_metadata function
self._is_feed = False # a flag that specifies if pack is a feed pack
self._downloads_count = 0 # number of pack downloads
self._bucket_url = None # URL of where the pack was uploaded.
        self._aggregated = False  # whether the pack's release notes were aggregated or not.
self._aggregation_str = "" # the aggregation string msg when the pack versions are aggregated
self._create_date = None # initialized in enhance_pack_attributes function
self._update_date = None # initialized in enhance_pack_attributes function
self._uploaded_author_image = False # whether the pack author image was uploaded or not
self._uploaded_integration_images = [] # the list of all integration images that were uploaded for the pack
self._support_details = None # initialized in enhance_pack_attributes function
self._author = None # initialized in enhance_pack_attributes function
self._certification = None # initialized in enhance_pack_attributes function
self._legacy = None # initialized in enhance_pack_attributes function
self._author_image = None # initialized in upload_author_image function
self._displayed_integration_images = None # initialized in upload_integration_images function
self._price = 0 # initialized in enhance_pack_attributes function
self._is_private_pack = False # initialized in enhance_pack_attributes function
self._is_premium = False # initialized in enhance_pack_attributes function
self._vendor_id = None # initialized in enhance_pack_attributes function
self._partner_id = None # initialized in enhance_pack_attributes function
self._partner_name = None # initialized in enhance_pack_attributes function
self._content_commit_hash = None # initialized in enhance_pack_attributes function
self._preview_only = None # initialized in enhance_pack_attributes function
self._tags = None # initialized in enhance_pack_attributes function
self._categories = None # initialized in enhance_pack_attributes function
self._content_items = None # initialized in collect_content_items function
self._search_rank = None # initialized in enhance_pack_attributes function
self._related_integration_images = None # initialized in enhance_pack_attributes function
self._use_cases = None # initialized in enhance_pack_attributes function
self._keywords = None # initialized in enhance_pack_attributes function
self._dependencies = None # initialized in enhance_pack_attributes function
self._pack_statistics_handler = None # initialized in enhance_pack_attributes function
self._contains_transformer = False # initialized in collect_content_items function
self._contains_filter = False # initialized in collect_content_items function
self._is_missing_dependencies = False # a flag that specifies if pack is missing dependencies
@property
def name(self):
""" str: pack root folder name.
"""
return self._pack_name
@property
def path(self):
""" str: pack folder full path.
"""
return self._pack_path
@property
def latest_version(self):
""" str: pack latest version from sorted keys of changelog.json file.
"""
if not self._latest_version:
self._latest_version = self._get_latest_version()
return self._latest_version
else:
return self._latest_version
@latest_version.setter
def latest_version(self, latest_version):
self._latest_version = latest_version
@property
def status(self):
""" str: current status of the packs.
"""
return self._status
@property
def is_feed(self):
"""
bool: whether the pack is a feed pack
"""
return self._is_feed
@is_feed.setter
def is_feed(self, is_feed):
""" setter of is_feed
"""
self._is_feed = is_feed
@status.setter
def status(self, status_value):
""" setter of pack current status.
"""
self._status = status_value
@property
def public_storage_path(self):
""" str: public gcs path of uploaded pack.
"""
return self._public_storage_path
@public_storage_path.setter
def public_storage_path(self, path_value):
""" setter of public gcs path of uploaded pack.
"""
self._public_storage_path = path_value
@property
def support_type(self):
""" str: support type of the pack.
"""
return self._support_type
@support_type.setter
def support_type(self, support_value):
""" setter of support type of the pack.
"""
self._support_type = support_value
@property
def current_version(self):
""" str: current version of the pack (different from latest_version).
"""
return self._current_version
@current_version.setter
def current_version(self, current_version_value):
""" setter of current version of the pack.
"""
self._current_version = current_version_value
@property
def hidden(self):
""" bool: internal content field for preventing pack from being displayed.
"""
return self._hidden
@hidden.setter
def hidden(self, hidden_value):
""" setter of hidden property of the pack.
"""
self._hidden = hidden_value
@property
def description(self):
""" str: Description of the pack (found in pack_metadata.json).
"""
return self._description
@description.setter
def description(self, description_value):
""" setter of description property of the pack.
"""
self._description = description_value
@property
def display_name(self):
""" str: Display name of the pack (found in pack_metadata.json).
"""
return self._display_name
@property
def user_metadata(self):
""" dict: the pack_metadata.
"""
return self._user_metadata
@display_name.setter
def display_name(self, display_name_value):
""" setter of display name property of the pack.
"""
self._display_name = display_name_value
@property
def server_min_version(self):
""" str: server min version according to collected items.
"""
if not self._server_min_version or self._server_min_version == "99.99.99":
return Metadata.SERVER_DEFAULT_MIN_VERSION
else:
return self._server_min_version
@property
def downloads_count(self):
""" str: packs downloads count.
"""
return self._downloads_count
@downloads_count.setter
def downloads_count(self, download_count_value):
""" setter of downloads count property of the pack.
"""
self._downloads_count = download_count_value
@property
def bucket_url(self):
""" str: pack bucket_url.
"""
return self._bucket_url
@bucket_url.setter
def bucket_url(self, bucket_url):
""" str: pack bucket_url.
"""
self._bucket_url = bucket_url
@property
def aggregated(self):
""" str: pack aggregated release notes or not.
"""
return self._aggregated
@property
def aggregation_str(self):
""" str: pack aggregated release notes or not.
"""
return self._aggregation_str
@property
def create_date(self):
""" str: pack create date.
"""
return self._create_date
@create_date.setter
def create_date(self, value):
self._create_date = value
@property
def update_date(self):
""" str: pack update date.
"""
return self._update_date
@update_date.setter
def update_date(self, value):
self._update_date = value
@property
def uploaded_author_image(self):
""" bool: whether the pack author image was uploaded or not.
"""
return self._uploaded_author_image
@uploaded_author_image.setter
def uploaded_author_image(self, uploaded_author_image):
""" bool: whether the pack author image was uploaded or not.
"""
self._uploaded_author_image = uploaded_author_image
@property
def uploaded_integration_images(self):
""" str: the list of uploaded integration images
"""
return self._uploaded_integration_images
@property
def is_missing_dependencies(self):
return self._is_missing_dependencies
def _get_latest_version(self):
""" Return latest semantic version of the pack.
In case that changelog.json file was not found, default value of 1.0.0 will be returned.
        Otherwise, the keys of the semantic pack versions will be collected, sorted in descending order, and the latest version returned.
For additional information regarding changelog.json format go to issue #19786
Returns:
str: Pack latest version.
"""
changelog_path = os.path.join(self._pack_path, Pack.CHANGELOG_JSON)
if not os.path.exists(changelog_path):
return self.PACK_INITIAL_VERSION
with open(changelog_path, "r") as changelog_file:
changelog = json.load(changelog_file)
pack_versions = [LooseVersion(v) for v in changelog.keys()]
pack_versions.sort(reverse=True)
return pack_versions[0].vstring
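    # For reference (hypothetical changelog content): given a changelog.json of
    #   {"1.0.0": {...}, "1.2.0": {...}, "1.10.0": {...}}
    # the LooseVersion sort above returns "1.10.0" as the latest version, whereas a
    # plain lexicographic sort of the keys would incorrectly pick "1.2.0".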
@staticmethod
def organize_integration_images(pack_integration_images: list, pack_dependencies_integration_images_dict: dict,
pack_dependencies_by_download_count: list):
""" By Issue #32038
1. Sort pack integration images by alphabetical order
2. Sort pack dependencies by download count
Pack integration images are shown before pack dependencies integration images
Args:
pack_integration_images (list): list of pack integration images
pack_dependencies_integration_images_dict: a mapping of pack dependency name to its integration images
pack_dependencies_by_download_count: a list of pack dependencies sorted by download count
Returns:
list: list of sorted integration images
"""
def sort_by_name(integration_image: dict):
return integration_image.get('name', '')
# sort packs integration images
pack_integration_images = sorted(pack_integration_images, key=sort_by_name)
# sort pack dependencies integration images
all_dep_int_imgs = pack_integration_images
for dep_pack_name in pack_dependencies_by_download_count:
if dep_pack_name in pack_dependencies_integration_images_dict:
logging.info(f'Adding {dep_pack_name} to deps int imgs')
dep_int_imgs = sorted(pack_dependencies_integration_images_dict[dep_pack_name], key=sort_by_name)
for dep_int_img in dep_int_imgs:
if dep_int_img not in all_dep_int_imgs: # avoid duplicates
all_dep_int_imgs.append(dep_int_img)
return all_dep_int_imgs
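    # For reference (hypothetical data): with pack images [{'name': 'Zeta'}, {'name': 'Alpha'}],
    # dependency images {'PackB': [{'name': 'B1'}], 'PackA': [{'name': 'A1'}]} and
    # pack_dependencies_by_download_count == ['PackA', 'PackB'], the result order is
    # Alpha, Zeta, A1, B1 -- pack images first (alphabetical), then dependency images
    # in download-count order.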
@staticmethod
def _get_all_pack_images(pack_integration_images, display_dependencies_images, dependencies_data,
pack_dependencies_by_download_count):
""" Returns data of uploaded pack integration images and it's path in gcs. Pack dependencies integration images
are added to that result as well.
Args:
            pack_integration_images (list): list of integration images uploaded to gcs and their paths in gcs.
display_dependencies_images (list): list of pack names of additional dependencies images to display.
dependencies_data (dict): all level dependencies data.
pack_dependencies_by_download_count (list): list of pack names that are dependencies of the given pack
sorted by download count.
Returns:
list: collection of integration display name and it's path in gcs.
"""
dependencies_integration_images_dict = {}
additional_dependencies_data = {k: v for k, v in dependencies_data.items() if k in display_dependencies_images}
for dependency_data in additional_dependencies_data.values():
for dep_int_img in dependency_data.get('integrations', []):
dep_int_img_gcs_path = dep_int_img.get('imagePath', '') # image public url
dep_int_img['name'] = Pack.remove_contrib_suffix_from_name(dep_int_img.get('name', ''))
dep_pack_name = os.path.basename(os.path.dirname(dep_int_img_gcs_path))
if dep_pack_name not in display_dependencies_images:
continue # skip if integration image is not part of displayed images of the given pack
if dep_int_img not in pack_integration_images: # avoid duplicates in list
if dep_pack_name in dependencies_integration_images_dict:
dependencies_integration_images_dict[dep_pack_name].append(dep_int_img)
else:
dependencies_integration_images_dict[dep_pack_name] = [dep_int_img]
return Pack.organize_integration_images(
pack_integration_images, dependencies_integration_images_dict, pack_dependencies_by_download_count
)
def is_feed_pack(self, yaml_content, yaml_type):
"""
Checks if an integration is a feed integration. If so, updates Pack._is_feed
Args:
yaml_content: The yaml content extracted by yaml.safe_load().
yaml_type: The type of object to check. Should be 'Playbook' or 'Integration'.
Returns:
Doesn't return
"""
if yaml_type == 'Integration':
if yaml_content.get('script', {}).get('feed', False) is True:
self._is_feed = True
if yaml_type == 'Playbook':
if yaml_content.get('name').startswith('TIM '):
self._is_feed = True
@staticmethod
def _clean_release_notes(release_notes_lines):
return re.sub(r'<\!--.*?-->', '', release_notes_lines, flags=re.DOTALL)
@staticmethod
def _parse_pack_dependencies(first_level_dependencies, all_level_pack_dependencies_data):
""" Parses user defined dependencies and returns dictionary with relevant data about each dependency pack.
Args:
first_level_dependencies (dict): first lever dependencies that were retrieved
from user pack_metadata.json file.
all_level_pack_dependencies_data (dict): all level pack dependencies data.
Returns:
dict: parsed dictionary with pack dependency data.
"""
parsed_result = {}
dependencies_data = {k: v for (k, v) in all_level_pack_dependencies_data.items()
if k in first_level_dependencies.keys() or k == GCPConfig.BASE_PACK}
for dependency_id, dependency_data in dependencies_data.items():
parsed_result[dependency_id] = {
"mandatory": first_level_dependencies.get(dependency_id, {}).get('mandatory', True),
"minVersion": dependency_data.get(Metadata.CURRENT_VERSION, Pack.PACK_INITIAL_VERSION),
"author": dependency_data.get('author', ''),
"name": dependency_data.get('name') if dependency_data.get('name') else dependency_id,
"certification": dependency_data.get('certification', 'certified')
}
return parsed_result
@staticmethod
def _create_support_section(support_type, support_url=None, support_email=None):
""" Creates support dictionary that is part of metadata.
In case of support type xsoar, adds default support url. If support is xsoar and support url is defined and
doesn't match xsoar default url, warning is raised.
Args:
support_type (str): support type of pack.
support_url (str): support full url.
support_email (str): support email address.
Returns:
dict: supported data dictionary.
"""
support_details = {}
if support_url: # set support url from user input
support_details['url'] = support_url
elif support_type == Metadata.XSOAR_SUPPORT: # in case support type is xsoar, set default xsoar support url
support_details['url'] = Metadata.XSOAR_SUPPORT_URL
# add support email if defined
if support_email:
support_details['email'] = support_email
return support_details
@staticmethod
def _get_author(support_type, author=None):
""" Returns pack author. In case support type is xsoar, more additional validation are applied.
Args:
support_type (str): support type of pack.
author (str): author of the pack.
Returns:
str: returns author from the input.
"""
if support_type == Metadata.XSOAR_SUPPORT and not author:
return Metadata.XSOAR_AUTHOR # return the default xsoar author
elif support_type == Metadata.XSOAR_SUPPORT and author != Metadata.XSOAR_AUTHOR:
logging.warning(f"{author} author doest not match {Metadata.XSOAR_AUTHOR} default value")
return author
else:
return author
@staticmethod
def _get_certification(support_type, certification=None):
""" Returns pack certification.
In case support type is xsoar or partner, CERTIFIED is returned.
In case support is not xsoar or partner but pack_metadata has a certification field, the certification value
will be taken from the value defined in pack_metadata.
Otherwise an empty certification value (empty string) will be returned.
Args:
support_type (str): support type of pack.
certification (str): certification value from pack_metadata, if exists.
Returns:
str: certification value
"""
if support_type in [Metadata.XSOAR_SUPPORT, Metadata.PARTNER_SUPPORT]:
return Metadata.CERTIFIED
elif certification:
return certification
else:
return ""
def _get_tags_from_landing_page(self, landing_page_sections: dict) -> set:
"""
Build the pack's tag list according to the user metadata and the landingPage sections file.
Args:
landing_page_sections (dict): landingPage sections and the packs in each one of them.
Returns:
set: Pack's tags.
"""
tags = set()
sections = landing_page_sections.get('sections', []) if landing_page_sections else []
for section in sections:
if self._pack_name in landing_page_sections.get(section, []):
tags.add(section)
return tags
def _parse_pack_metadata(self, build_number, commit_hash):
""" Parses pack metadata according to issue #19786 and #20091. Part of field may change over the time.
Args:
build_number (str): circleCI build number.
commit_hash (str): current commit hash.
Returns:
dict: parsed pack metadata.
"""
pack_metadata = {
Metadata.NAME: self._display_name or self._pack_name,
Metadata.ID: self._pack_name,
Metadata.DESCRIPTION: self._description or self._pack_name,
Metadata.CREATED: self._create_date,
Metadata.UPDATED: self._update_date,
Metadata.LEGACY: self._legacy,
Metadata.SUPPORT: self._support_type,
Metadata.SUPPORT_DETAILS: self._support_details,
Metadata.EULA_LINK: self.eula_link,
Metadata.AUTHOR: self._author,
Metadata.AUTHOR_IMAGE: self._author_image,
Metadata.CERTIFICATION: self._certification,
Metadata.PRICE: self._price,
Metadata.SERVER_MIN_VERSION: self.user_metadata.get(Metadata.SERVER_MIN_VERSION) or self.server_min_version,
Metadata.CURRENT_VERSION: self.user_metadata.get(Metadata.CURRENT_VERSION, ''),
Metadata.VERSION_INFO: build_number,
Metadata.COMMIT: commit_hash,
Metadata.DOWNLOADS: self._downloads_count,
Metadata.TAGS: list(self._tags),
Metadata.CATEGORIES: self._categories,
Metadata.CONTENT_ITEMS: self._content_items,
Metadata.SEARCH_RANK: self._search_rank,
Metadata.INTEGRATIONS: self._related_integration_images,
Metadata.USE_CASES: self._use_cases,
Metadata.KEY_WORDS: self._keywords,
Metadata.DEPENDENCIES: self._dependencies
}
if self._is_private_pack:
pack_metadata.update({
Metadata.PREMIUM: self._is_premium,
Metadata.VENDOR_ID: self._vendor_id,
Metadata.PARTNER_ID: self._partner_id,
Metadata.PARTNER_NAME: self._partner_name,
Metadata.CONTENT_COMMIT_HASH: self._content_commit_hash,
Metadata.PREVIEW_ONLY: self._preview_only
})
return pack_metadata
def _load_pack_dependencies(self, index_folder_path, pack_names):
""" Loads dependencies metadata and returns mapping of pack id and it's loaded data.
Args:
index_folder_path (str): full path to download index folder.
pack_names (set): List of all packs.
Returns:
dict: pack id as key and loaded metadata of packs as value.
bool: True if the pack is missing dependencies, False otherwise.
"""
dependencies_data_result = {}
first_level_dependencies = self.user_metadata.get(Metadata.DEPENDENCIES, {})
all_level_displayed_dependencies = self.user_metadata.get(Metadata.DISPLAYED_IMAGES, [])
dependencies_ids = {d for d in first_level_dependencies.keys()}
dependencies_ids.update(all_level_displayed_dependencies)
if self._pack_name != GCPConfig.BASE_PACK: # check that current pack isn't Base Pack in order to prevent loop
dependencies_ids.add(GCPConfig.BASE_PACK) # Base pack is always added as pack dependency
for dependency_pack_id in dependencies_ids:
dependency_metadata_path = os.path.join(index_folder_path, dependency_pack_id, Pack.METADATA)
if os.path.exists(dependency_metadata_path):
with open(dependency_metadata_path, 'r') as metadata_file:
dependency_metadata = json.load(metadata_file)
dependencies_data_result[dependency_pack_id] = dependency_metadata
elif dependency_pack_id in pack_names:
# If the pack depends on a new pack (which is not yet in the index.json),
# mark it as missing dependencies. After all the packs in index.json are updated,
# this pack will be processed again to add what was missing.
self._is_missing_dependencies = True
logging.warning(f"{self._pack_name} pack dependency with id {dependency_pack_id} "
f"was not found in index, marking it as missing dependencies - to be resolved in next"
f" iteration over packs")
else:
logging.warning(f"{self._pack_name} pack dependency with id {dependency_pack_id} was not found")
return dependencies_data_result, self._is_missing_dependencies
def _get_updated_changelog_entry(self, changelog: dict, version: str, release_notes: str = None,
version_display_name: str = None, build_number_with_prefix: str = None,
released_time: str = None):
"""
Args:
changelog (dict): The changelog from the production bucket.
version (str): The version that is the key in the changelog of the entry wished to be updated.
release_notes (str): The release notes lines to update the entry with.
version_display_name (str): The version display name to update the entry with.
build_number_with_prefix (str): the build number to modify the entry to, including the R prefix (if present).
released_time: The released time to update the entry with.
"""
changelog_entry = changelog.get(version)
if not changelog_entry:
raise Exception('The given version is not a key in the changelog')
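# Changelog display names are stored as '<version_display_name> - <build_number>', so splitting on '-' recovers both parts when they are not explicitly provided.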
version_display_name = \
version_display_name if version_display_name else changelog_entry[Changelog.DISPLAY_NAME].split('-')[0]
build_number_with_prefix = \
build_number_with_prefix if build_number_with_prefix else changelog_entry[Changelog.DISPLAY_NAME].split('-')[1]
changelog_entry[Changelog.RELEASE_NOTES] = release_notes if release_notes else changelog_entry[Changelog.RELEASE_NOTES]
changelog_entry[Changelog.DISPLAY_NAME] = f'{version_display_name} - {build_number_with_prefix}'
changelog_entry[Changelog.RELEASED] = released_time if released_time else changelog_entry[Changelog.RELEASED]
return changelog_entry
def _create_changelog_entry(self, release_notes, version_display_name, build_number, pack_was_modified=False,
new_version=True, initial_release=False):
""" Creates dictionary entry for changelog.
Args:
release_notes (str): release notes md.
version_display_name (str): display name version.
build_number (str): current build number.
pack_was_modified (bool): whether the pack was modified.
new_version (bool): whether the entry is new or not. If not new, R letter will be appended to build number.
initial_release (bool): whether the entry is an initial release or not.
Returns:
dict: release notes entry of changelog
"""
if new_version:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - {build_number}',
Changelog.RELEASED: datetime.utcnow().strftime(Metadata.DATE_FORMAT)}
elif initial_release:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - {build_number}',
Changelog.RELEASED: self._create_date}
elif pack_was_modified:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - R{build_number}',
Changelog.RELEASED: datetime.utcnow().strftime(Metadata.DATE_FORMAT)}
return {}
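# Illustrative shape of a changelog entry produced above (hypothetical values; the released date follows Metadata.DATE_FORMAT,
# and the exact key names are taken from the Changelog constants):
# {
#     "releaseNotes": "#### Integrations\n##### My Integration\n- Fixed an issue.",
#     "displayName": "1.0.1 - 123456",
#     "released": "2021-01-01T00:00:00Z"
# }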
def remove_unwanted_files(self, delete_test_playbooks=True):
""" Iterates over pack folder and removes hidden files and unwanted folders.
Args:
delete_test_playbooks (bool): whether to delete test playbooks folder.
Returns:
bool: whether the operation succeeded.
"""
task_status = True
try:
for directory in Pack.EXCLUDE_DIRECTORIES:
if delete_test_playbooks and os.path.isdir(f'{self._pack_path}/{directory}'):
shutil.rmtree(f'{self._pack_path}/{directory}')
logging.info(f"Deleted {directory} directory from {self._pack_name} pack")
for root, dirs, files in os.walk(self._pack_path, topdown=True):
for pack_file in files:
full_file_path = os.path.join(root, pack_file)
# removing unwanted files
if pack_file.startswith('.') \
or pack_file in [Pack.AUTHOR_IMAGE_NAME, Pack.USER_METADATA] \
or pack_file in self._remove_files_list:
os.remove(full_file_path)
logging.info(f"Deleted pack {pack_file} file for {self._pack_name} pack")
continue
except Exception:
task_status = False
logging.exception(f"Failed to delete ignored files for pack {self._pack_name}")
finally:
return task_status
def sign_pack(self, signature_string=None):
""" Signs pack folder and creates signature file.
Args:
signature_string (str): Base64 encoded string used to sign the pack.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
try:
if signature_string:
with open("keyfile", "wb") as keyfile:
keyfile.write(signature_string.encode())
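# The pack directory is signed with the external signDirectory binary, which is expected to be available in the current working directory.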
arg = f'./signDirectory {self._pack_path} keyfile base64'
signing_process = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = signing_process.communicate()
if err:
logging.error(f"Failed to sign pack for {self._pack_name} - {str(err)}")
return
logging.info(f"Signed {self._pack_name} pack successfully")
else:
logging.info(f"No signature provided. Skipped signing {self._pack_name} pack")
task_status = True
except Exception:
logging.exception(f"Failed to sign pack for {self._pack_name}")
finally:
return task_status
@staticmethod
def encrypt_pack(zip_pack_path, pack_name, encryption_key, extract_destination_path,
private_artifacts_dir, secondary_encryption_key):
""" decrypt the pack in order to see that the pack was encrypted in the first place.
Args:
zip_pack_path (str): The path to the encrypted zip pack.
pack_name (str): The name of the pack that should be encrypted.
encryption_key (str): The key to encrypt the pack with.
extract_destination_path (str): The path in which the pack resides.
private_artifacts_dir (str): The chosen name for the private artifacts directory.
secondary_encryption_key (str): A second key to encrypt the pack with.
"""
try:
current_working_dir = os.getcwd()
shutil.copy('./encryptor', os.path.join(extract_destination_path, 'encryptor'))
os.chmod(os.path.join(extract_destination_path, 'encryptor'), stat.S_IXOTH)
os.chdir(extract_destination_path)
subprocess.call('chmod +x ./encryptor', shell=True)
output_file = zip_pack_path.replace("_not_encrypted.zip", ".zip")
full_command = f'./encryptor ./{pack_name}_not_encrypted.zip {output_file} "{encryption_key}"'
subprocess.call(full_command, shell=True)
secondary_encryption_key_output_file = zip_pack_path.replace("_not_encrypted.zip", ".enc2.zip")
full_command_with_secondary_encryption = f'./encryptor ./{pack_name}_not_encrypted.zip ' \
f'{secondary_encryption_key_output_file}' \
f' "{secondary_encryption_key}"'
subprocess.call(full_command_with_secondary_encryption, shell=True)
new_artefacts = os.path.join(current_working_dir, private_artifacts_dir)
if os.path.exists(new_artefacts):
shutil.rmtree(new_artefacts)
os.mkdir(path=new_artefacts)
shutil.copy(zip_pack_path, os.path.join(new_artefacts, f'{pack_name}_not_encrypted.zip'))
shutil.copy(output_file, os.path.join(new_artefacts, f'{pack_name}.zip'))
shutil.copy(secondary_encryption_key_output_file, os.path.join(new_artefacts, f'{pack_name}.enc2.zip'))
os.chdir(current_working_dir)
except (subprocess.CalledProcessError, shutil.Error) as error:
print(f"Error while trying to encrypt pack. {error}")
def decrypt_pack(self, encrypted_zip_pack_path, decryption_key):
""" decrypt the pack in order to see that the pack was encrypted in the first place.
Args:
encrypted_zip_pack_path (str): The path for the encrypted zip pack.
decryption_key (str): The key which we can decrypt the pack with.
Returns:
bool: whether the decryption succeeded.
"""
try:
current_working_dir = os.getcwd()
extract_destination_path = f'{current_working_dir}/decrypt_pack_dir'
os.mkdir(extract_destination_path)
shutil.copy('./decryptor', os.path.join(extract_destination_path, 'decryptor'))
secondary_encrypted_pack_path = os.path.join(extract_destination_path, 'encrypted_zip_pack.zip')
shutil.copy(encrypted_zip_pack_path, secondary_encrypted_pack_path)
os.chmod(os.path.join(extract_destination_path, 'decryptor'), stat.S_IXOTH)
output_decrypt_file_path = f"{extract_destination_path}/decrypt_pack.zip"
os.chdir(extract_destination_path)
subprocess.call('chmod +x ./decryptor', shell=True)
full_command = f'./decryptor {secondary_encrypted_pack_path} {output_decrypt_file_path} "{decryption_key}"'
process = subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
shutil.rmtree(extract_destination_path)
os.chdir(current_working_dir)
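# Any stderr output from the decryptor means decryption failed, i.e. the pack was not encrypted with the expected key.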
if stdout:
logging.info(str(stdout))
if stderr:
logging.error(f"Error: Premium pack {self._pack_name} should be encrypted, but isn't.")
return False
return True
except subprocess.CalledProcessError as error:
logging.exception(f"Error while trying to decrypt pack. {error}")
return False
def is_pack_encrypted(self, encrypted_zip_pack_path, decryption_key):
""" Checks if the pack is encrypted by trying to decrypt it.
Args:
encrypted_zip_pack_path (str): The path for the encrypted zip pack.
decryption_key (str): The key which we can decrypt the pack with.
Returns:
bool: whether the pack is encrypted.
"""
return self.decrypt_pack(encrypted_zip_pack_path, decryption_key)
def zip_pack(self, extract_destination_path="", pack_name="", encryption_key="",
private_artifacts_dir='private_artifacts', secondary_encryption_key=""):
""" Zips pack folder.
Returns:
bool: whether the operation succeeded.
str: full path to created pack zip.
"""
zip_pack_path = f"{self._pack_path}.zip" if not encryption_key else f"{self._pack_path}_not_encrypted.zip"
task_status = False
try:
with ZipFile(zip_pack_path, 'w', ZIP_DEFLATED) as pack_zip:
for root, dirs, files in os.walk(self._pack_path, topdown=True):
for f in files:
full_file_path = os.path.join(root, f)
relative_file_path = os.path.relpath(full_file_path, self._pack_path)
pack_zip.write(filename=full_file_path, arcname=relative_file_path)
if encryption_key:
self.encrypt_pack(zip_pack_path, pack_name, encryption_key, extract_destination_path,
private_artifacts_dir, secondary_encryption_key)
task_status = True
logging.success(f"Finished zipping {self._pack_name} pack.")
except Exception:
logging.exception(f"Failed in zipping {self._pack_name} folder")
finally:
# If the pack needs to be encrypted, it is initially at a different location than this final path
final_path_to_zipped_pack = f"{self._pack_path}.zip"
return task_status, final_path_to_zipped_pack
def detect_modified(self, content_repo, index_folder_path, current_commit_hash, previous_commit_hash):
""" Detects pack modified files.
The diff is done between current commit and previous commit that was saved in metadata that was downloaded from
index. In case that no commit was found in index (initial run), the default value will be set to previous commit
from origin/master.
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): full path to downloaded index folder.
current_commit_hash (str): last commit hash of head.
previous_commit_hash (str): the previous commit to diff with.
Returns:
bool: whether the operation succeeded.
list: list of files that were modified.
bool: whether pack was modified and override will be required.
"""
task_status = False
modified_files_paths = []
pack_was_modified = False
try:
pack_index_metadata_path = os.path.join(index_folder_path, self._pack_name, Pack.METADATA)
if not os.path.exists(pack_index_metadata_path):
logging.info(f"{self._pack_name} pack was not found in index, skipping detection of modified pack.")
task_status = True
return
with open(pack_index_metadata_path, 'r') as metadata_file:
downloaded_metadata = json.load(metadata_file)
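# Prefer the commit hash recorded in the downloaded index metadata; fall back to the provided previous commit (e.g. origin/master) on an initial run.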
previous_commit_hash = downloaded_metadata.get(Metadata.COMMIT, previous_commit_hash)
# set 2 commits by hash value in order to check the modified files of the diff
current_commit = content_repo.commit(current_commit_hash)
previous_commit = content_repo.commit(previous_commit_hash)
for modified_file in current_commit.diff(previous_commit):
if modified_file.a_path.startswith(PACKS_FOLDER):
modified_file_path_parts = os.path.normpath(modified_file.a_path).split(os.sep)
if modified_file_path_parts[1] and modified_file_path_parts[1] == self._pack_name:
if not is_ignored_pack_file(modified_file_path_parts):
logging.info(f"Detected modified files in {self._pack_name} pack")
task_status, pack_was_modified = True, True
modified_files_paths.append(modified_file.a_path)
else:
logging.debug(f'{modified_file.a_path} is an ignored file')
task_status = True
if pack_was_modified:
# Make sure the modification is not only of release notes files, if so count that as not modified
pack_was_modified = not all(self.RELEASE_NOTES in path for path in modified_files_paths)
return
except Exception:
logging.exception(f"Failed in detecting modified files of {self._pack_name} pack")
finally:
return task_status, modified_files_paths, pack_was_modified
def upload_to_storage(self, zip_pack_path, latest_version, storage_bucket, override_pack,
private_content=False, pack_artifacts_path=None):
""" Manages the upload of pack zip artifact to correct path in cloud storage.
The zip pack will be uploaded to the following path: /content/packs/pack_name/pack_latest_version.
In case the zip pack artifact already exists at the constructed path, the upload will be skipped.
If the override_pack flag is set to True, the pack upload will be forced.
Args:
zip_pack_path (str): full path to pack zip artifact.
latest_version (str): pack latest version.
storage_bucket (google.cloud.storage.bucket.Bucket): google cloud storage bucket.
override_pack (bool): whether to override existing pack.
private_content (bool): Is being used in a private content build.
pack_artifacts_path (str): Path to where we are saving pack artifacts.
Returns:
bool: whether the operation succeeded.
bool: True if the pack already exists at the targeted path and the upload was skipped, False otherwise.
"""
task_status = True
try:
version_pack_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, self._pack_name, latest_version)
existing_files = [f.name for f in storage_bucket.list_blobs(prefix=version_pack_path)]
if existing_files and not override_pack:
logging.warning(f"The following packs already exist at storage: {', '.join(existing_files)}")
logging.warning(f"Skipping step of uploading {self._pack_name}.zip to storage.")
return task_status, True, None
pack_full_path = os.path.join(version_pack_path, f"{self._pack_name}.zip")
blob = storage_bucket.blob(pack_full_path)
blob.cache_control = "no-cache,max-age=0" # disabling caching for pack blob
with open(zip_pack_path, "rb") as pack_zip:
blob.upload_from_file(pack_zip)
if private_content:
secondary_encryption_key_pack_name = f"{self._pack_name}.enc2.zip"
secondary_encryption_key_bucket_path = os.path.join(version_pack_path,
secondary_encryption_key_pack_name)
# In some cases the path given is actually a zip.
if pack_artifacts_path.endswith('content_packs.zip'):
_pack_artifacts_path = pack_artifacts_path.replace('/content_packs.zip', '')
else:
_pack_artifacts_path = pack_artifacts_path
secondary_encryption_key_artifacts_path = zip_pack_path.replace(f'{self._pack_name}',
f'{self._pack_name}.enc2')
blob = storage_bucket.blob(secondary_encryption_key_bucket_path)
blob.cache_control = "no-cache,max-age=0" # disabling caching for pack blob
with open(secondary_encryption_key_artifacts_path, "rb") as pack_zip:
blob.upload_from_file(pack_zip)
print(
f"Copying {secondary_encryption_key_artifacts_path} to {_pack_artifacts_path}/packs/{self._pack_name}.zip")
shutil.copy(secondary_encryption_key_artifacts_path,
f'{_pack_artifacts_path}/packs/{self._pack_name}.zip')
self.public_storage_path = blob.public_url
logging.success(f"Uploaded {self._pack_name} pack to {pack_full_path} path.")
return task_status, False, pack_full_path
except Exception:
task_status = False
logging.exception(f"Failed in uploading {self._pack_name} pack to gcs.")
return task_status, True, None
def copy_and_upload_to_storage(self, production_bucket, build_bucket, successful_packs_dict):
""" Manages the copy of pack zip artifact from the build bucket to the production bucket.
The zip pack will be copied to the following path: /content/packs/pack_name/pack_latest_version, if
the pack exists in the successful_packs_dict from the Prepare Content step in the Create Instances job.
Args:
production_bucket (google.cloud.storage.bucket.Bucket): google cloud production bucket.
build_bucket (google.cloud.storage.bucket.Bucket): google cloud build bucket.
successful_packs_dict (dict): the dict of all packs that were uploaded in the prepare content step
Returns:
bool: Status - whether the operation succeeded.
bool: Skipped pack - True if the pack already exists at the targeted path and the copy process was skipped,
False otherwise.
"""
pack_not_uploaded_in_prepare_content = self._pack_name not in successful_packs_dict
if pack_not_uploaded_in_prepare_content:
logging.warning("The following packs already exist at storage.")
logging.warning(f"Skipping step of uploading {self._pack_name}.zip to storage.")
return True, True
latest_version = successful_packs_dict[self._pack_name][BucketUploadFlow.LATEST_VERSION]
self._latest_version = latest_version
build_version_pack_path = os.path.join(GCPConfig.BUILD_BASE_PATH, self._pack_name, latest_version)
# Verifying that the latest version of the pack has been uploaded to the build bucket
existing_bucket_version_files = [f.name for f in build_bucket.list_blobs(prefix=build_version_pack_path)]
if not existing_bucket_version_files:
logging.error(f"{self._pack_name} latest version ({latest_version}) was not found on build bucket at "
f"path {build_version_pack_path}.")
return False, False
# We upload the pack zip object taken from the build bucket into the production bucket
prod_version_pack_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, self._pack_name, latest_version)
prod_pack_zip_path = os.path.join(prod_version_pack_path, f'{self._pack_name}.zip')
build_pack_zip_path = os.path.join(build_version_pack_path, f'{self._pack_name}.zip')
build_pack_zip_blob = build_bucket.blob(build_pack_zip_path)
try:
copied_blob = build_bucket.copy_blob(
blob=build_pack_zip_blob, destination_bucket=production_bucket, new_name=prod_pack_zip_path
)
copied_blob.cache_control = "no-cache,max-age=0" # disabling caching for pack blob
self.public_storage_path = copied_blob.public_url
task_status = copied_blob.exists()
except Exception as e:
pack_suffix = os.path.join(self._pack_name, latest_version, f'{self._pack_name}.zip')
logging.exception(f"Failed copying {pack_suffix}. Additional Info: {str(e)}")
return False, False
if not task_status:
logging.error(f"Failed in uploading {self._pack_name} pack to production gcs.")
else:
# Determine if pack versions were aggregated during upload
pack_uploaded_in_prepare_content = not pack_not_uploaded_in_prepare_content
if pack_uploaded_in_prepare_content:
agg_str = successful_packs_dict[self._pack_name].get('aggregated')
if agg_str:
self._aggregated = True
self._aggregation_str = agg_str
logging.success(f"Uploaded {self._pack_name} pack to {prod_pack_zip_path} path.")
return task_status, False
def get_changelog_latest_rn(self, changelog_index_path: str) -> Tuple[dict, LooseVersion, str]:
"""
Returns the changelog file contents and the latest release notes version in the changelog file.
Args:
changelog_index_path (str): the changelog.json file path in the index
Returns: the changelog file contents, the latest version in it, and that version's release notes contents
"""
logging.info(f"Found Changelog for: {self._pack_name}")
if os.path.exists(changelog_index_path):
try:
with open(changelog_index_path, "r") as changelog_file:
changelog = json.load(changelog_file)
except json.JSONDecodeError:
changelog = {}
else:
changelog = {}
# get the latest rn version in the changelog.json file
changelog_rn_versions = [LooseVersion(ver) for ver in changelog]
# no need to check if changelog_rn_versions isn't empty because changelog file exists
changelog_latest_rn_version = max(changelog_rn_versions)
changelog_latest_rn = changelog[changelog_latest_rn_version.vstring]["releaseNotes"]
return changelog, changelog_latest_rn_version, changelog_latest_rn
def get_modified_release_notes_lines(self, release_notes_dir: str, new_release_notes_versions: list,
changelog: dict, modified_rn_files: list):
"""
In the case where an rn file was changed, this function returns the new content
of the release note in the format suitable for the changelog file.
In general, if two rn files are created between two consecutive upload runs (i.e. the pack was changed twice),
the rn files are aggregated: the later version is used as the key in the changelog file,
and the aggregated rns as the value.
Hence, when such an rn is changed, this function re-aggregates all of the rns under the
corresponding version key, and returns the aggregated data, in the right format, as the value under that key.
Args:
release_notes_dir (str): the path to the release notes dir
new_release_notes_versions (list): a list of the new versions of release notes in the pack since the
last upload. This means they were already handled on this upload run (and aggregated if needed).
changelog (dict): the changelog from the production bucket.
modified_rn_files (list): a list of the rn files that were modified according to the last commit in
'filename.md' format.
Returns:
A dict of the modified versions and their release notes contents, for versions that are modified
in the current index file
"""
modified_versions_dict = {}
for rn_filename in modified_rn_files:
version = release_notes_file_to_version(rn_filename)
# Should only apply on modified files that are not the last rn file
if version in new_release_notes_versions:
continue
# The case where the version is a key in the changelog file,
# and the value is not an aggregated release note
if is_the_only_rn_in_block(release_notes_dir, version, changelog):
logging.info("The version is a key in the changelog file and by itself in the changelog block")
with open(os.path.join(release_notes_dir, rn_filename), 'r') as rn_file:
rn_lines = rn_file.read()
modified_versions_dict[version] = self._clean_release_notes(rn_lines).strip()
# The case where the version is not a key in the changelog file or it is a key of aggregated content
else:
logging.debug(f'The "{version}" version is not a key in the changelog file or it is a key of'
f' aggregated content')
same_block_versions_dict, higher_nearest_version = self.get_same_block_versions(
release_notes_dir, version, changelog)
modified_versions_dict[higher_nearest_version] = aggregate_release_notes_for_marketplace(
same_block_versions_dict)
return modified_versions_dict
def get_same_block_versions(self, release_notes_dir: str, version: str, changelog: dict):
"""
Get a dict of the version as key and rn data as value of all of the versions that are in the same
block in the changelog file as the given version (these are the versions that were aggregated together
in a single earlier upload).
Args:
release_notes_dir (str): the path to the release notes dir
version (str): the wanted version
changelog (dict): the changelog from the production bucket.
Returns:
A dict of version, rn data for all corresponding versions, and the highest version among those keys as str
"""
lowest_version = [LooseVersion(Pack.PACK_INITIAL_VERSION)]
lower_versions, higher_versions = [], []
same_block_versions_dict: dict = dict()
for item in changelog.keys(): # divide the versions into lists of lower and higher than given version
(lower_versions if LooseVersion(item) < version else higher_versions).append(LooseVersion(item))
higher_nearest_version = min(higher_versions)
lower_versions = lower_versions + lowest_version # if the version is 1.0.0, ensure lower_versions is not empty
lower_nearest_version = max(lower_versions)
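# The changelog "block" containing the given version is bounded by the nearest lower and nearest higher versions that are keys in the changelog; every rn file in that range was aggregated into the same entry.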
for rn_filename in os.listdir(release_notes_dir):
current_version = release_notes_file_to_version(rn_filename)
# Catch all versions that are in the same block
if lower_nearest_version < LooseVersion(current_version) <= higher_nearest_version:
with open(os.path.join(release_notes_dir, rn_filename), 'r') as rn_file:
rn_lines = rn_file.read()
same_block_versions_dict[current_version] = self._clean_release_notes(rn_lines).strip()
return same_block_versions_dict, higher_nearest_version.vstring
def get_release_notes_lines(self, release_notes_dir: str, changelog_latest_rn_version: LooseVersion,
changelog_latest_rn: str) -> Tuple[str, str, list]:
"""
Prepares the release notes contents for the new release notes entry
Args:
release_notes_dir (str): the path to the release notes dir
changelog_latest_rn_version (LooseVersion): the last version of release notes in the changelog.json file
changelog_latest_rn (str): the last release notes in the changelog.json file
Returns: The release notes contents, the latest release notes version (in the release notes directory),
and a list of the new rn versions that are being uploaded for the first time.
"""
found_versions: list = list()
pack_versions_dict: dict = dict()
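# Collect every release notes file that is newer than the latest version already recorded in changelog.json.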
for filename in sorted(os.listdir(release_notes_dir)):
version = release_notes_file_to_version(filename)
# Aggregate all rn files that are bigger than what we have in the changelog file
if LooseVersion(version) > changelog_latest_rn_version:
with open(os.path.join(release_notes_dir, filename), 'r') as rn_file:
rn_lines = rn_file.read()
pack_versions_dict[version] = self._clean_release_notes(rn_lines).strip()
found_versions.append(LooseVersion(version))
latest_release_notes_version = max(found_versions)
latest_release_notes_version_str = latest_release_notes_version.vstring
logging.info(f"Latest ReleaseNotes version is: {latest_release_notes_version_str}")
if len(pack_versions_dict) > 1:
# In case that there is more than 1 new release notes file, wrap all release notes together for one
# changelog entry
aggregation_str = f"[{', '.join(lv.vstring for lv in found_versions if lv > changelog_latest_rn_version)}]"\
f" => {latest_release_notes_version_str}"
logging.info(f"Aggregating ReleaseNotes versions: {aggregation_str}")
release_notes_lines = aggregate_release_notes_for_marketplace(pack_versions_dict)
self._aggregated = True
self._aggregation_str = aggregation_str
elif len(pack_versions_dict) == 1:
# In case where there is only one new release notes file
release_notes_lines = pack_versions_dict[latest_release_notes_version_str]
else:
# In case where the pack is up to date, i.e. latest changelog is latest rn file
# We should take the release notes from the index as it might have been aggregated
logging.info(f'No new RN file was detected for pack {self._pack_name}, taking latest RN from the index')
release_notes_lines = changelog_latest_rn
new_release_notes_versions = list(pack_versions_dict.keys())
return release_notes_lines, latest_release_notes_version_str, new_release_notes_versions
def assert_upload_bucket_version_matches_release_notes_version(self,
changelog: dict,
latest_release_notes: str) -> None:
"""
Sometimes, when the current branch is not merged from master, there could be another version in the upload
bucket that does not exist in the current branch.
This case can cause unpredictable behavior and we want to fail the build.
This method validates that this is not the case in the current build, and if it is, fails the build with an
assertion error.
Args:
changelog: The changelog from the production bucket.
latest_release_notes: The latest release notes version string in the current branch
"""
changelog_latest_release_notes = max(changelog, key=lambda k: LooseVersion(k))
assert LooseVersion(latest_release_notes) >= LooseVersion(changelog_latest_release_notes), \
f'{self._pack_name}: Version mismatch detected between upload bucket and current branch\n' \
f'Upload bucket version: {changelog_latest_release_notes}\n' \
f'current branch version: {latest_release_notes}\n' \
'Please Merge from master and rebuild'
def get_rn_files_names(self, modified_files_paths):
"""
Args:
modified_files_paths: a list containing all modified files in the current pack, generated
by comparing the old and the new commit hash.
Returns:
The names of the modified release notes files out of the given list only,
as in the names of the files that are under ReleaseNotes directory in the format of 'filename.md'.
"""
modified_rn_files = []
for file_path in modified_files_paths:
modified_file_path_parts = os.path.normpath(file_path).split(os.sep)
if self.RELEASE_NOTES in modified_file_path_parts:
modified_rn_files.append(modified_file_path_parts[-1])
return modified_rn_files
def prepare_release_notes(self, index_folder_path, build_number, pack_was_modified=False,
modified_files_paths=None):
"""
Handles the creation and update of the changelog.json files.
Args:
index_folder_path (str): Path to the unzipped index json.
build_number (str): circleCI build number.
pack_was_modified (bool): whether the pack was modified or not.
modified_files_paths (list): list of paths of the pack's modified files.
Returns:
bool: whether the operation succeeded.
bool: whether running build has not updated pack release notes.
"""
task_status = False
not_updated_build = False
if not modified_files_paths:
modified_files_paths = []
try:
# load changelog from downloaded index
logging.info(f"Loading changelog for {self._pack_name} pack")
changelog_index_path = os.path.join(index_folder_path, self._pack_name, Pack.CHANGELOG_JSON)
if os.path.exists(changelog_index_path):
changelog, changelog_latest_rn_version, changelog_latest_rn = \
self.get_changelog_latest_rn(changelog_index_path)
release_notes_dir = os.path.join(self._pack_path, Pack.RELEASE_NOTES)
if os.path.exists(release_notes_dir):
# Handling latest release notes files
release_notes_lines, latest_release_notes, new_release_notes_versions = \
self.get_release_notes_lines(
release_notes_dir, changelog_latest_rn_version, changelog_latest_rn)
self.assert_upload_bucket_version_matches_release_notes_version(changelog, latest_release_notes)
# Handling modified old release notes files, if there are any
rn_files_names = self.get_rn_files_names(modified_files_paths)
modified_release_notes_lines_dict = self.get_modified_release_notes_lines(
release_notes_dir, new_release_notes_versions, changelog, rn_files_names)
if self._current_version != latest_release_notes:
# TODO Need to implement support for pre-release versions
logging.error(f"Version mismatch detected between current version: {self._current_version} "
f"and latest release notes version: {latest_release_notes}")
task_status = False
return task_status, not_updated_build
else:
if latest_release_notes in changelog:
logging.info(f"Found existing release notes for version: {latest_release_notes}")
version_changelog = self._create_changelog_entry(release_notes=release_notes_lines,
version_display_name=latest_release_notes,
build_number=build_number,
pack_was_modified=pack_was_modified,
new_version=False)
else:
logging.info(f"Created new release notes for version: {latest_release_notes}")
version_changelog = self._create_changelog_entry(release_notes=release_notes_lines,
version_display_name=latest_release_notes,
build_number=build_number,
new_version=True)
if version_changelog:
changelog[latest_release_notes] = version_changelog
if modified_release_notes_lines_dict:
logging.info("updating changelog entries for modified rn")
for version, modified_release_notes_lines in modified_release_notes_lines_dict.items():
updated_entry = self._get_updated_changelog_entry(
changelog, version, release_notes=modified_release_notes_lines)
changelog[version] = updated_entry
else: # will enter only on initial version and release notes folder still was not created
if len(changelog.keys()) > 1 or Pack.PACK_INITIAL_VERSION not in changelog:
logging.warning(
f"{self._pack_name} pack mismatch between {Pack.CHANGELOG_JSON} and {Pack.RELEASE_NOTES}")
task_status, not_updated_build = True, True
return task_status, not_updated_build
changelog[Pack.PACK_INITIAL_VERSION] = self._create_changelog_entry(
release_notes=self.description,
version_display_name=Pack.PACK_INITIAL_VERSION,
build_number=build_number,
initial_release=True,
new_version=False)
logging.info(f"Found existing release notes for version: {Pack.PACK_INITIAL_VERSION} "
f"in the {self._pack_name} pack.")
elif self._current_version == Pack.PACK_INITIAL_VERSION:
version_changelog = self._create_changelog_entry(
release_notes=self.description,
version_display_name=Pack.PACK_INITIAL_VERSION,
build_number=build_number,
new_version=True,
initial_release=True
)
changelog = {
Pack.PACK_INITIAL_VERSION: version_changelog
}
elif self._hidden:
logging.warning(f"Pack {self._pack_name} is deprecated. Skipping release notes handling.")
task_status = True
not_updated_build = True
return task_status, not_updated_build
else:
logging.error(f"No release notes found for: {self._pack_name}")
task_status = False
return task_status, not_updated_build
# write back changelog with changes to pack folder
with open(os.path.join(self._pack_path, Pack.CHANGELOG_JSON), "w") as pack_changelog:
json.dump(changelog, pack_changelog, indent=4)
task_status = True
logging.success(f"Finished creating {Pack.CHANGELOG_JSON} for {self._pack_name}")
except Exception as e:
logging.error(f"Failed creating {Pack.CHANGELOG_JSON} file for {self._pack_name}.\n "
f"Additional info: {e}")
finally:
return task_status, not_updated_build
def create_local_changelog(self, build_index_folder_path):
""" Copies the pack index changelog.json file to the pack path
Args:
build_index_folder_path: The path to the build index folder
Returns:
bool: whether the operation succeeded.
"""
task_status = True
build_changelog_index_path = os.path.join(build_index_folder_path, self._pack_name, Pack.CHANGELOG_JSON)
pack_changelog_path = os.path.join(self._pack_path, Pack.CHANGELOG_JSON)
if os.path.exists(build_changelog_index_path):
try:
shutil.copyfile(src=build_changelog_index_path, dst=pack_changelog_path)
logging.success(f"Successfully copied pack index changelog.json file from {build_changelog_index_path}"
f" to {pack_changelog_path}.")
except shutil.Error as e:
task_status = False
logging.error(f"Failed copying changelog.json file from {build_changelog_index_path} to "
f"{pack_changelog_path}. Additional info: {str(e)}")
return task_status
else:
task_status = False
logging.error(
f"{self._pack_name} index changelog file is missing in build bucket path: {build_changelog_index_path}")
return task_status and self.is_changelog_exists()
def collect_content_items(self):
""" Iterates over content items folders inside pack and collects content items data.
Returns:
dict: Parsed content items.
"""
task_status = False
content_items_result = {}
try:
# the format is defined in issue #19786, may change in the future
content_item_name_mapping = {
PackFolders.SCRIPTS.value: "automation",
PackFolders.PLAYBOOKS.value: "playbook",
PackFolders.INTEGRATIONS.value: "integration",
PackFolders.INCIDENT_FIELDS.value: "incidentfield",
PackFolders.INCIDENT_TYPES.value: "incidenttype",
PackFolders.DASHBOARDS.value: "dashboard",
PackFolders.INDICATOR_FIELDS.value: "indicatorfield",
PackFolders.REPORTS.value: "report",
PackFolders.INDICATOR_TYPES.value: "reputation",
PackFolders.LAYOUTS.value: "layoutscontainer",
PackFolders.CLASSIFIERS.value: "classifier",
PackFolders.WIDGETS.value: "widget",
PackFolders.GENERIC_DEFINITIONS.value: "GenericDefinitions",
PackFolders.GENERIC_FIELDS.value: "GenericFields",
PackFolders.GENERIC_MODULES.value: "GenericModules",
PackFolders.GENERIC_TYPES.value: "GenericTypes",
PackFolders.LISTS.value: "list",
PackFolders.PREPROCESS_RULES.value: "preprocessrule",
}
for root, pack_dirs, pack_files_names in os.walk(self._pack_path, topdown=False):
current_directory = root.split(os.path.sep)[-1]
parent_directory = root.split(os.path.sep)[-2]
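# GenericTypes and GenericFields content lives in sub-folders, so such items are attributed to the parent folder name instead of the sub-folder.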
if parent_directory in [PackFolders.GENERIC_TYPES.value, PackFolders.GENERIC_FIELDS.value]:
current_directory = parent_directory
elif current_directory in [PackFolders.GENERIC_TYPES.value, PackFolders.GENERIC_FIELDS.value]:
continue
folder_collected_items = []
for pack_file_name in pack_files_names:
if not pack_file_name.endswith(('.json', '.yml')):
continue
pack_file_path = os.path.join(root, pack_file_name)
# reputations in the old format aren't supported in server version 6.0.0
if current_directory == PackFolders.INDICATOR_TYPES.value \
and not fnmatch.fnmatch(pack_file_name, 'reputation-*.json'):
os.remove(pack_file_path)
logging.info(f"Deleted pack {pack_file_name} reputation file for {self._pack_name} pack")
continue
with open(pack_file_path, 'r') as pack_file:
if current_directory in PackFolders.yml_supported_folders():
content_item = yaml.safe_load(pack_file)
elif current_directory in PackFolders.json_supported_folders():
content_item = json.load(pack_file)
else:
continue
# check if the content item has a toversion field
to_version = content_item.get('toversion') or content_item.get('toVersion')
if to_version and LooseVersion(to_version) < LooseVersion(Metadata.SERVER_DEFAULT_MIN_VERSION):
os.remove(pack_file_path)
logging.info(
f"{self._pack_name} pack content item {pack_file_name} has to version: {to_version}. "
f"{pack_file_name} file was deleted.")
continue
if current_directory not in PackFolders.pack_displayed_items():
continue # skip content items that are not displayed in contentItems
logging.debug(
f"Iterating over {pack_file_path} file and collecting items of {self._pack_name} pack")
# updated min server version from current content item
self._server_min_version = get_updated_server_version(self._server_min_version, content_item,
self._pack_name)
content_item_tags = content_item.get('tags', [])
if current_directory == PackFolders.SCRIPTS.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'description': content_item.get('comment', ""),
'tags': content_item_tags
})
if not self._contains_transformer and 'transformer' in content_item_tags:
self._contains_transformer = True
if not self._contains_filter and 'filter' in content_item_tags:
self._contains_filter = True
elif current_directory == PackFolders.PLAYBOOKS.value:
self.is_feed_pack(content_item, 'Playbook')
folder_collected_items.append({
'name': content_item.get('name', ""),
'description': content_item.get('description', "")
})
elif current_directory == PackFolders.INTEGRATIONS.value:
integration_commands = content_item.get('script', {}).get('commands', [])
self.is_feed_pack(content_item, 'Integration')
folder_collected_items.append({
'name': content_item.get('display', ""),
'description': content_item.get('description', ""),
'category': content_item.get('category', ""),
'commands': [
{'name': c.get('name', ""), 'description': c.get('description', "")}
for c in integration_commands]
})
elif current_directory == PackFolders.INCIDENT_FIELDS.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'type': content_item.get('type', ""),
'description': content_item.get('description', "")
})
elif current_directory == PackFolders.INCIDENT_TYPES.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'playbook': content_item.get('playbookId', ""),
'closureScript': content_item.get('closureScript', ""),
'hours': int(content_item.get('hours', 0)),
'days': int(content_item.get('days', 0)),
'weeks': int(content_item.get('weeks', 0))
})
elif current_directory == PackFolders.DASHBOARDS.value:
folder_collected_items.append({
'name': content_item.get('name', "")
})
elif current_directory == PackFolders.INDICATOR_FIELDS.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'type': content_item.get('type', ""),
'description': content_item.get('description', "")
})
elif current_directory == PackFolders.REPORTS.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'description': content_item.get('description', "")
})
elif current_directory == PackFolders.INDICATOR_TYPES.value:
folder_collected_items.append({
'details': content_item.get('details', ""),
'reputationScriptName': content_item.get('reputationScriptName', ""),
'enhancementScriptNames': content_item.get('enhancementScriptNames', [])
})
elif current_directory == PackFolders.LAYOUTS.value:
layout_metadata = {
'name': content_item.get('name', '')
}
layout_description = content_item.get('description')
if layout_description is not None:
layout_metadata['description'] = layout_description
folder_collected_items.append(layout_metadata)
elif current_directory == PackFolders.CLASSIFIERS.value:
folder_collected_items.append({
'name': content_item.get('name') or content_item.get('id', ""),
'description': content_item.get('description', '')
})
elif current_directory == PackFolders.WIDGETS.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'dataType': content_item.get('dataType', ""),
'widgetType': content_item.get('widgetType', "")
})
elif current_directory == PackFolders.LISTS.value:
folder_collected_items.append({
'name': content_item.get('name', "")
})
elif current_directory == PackFolders.GENERIC_DEFINITIONS.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'description': content_item.get('description', ""),
})
elif parent_directory == PackFolders.GENERIC_FIELDS.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'description': content_item.get('description', ""),
})
elif current_directory == PackFolders.GENERIC_MODULES.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'description': content_item.get('description', ""),
})
elif parent_directory == PackFolders.GENERIC_TYPES.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'description': content_item.get('description', ""),
})
elif current_directory == PackFolders.PREPROCESS_RULES.value:
folder_collected_items.append({
'name': content_item.get('name', ""),
'description': content_item.get('description', ""),
})
if current_directory in PackFolders.pack_displayed_items():
content_item_key = content_item_name_mapping[current_directory]
content_items_result[content_item_key] = \
content_items_result.get(content_item_key, []) + folder_collected_items
logging.success(f"Finished collecting content items for {self._pack_name} pack")
task_status = True
except Exception:
logging.exception(f"Failed collecting content items in {self._pack_name} pack")
finally:
self._content_items = content_items_result
return task_status
def load_user_metadata(self):
""" Loads user defined metadata and stores part of it's data in defined properties fields.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
user_metadata = {}
try:
user_metadata_path = os.path.join(self._pack_path, Pack.USER_METADATA) # user metadata path before parsing
if not os.path.exists(user_metadata_path):
logging.error(f"{self._pack_name} pack is missing {Pack.USER_METADATA} file.")
return task_status
with open(user_metadata_path, "r") as user_metadata_file:
user_metadata = json.load(user_metadata_file) # loading user metadata
# part of old packs are initialized with empty list
user_metadata = {} if isinstance(user_metadata, list) else user_metadata
# store important user metadata fields
self.support_type = user_metadata.get(Metadata.SUPPORT, Metadata.XSOAR_SUPPORT)
self.current_version = user_metadata.get(Metadata.CURRENT_VERSION, '')
self.hidden = user_metadata.get(Metadata.HIDDEN, False)
self.description = user_metadata.get(Metadata.DESCRIPTION, False)
self.display_name = user_metadata.get(Metadata.NAME, '')
self._user_metadata = user_metadata
self.eula_link = user_metadata.get(Metadata.EULA_LINK, Metadata.EULA_URL)
logging.info(f"Finished loading {self._pack_name} pack user metadata")
task_status = True
except Exception:
logging.exception(f"Failed in loading {self._pack_name} user metadata.")
finally:
return task_status
def _collect_pack_tags(self, user_metadata, landing_page_sections, trending_packs):
tags = set(input_to_list(input_data=user_metadata.get('tags')))
tags |= self._get_tags_from_landing_page(landing_page_sections)
tags |= {PackTags.TIM} if self._is_feed else set()
tags |= {PackTags.USE_CASE} if self._use_cases else set()
tags |= {PackTags.TRANSFORMER} if self._contains_transformer else set()
tags |= {PackTags.FILTER} if self._contains_filter else set()
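# The NEW tag is only kept for packs created within the last 30 days; TRENDING is applied according to the statistics handler's trending packs list.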
if self._create_date:
days_since_creation = (datetime.utcnow() - datetime.strptime(self._create_date, Metadata.DATE_FORMAT)).days
if days_since_creation <= 30:
tags |= {PackTags.NEW}
else:
tags -= {PackTags.NEW}
if trending_packs:
if self._pack_name in trending_packs:
tags |= {PackTags.TRENDING}
else:
tags -= {PackTags.TRENDING}
return tags
def _enhance_pack_attributes(self, index_folder_path, pack_was_modified,
dependencies_data, statistics_handler=None, format_dependencies_only=False):
""" Enhances the pack object with attributes for the metadata file
Args:
dependencies_data (dict): mapping of pack dependencies data, of all levels.
format_dependencies_only (bool): Indicates whether the metadata formation is just for formatting the
dependencies or not.
Returns:
None: the pack attributes are updated in place.
"""
landing_page_sections = mp_statistics.StatisticsHandler.get_landing_page_sections()
displayed_dependencies = self.user_metadata.get(Metadata.DISPLAYED_IMAGES, [])
trending_packs = None
pack_dependencies_by_download_count = displayed_dependencies
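# Default ordering of displayed dependencies; replaced below by a list sorted by download count when pack statistics are available.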
if not format_dependencies_only:
# ===== Pack Regular Attributes =====
self._support_type = self.user_metadata.get(Metadata.SUPPORT, Metadata.XSOAR_SUPPORT)
self._support_details = self._create_support_section(
support_type=self._support_type, support_url=self.user_metadata.get(Metadata.URL),
support_email=self.user_metadata.get(Metadata.EMAIL)
)
self._author = self._get_author(
support_type=self._support_type, author=self.user_metadata.get(Metadata.AUTHOR, ''))
self._certification = self._get_certification(
support_type=self._support_type, certification=self.user_metadata.get(Metadata.CERTIFICATION)
)
self._legacy = self.user_metadata.get(Metadata.LEGACY, True)
self._create_date = self._get_pack_creation_date(index_folder_path)
self._update_date = self._get_pack_update_date(index_folder_path, pack_was_modified)
self._use_cases = input_to_list(input_data=self.user_metadata.get(Metadata.USE_CASES), capitalize_input=True)
self._categories = input_to_list(input_data=self.user_metadata.get(Metadata.CATEGORIES), capitalize_input=True)
self._keywords = input_to_list(self.user_metadata.get(Metadata.KEY_WORDS))
self._dependencies = self._parse_pack_dependencies(
self.user_metadata.get(Metadata.DEPENDENCIES, {}), dependencies_data)
# ===== Pack Private Attributes =====
if not format_dependencies_only:
self._is_private_pack = Metadata.PARTNER_ID in self.user_metadata
self._is_premium = self._is_private_pack
self._preview_only = get_valid_bool(self.user_metadata.get(Metadata.PREVIEW_ONLY, False))
self._price = convert_price(pack_id=self._pack_name, price_value_input=self.user_metadata.get('price'))
if self._is_private_pack:
self._vendor_id = self.user_metadata.get(Metadata.VENDOR_ID, "")
self._partner_id = self.user_metadata.get(Metadata.PARTNER_ID, "")
self._partner_name = self.user_metadata.get(Metadata.PARTNER_NAME, "")
self._content_commit_hash = self.user_metadata.get(Metadata.CONTENT_COMMIT_HASH, "")
# Currently all content packs are legacy.
# Since premium packs cannot be legacy, we directly set this attribute to false.
self._legacy = False
# ===== Pack Statistics Attributes =====
if not self._is_private_pack and statistics_handler: # Public Content case
self._pack_statistics_handler = mp_statistics.PackStatisticsHandler(
self._pack_name, statistics_handler.packs_statistics_df, statistics_handler.packs_download_count_desc,
displayed_dependencies
)
self._downloads_count = self._pack_statistics_handler.download_count
trending_packs = statistics_handler.trending_packs
pack_dependencies_by_download_count = self._pack_statistics_handler.displayed_dependencies_sorted
self._tags = self._collect_pack_tags(self.user_metadata, landing_page_sections, trending_packs)
self._search_rank = mp_statistics.PackStatisticsHandler.calculate_search_rank(
tags=self._tags, certification=self._certification, content_items=self._content_items
)
self._related_integration_images = self._get_all_pack_images(
self._displayed_integration_images, displayed_dependencies, dependencies_data,
pack_dependencies_by_download_count
)
def format_metadata(self, index_folder_path, packs_dependencies_mapping, build_number, commit_hash,
pack_was_modified, statistics_handler, pack_names=None, format_dependencies_only=False):
""" Re-formats metadata according to marketplace metadata format defined in issue #19786 and writes back
the result.
Args:
index_folder_path (str): downloaded index folder directory path.
packs_dependencies_mapping (dict): all packs dependencies lookup mapping.
build_number (str): circleCI build number.
commit_hash (str): current commit hash.
pack_was_modified (bool): Indicates whether the pack was modified or not.
statistics_handler (StatisticsHandler): The marketplace statistics handler
pack_names (set): List of all packs.
format_dependencies_only (bool): Indicates whether the metadata formation is just for formatting the
dependencies or not.
Returns:
bool: True if the metadata file was parsed successfully, otherwise False.
bool: True if the pack is missing dependencies.
"""
task_status = False
pack_names = pack_names if pack_names else []
is_missing_dependencies = False # ensure the variable is defined even if an exception occurs before dependencies are loaded
try:
self.set_pack_dependencies(packs_dependencies_mapping)
if Metadata.DISPLAYED_IMAGES not in self.user_metadata:
self._user_metadata[Metadata.DISPLAYED_IMAGES] = packs_dependencies_mapping.get(
self._pack_name, {}).get(Metadata.DISPLAYED_IMAGES, [])
logging.info(f"Adding auto generated display images for {self._pack_name} pack")
dependencies_data, is_missing_dependencies = \
self._load_pack_dependencies(index_folder_path, pack_names)
self._enhance_pack_attributes(
index_folder_path, pack_was_modified, dependencies_data, statistics_handler,
format_dependencies_only
)
formatted_metadata = self._parse_pack_metadata(build_number, commit_hash)
metadata_path = os.path.join(self._pack_path, Pack.METADATA) # deployed metadata path after parsing
json_write(metadata_path, formatted_metadata) # writing back parsed metadata
logging.success(f"Finished formatting {self._pack_name} packs's {Pack.METADATA} {metadata_path} file.")
task_status = True
except Exception as e:
logging.exception(f"Failed in formatting {self._pack_name} pack metadata. Additional Info: {str(e)}")
finally:
return task_status, is_missing_dependencies
@staticmethod
def pack_created_in_time_delta(pack_name, time_delta: timedelta, index_folder_path: str) -> bool:
"""
Checks whether the pack was created within the time delta specified in the 'time_delta' argument and returns a
boolean accordingly.
Args:
pack_name: the pack name.
time_delta: time_delta to check if pack was created before.
index_folder_path: downloaded index folder directory path.
Returns:
True if pack was created before the time_delta from now, and False otherwise.
"""
pack_creation_time_str = Pack._calculate_pack_creation_date(pack_name, index_folder_path)
return datetime.utcnow() - datetime.strptime(pack_creation_time_str, Metadata.DATE_FORMAT) < time_delta
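    # Illustrative usage only (not part of the original flow; the pack name and index path are assumptions):
    # a pack created 10 days ago falls within a 30-day delta, so the check above would return True, e.g.
    #   Pack.pack_created_in_time_delta('HelloWorld', timedelta(days=30), '/tmp/index')  # -> True (if new enough)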
def _get_pack_creation_date(self, index_folder_path):
return self._calculate_pack_creation_date(self._pack_name, index_folder_path)
@staticmethod
def _calculate_pack_creation_date(pack_name, index_folder_path):
""" Gets the pack created date.
Args:
index_folder_path (str): downloaded index folder directory path.
Returns:
datetime: Pack created date.
"""
created_time = datetime.utcnow().strftime(Metadata.DATE_FORMAT)
metadata = load_json(os.path.join(index_folder_path, pack_name, Pack.METADATA))
if metadata:
if metadata.get(Metadata.CREATED):
created_time = metadata.get(Metadata.CREATED)
else:
raise Exception(f'The metadata file of the {pack_name} pack does not contain "{Metadata.CREATED}" time')
return created_time
def _get_pack_update_date(self, index_folder_path, pack_was_modified):
""" Gets the pack update date.
Args:
index_folder_path (str): downloaded index folder directory path.
pack_was_modified (bool): whether the pack was modified or not.
Returns:
            str: pack update date string, formatted according to Metadata.DATE_FORMAT.
"""
latest_changelog_released_date = datetime.utcnow().strftime(Metadata.DATE_FORMAT)
changelog = load_json(os.path.join(index_folder_path, self._pack_name, Pack.CHANGELOG_JSON))
if changelog and not pack_was_modified:
packs_latest_release_notes = max(LooseVersion(ver) for ver in changelog)
latest_changelog_version = changelog.get(packs_latest_release_notes.vstring, {})
latest_changelog_released_date = latest_changelog_version.get('released')
return latest_changelog_released_date
def set_pack_dependencies(self, packs_dependencies_mapping):
pack_dependencies = packs_dependencies_mapping.get(self._pack_name, {}).get(Metadata.DEPENDENCIES, {})
if Metadata.DEPENDENCIES not in self.user_metadata:
self._user_metadata[Metadata.DEPENDENCIES] = {}
# If it is a core pack, check that no new mandatory packs (that are not core packs) were added
# They can be overridden in the user metadata to be not mandatory so we need to check there as well
if self._pack_name in GCPConfig.CORE_PACKS_LIST:
mandatory_dependencies = [k for k, v in pack_dependencies.items()
if v.get(Metadata.MANDATORY, False) is True
and k not in GCPConfig.CORE_PACKS_LIST
and k not in self.user_metadata[Metadata.DEPENDENCIES].keys()]
if mandatory_dependencies:
raise Exception(f'New mandatory dependencies {mandatory_dependencies} were '
f'found in the core pack {self._pack_name}')
pack_dependencies.update(self.user_metadata[Metadata.DEPENDENCIES])
self._user_metadata[Metadata.DEPENDENCIES] = pack_dependencies
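    # A minimal sketch of the packs_dependencies_mapping shape consumed above, assuming a hypothetical
    # 'HelloWorld' pack; the literal keys stand in for Metadata.DEPENDENCIES / Metadata.DISPLAYED_IMAGES /
    # Metadata.MANDATORY and are illustrative only:
    #   {
    #       'HelloWorld': {
    #           'dependencies': {'Base': {'mandatory': True, 'display_name': 'Base'}},
    #           'displayedImages': ['Base'],
    #       }
    #   }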
def prepare_for_index_upload(self):
""" Removes and leaves only necessary files in pack folder.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
files_to_leave = [Pack.METADATA, Pack.CHANGELOG_JSON, Pack.README]
try:
for file_or_folder in os.listdir(self._pack_path):
files_or_folder_path = os.path.join(self._pack_path, file_or_folder)
if file_or_folder in files_to_leave:
continue
if os.path.isdir(files_or_folder_path):
shutil.rmtree(files_or_folder_path)
else:
os.remove(files_or_folder_path)
task_status = True
except Exception:
logging.exception(f"Failed in preparing index for upload in {self._pack_name} pack.")
finally:
return task_status
@staticmethod
def _get_spitted_yml_image_data(root, target_folder_files):
""" Retrieves pack integration image and integration display name and returns binding image data.
Args:
root (str): full path to the target folder to search integration image.
target_folder_files (list): list of files inside the targeted folder.
Returns:
dict: path to integration image and display name of the integration.
"""
image_data = {}
for pack_file in target_folder_files:
if pack_file.startswith('.'):
continue
elif pack_file.endswith('_image.png'):
image_data['repo_image_path'] = os.path.join(root, pack_file)
elif pack_file.endswith('.yml'):
with open(os.path.join(root, pack_file), 'r') as integration_file:
integration_yml = yaml.safe_load(integration_file)
image_data['display_name'] = integration_yml.get('display', '')
return image_data
def _get_image_data_from_yml(self, pack_file_path):
""" Creates temporary image file and retrieves integration display name.
Args:
pack_file_path (str): full path to the target yml_path integration yml to search integration image.
Returns:
dict: path to temporary integration image, display name of the integrations and the basename of
the integration in content_pack.zip.
"""
image_data = {}
if pack_file_path.endswith('.yml'):
with open(pack_file_path, 'r') as integration_file:
integration_yml = yaml.safe_load(integration_file)
image_data['display_name'] = integration_yml.get('display', '')
# create temporary file of base64 decoded data
integration_name = integration_yml.get('name', '')
base64_image = integration_yml['image'].split(',')[1] if integration_yml.get('image') else None
if not base64_image:
logging.warning(f"{integration_name} integration image was not found in {self._pack_name} pack")
return {}
temp_image_name = f'{integration_name.replace(" ", "")}_image.png'
temp_image_path = os.path.join(self._pack_path, temp_image_name)
with open(temp_image_path, 'wb') as image_file:
image_file.write(base64.b64decode(base64_image))
self._remove_files_list.append(temp_image_name) # add temporary file to tracking list
image_data['image_path'] = temp_image_path
image_data['integration_path_basename'] = os.path.basename(pack_file_path)
logging.info(f"Created temporary integration {image_data['display_name']} image for {self._pack_name} pack")
return image_data
def _search_for_images(self, target_folder):
""" Searches for png files in targeted folder.
Args:
target_folder (str): full path to directory to search.
Returns:
list: list of dictionaries that include image path and display name of integration, example:
[{'image_path': image_path, 'display_name': integration_display_name},...]
"""
target_folder_path = os.path.join(self._pack_path, target_folder)
images_list = []
if os.path.exists(target_folder_path):
for pack_item in os.scandir(target_folder_path):
image_data = self._get_image_data_from_yml(pack_item.path)
if image_data and image_data not in images_list:
images_list.append(image_data)
return images_list
def check_if_exists_in_index(self, index_folder_path):
""" Checks if pack is sub-folder of downloaded index.
Args:
index_folder_path (str): index folder full path.
Returns:
bool: whether the operation succeeded.
bool: whether pack exists in index folder.
"""
task_status, exists_in_index = False, False
try:
if not os.path.exists(index_folder_path):
logging.error(f"{GCPConfig.INDEX_NAME} does not exists.")
return task_status, exists_in_index
exists_in_index = os.path.exists(os.path.join(index_folder_path, self._pack_name))
task_status = True
except Exception:
logging.exception(f"Failed searching {self._pack_name} pack in {GCPConfig.INDEX_NAME}")
finally:
return task_status, exists_in_index
@staticmethod
def remove_contrib_suffix_from_name(display_name: str) -> str:
""" Removes the contribution details suffix from the integration's display name
Args:
display_name (str): The integration display name.
Returns:
str: The display name without the contrib details suffix
"""
contribution_suffixes = ('(Partner Contribution)', '(Developer Contribution)', '(Community Contribution)')
for suffix in contribution_suffixes:
index = display_name.find(suffix)
if index != -1:
display_name = display_name[:index].rstrip(' ')
break
return display_name
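    # Example (hypothetical display name): a contributed integration name is trimmed back to its base name:
    #   Pack.remove_contrib_suffix_from_name('MyIntegration (Partner Contribution)')  # -> 'MyIntegration'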
@staticmethod
def need_to_upload_integration_image(image_data: dict, integration_dirs: list, unified_integrations: list):
""" Checks whether needs to upload the integration image or not.
We upload in one of the two cases:
1. The integration_path_basename is one of the integration dirs detected
2. The integration_path_basename is one of the added/modified unified integrations
Args:
image_data (dict): path to temporary integration image, display name of the integrations and the basename of
the integration in content_pack.zip.
integration_dirs (list): The list of integrations to search in for images
unified_integrations (list): The list of unified integrations to upload their image
Returns:
            bool: True if the image should be uploaded, False otherwise
"""
integration_path_basename = image_data['integration_path_basename']
return any([
re.findall(BucketUploadFlow.INTEGRATION_DIR_REGEX, integration_path_basename)[0] in integration_dirs,
integration_path_basename in unified_integrations
])
def upload_integration_images(self, storage_bucket, diff_files_list=None, detect_changes=False):
""" Uploads pack integrations images to gcs.
The returned result of integration section are defined in issue #19786.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where image will be uploaded.
diff_files_list (list): The list of all modified/added files found in the diff
detect_changes (bool): Whether to detect changes or upload all images in any case.
Returns:
bool: whether the operation succeeded.
list: list of dictionaries with uploaded pack integration images.
"""
task_status = True
integration_images = []
integration_dirs = []
unified_integrations = []
try:
if detect_changes:
# detect added/modified integration images
for file in diff_files_list:
if self.is_integration_image(file.a_path):
# integration dir name will show up in the unified integration file path in content_packs.zip
integration_dirs.append(os.path.basename(os.path.dirname(file.a_path)))
elif self.is_unified_integration(file.a_path):
# if the file found in the diff is a unified integration we upload its image
unified_integrations.append(os.path.basename(file.a_path))
pack_local_images = self._search_for_images(target_folder=PackFolders.INTEGRATIONS.value)
if not pack_local_images:
                return task_status  # no images were found; the finally block stores the empty list and returns the status
pack_storage_root_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, self._pack_name)
for image_data in pack_local_images:
image_path = image_data.get('image_path')
if not image_path:
raise Exception(f"{self._pack_name} pack integration image was not found")
image_name = os.path.basename(image_path)
image_storage_path = os.path.join(pack_storage_root_path, image_name)
pack_image_blob = storage_bucket.blob(image_storage_path)
if not detect_changes or \
self.need_to_upload_integration_image(image_data, integration_dirs, unified_integrations):
# upload the image if needed
logging.info(f"Uploading image: {image_name} of integration: {image_data.get('display_name')} "
f"from pack: {self._pack_name}")
with open(image_path, "rb") as image_file:
pack_image_blob.upload_from_file(image_file)
self._uploaded_integration_images.append(image_name)
if GCPConfig.USE_GCS_RELATIVE_PATH:
image_gcs_path = urllib.parse.quote(
os.path.join(GCPConfig.IMAGES_BASE_PATH, self._pack_name, image_name))
else:
image_gcs_path = pack_image_blob.public_url
integration_name = image_data.get('display_name', '')
if self.support_type != Metadata.XSOAR_SUPPORT:
integration_name = self.remove_contrib_suffix_from_name(integration_name)
integration_images.append({
'name': integration_name,
'imagePath': image_gcs_path
})
if self._uploaded_integration_images:
logging.info(f"Uploaded {len(self._uploaded_integration_images)} images for {self._pack_name} pack.")
except Exception as e:
task_status = False
logging.exception(f"Failed to upload {self._pack_name} pack integration images. Additional Info: {str(e)}")
finally:
self._displayed_integration_images = integration_images
return task_status
def copy_integration_images(self, production_bucket, build_bucket, images_data):
""" Copies all pack's integration images from the build bucket to the production bucket
Args:
production_bucket (google.cloud.storage.bucket.Bucket): The production bucket
build_bucket (google.cloud.storage.bucket.Bucket): The build bucket
images_data (dict): The images data structure from Prepare Content step
Returns:
bool: Whether the operation succeeded.
"""
task_status = True
num_copied_images = 0
err_msg = f"Failed copying {self._pack_name} pack integrations images."
pc_uploaded_integration_images = images_data.get(self._pack_name, {}).get(BucketUploadFlow.INTEGRATIONS, [])
for image_name in pc_uploaded_integration_images:
build_bucket_image_path = os.path.join(GCPConfig.BUILD_BASE_PATH, self._pack_name, image_name)
build_bucket_image_blob = build_bucket.blob(build_bucket_image_path)
if not build_bucket_image_blob.exists():
logging.error(f"Found changed/added integration image {image_name} in content repo but "
f"{build_bucket_image_path} does not exist in build bucket")
task_status = False
else:
logging.info(f"Copying {self._pack_name} pack integration image: {image_name}")
try:
copied_blob = build_bucket.copy_blob(
blob=build_bucket_image_blob, destination_bucket=production_bucket,
new_name=os.path.join(GCPConfig.STORAGE_BASE_PATH, self._pack_name, image_name)
)
if not copied_blob.exists():
logging.error(f"Copy {self._pack_name} integration image: {build_bucket_image_blob.name} "
f"blob to {copied_blob.name} blob failed.")
task_status = False
else:
num_copied_images += 1
except Exception as e:
logging.exception(f"{err_msg}. Additional Info: {str(e)}")
return False
if not task_status:
logging.error(err_msg)
else:
if num_copied_images == 0:
logging.info(f"No added/modified integration images were detected in {self._pack_name} pack.")
else:
logging.success(f"Copied {num_copied_images} images for {self._pack_name} pack.")
return task_status
def upload_author_image(self, storage_bucket, diff_files_list=None, detect_changes=False):
""" Uploads pack author image to gcs.
Searches for `Author_image.png` and uploads author image to gcs. In case no such image was found,
        the default Base pack image path is used and its GCP path is returned.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where author image will be uploaded.
diff_files_list (list): The list of all modified/added files found in the diff
detect_changes (bool): Whether to detect changes or upload the author image in any case.
Returns:
bool: whether the operation succeeded.
str: public gcp path of author image.
"""
task_status = True
author_image_storage_path = ""
try:
author_image_path = os.path.join(self._pack_path, Pack.AUTHOR_IMAGE_NAME) # disable-secrets-detection
if os.path.exists(author_image_path):
image_to_upload_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, self._pack_name,
Pack.AUTHOR_IMAGE_NAME) # disable-secrets-detection
pack_author_image_blob = storage_bucket.blob(image_to_upload_storage_path)
if not detect_changes or any(self.is_author_image(file.a_path) for file in diff_files_list):
# upload the image if needed
with open(author_image_path, "rb") as author_image_file:
pack_author_image_blob.upload_from_file(author_image_file)
self._uploaded_author_image = True
logging.success(f"Uploaded successfully {self._pack_name} pack author image")
if GCPConfig.USE_GCS_RELATIVE_PATH:
author_image_storage_path = urllib.parse.quote(
os.path.join(GCPConfig.IMAGES_BASE_PATH, self._pack_name, Pack.AUTHOR_IMAGE_NAME))
else:
author_image_storage_path = pack_author_image_blob.public_url
elif self.support_type == Metadata.XSOAR_SUPPORT: # use default Base pack image for xsoar supported packs
author_image_storage_path = os.path.join(GCPConfig.IMAGES_BASE_PATH, GCPConfig.BASE_PACK,
Pack.AUTHOR_IMAGE_NAME) # disable-secrets-detection
if not GCPConfig.USE_GCS_RELATIVE_PATH:
# disable-secrets-detection-start
author_image_storage_path = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name,
author_image_storage_path)
# disable-secrets-detection-end
logging.info((f"Skipping uploading of {self._pack_name} pack author image "
f"and use default {GCPConfig.BASE_PACK} pack image"))
else:
logging.info(f"Skipping uploading of {self._pack_name} pack author image. "
f"The pack is defined as {self.support_type} support type")
except Exception:
logging.exception(f"Failed uploading {self._pack_name} pack author image.")
task_status = False
author_image_storage_path = ""
finally:
self._author_image = author_image_storage_path
return task_status
def copy_author_image(self, production_bucket, build_bucket, images_data):
""" Copies pack's author image from the build bucket to the production bucket
        Searches for `Author_image.png`. In case no such image was found, the default Base pack image path is used and
        its GCP path is returned.
Args:
production_bucket (google.cloud.storage.bucket.Bucket): The production bucket
build_bucket (google.cloud.storage.bucket.Bucket): The build bucket
images_data (dict): The images data structure from Prepare Content step
Returns:
bool: Whether the operation succeeded.
"""
if images_data.get(self._pack_name, {}).get(BucketUploadFlow.AUTHOR, False):
build_author_image_path = os.path.join(GCPConfig.BUILD_BASE_PATH, self._pack_name, Pack.AUTHOR_IMAGE_NAME)
build_author_image_blob = build_bucket.blob(build_author_image_path)
if build_author_image_blob.exists():
try:
copied_blob = build_bucket.copy_blob(
blob=build_author_image_blob, destination_bucket=production_bucket,
new_name=os.path.join(GCPConfig.STORAGE_BASE_PATH, self._pack_name, Pack.AUTHOR_IMAGE_NAME)
)
if not copied_blob.exists():
logging.error(f"Failed copying {self._pack_name} pack author image.")
return False
else:
logging.success(f"Copied successfully {self._pack_name} pack author image.")
return True
except Exception as e:
logging.exception(f"Failed copying {Pack.AUTHOR_IMAGE_NAME} for {self._pack_name} pack. "
f"Additional Info: {str(e)}")
return False
else:
logging.error(f"Found changed/added author image in content repo for {self._pack_name} pack but "
f"image does not exist in build bucket in path {build_author_image_path}.")
return False
else:
logging.info(f"No added/modified author image was detected in {self._pack_name} pack.")
return True
def cleanup(self):
""" Finalization action, removes extracted pack folder.
"""
if os.path.exists(self._pack_path):
shutil.rmtree(self._pack_path)
logging.info(f"Cleanup {self._pack_name} pack from: {self._pack_path}")
def is_changelog_exists(self):
""" Indicates whether the local changelog of a given pack exists or not
Returns:
bool: The answer
"""
return os.path.isfile(os.path.join(self._pack_path, Pack.CHANGELOG_JSON))
def is_failed_to_upload(self, failed_packs_dict):
"""
Checks if the pack was failed to upload in Prepare Content step in Create Instances job
Args:
failed_packs_dict (dict): The failed packs file
Returns:
bool: Whether the operation succeeded.
str: The pack's failing status
"""
if self._pack_name in failed_packs_dict:
return True, failed_packs_dict[self._pack_name].get('status')
else:
return False, str()
def is_integration_image(self, file_path: str):
""" Indicates whether a file_path is an integration image or not
Args:
file_path (str): The file path
Returns:
bool: True if the file is an integration image or False otherwise
"""
return all([
file_path.startswith(os.path.join(PACKS_FOLDER, self._pack_name)),
file_path.endswith('.png'),
'image' in os.path.basename(file_path.lower()),
os.path.basename(file_path) != Pack.AUTHOR_IMAGE_NAME
])
def is_author_image(self, file_path: str):
""" Indicates whether a file_path is an author image or not
Args:
file_path (str): The file path
Returns:
bool: True if the file is an author image or False otherwise
"""
return file_path == os.path.join(PACKS_FOLDER, self._pack_name, Pack.AUTHOR_IMAGE_NAME)
def is_unified_integration(self, file_path: str):
""" Indicates whether a file_path is a unified integration yml file or not
Args:
file_path (str): The file path
Returns:
bool: True if the file is a unified integration or False otherwise
"""
return all([
file_path.startswith(os.path.join(PACKS_FOLDER, self._pack_name, PackFolders.INTEGRATIONS.value)),
os.path.basename(os.path.dirname(file_path)) == PackFolders.INTEGRATIONS.value,
os.path.basename(file_path).startswith('integration'),
os.path.basename(file_path).endswith('.yml')
])
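    # Illustrative checks (hypothetical pack/file names; assumes self._pack_name == 'HelloWorld' and that
    # PACKS_FOLDER / PackFolders.INTEGRATIONS.value resolve to 'Packs' / 'Integrations'):
    #   self.is_unified_integration('Packs/HelloWorld/Integrations/integration-HelloWorld.yml')  # -> True
    #   self.is_unified_integration('Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.yml')   # -> False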
# HELPER FUNCTIONS
def get_upload_data(packs_results_file_path: str, stage: str) -> Tuple[dict, dict, dict, dict]:
""" Loads the packs_results.json file to get the successful and failed packs together with uploaded images dicts
Args:
packs_results_file_path (str): The path to the file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
Returns:
dict: The successful packs dict
dict: The failed packs dict
dict : The successful private packs dict
dict: The images data dict
"""
if os.path.exists(packs_results_file_path):
packs_results_file = load_json(packs_results_file_path)
stage = packs_results_file.get(stage, {})
successful_packs_dict = stage.get(BucketUploadFlow.SUCCESSFUL_PACKS, {})
failed_packs_dict = stage.get(BucketUploadFlow.FAILED_PACKS, {})
successful_private_packs_dict = stage.get(BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS, {})
images_data_dict = stage.get(BucketUploadFlow.IMAGES, {})
return successful_packs_dict, failed_packs_dict, successful_private_packs_dict, images_data_dict
return {}, {}, {}, {}
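# A minimal usage sketch (the artifacts path below is an assumption, the stage constant is taken from the
# docstring above): unpack the four dicts for the prepare-content stage and inspect the failed packs.
#   successful, failed, private_successful, images = get_upload_data(
#       'artifacts/packs_results.json', BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING)
#   if failed:
#       logging.warning(f"Failed packs detected: {list(failed)}")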
def store_successful_and_failed_packs_in_ci_artifacts(packs_results_file_path: str, stage: str, successful_packs: list,
failed_packs: list, updated_private_packs: list,
images_data: dict = None):
""" Write the successful and failed packs to the correct section in the packs_results.json file
Args:
        packs_results_file_path (str): The path to the packs_results.json file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
successful_packs (list): The list of all successful packs
failed_packs (list): The list of all failed packs
updated_private_packs (list) : The list of all private packs that were updated
images_data (dict): A dict containing all images that were uploaded for each pack
"""
packs_results = load_json(packs_results_file_path)
packs_results[stage] = dict()
if failed_packs:
failed_packs_dict = {
BucketUploadFlow.FAILED_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False"
} for pack in failed_packs
}
}
packs_results[stage].update(failed_packs_dict)
logging.debug(f"Failed packs {failed_packs_dict}")
if successful_packs:
successful_packs_dict = {
BucketUploadFlow.SUCCESSFUL_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False",
BucketUploadFlow.LATEST_VERSION: pack.latest_version
} for pack in successful_packs
}
}
packs_results[stage].update(successful_packs_dict)
logging.debug(f"Successful packs {successful_packs_dict}")
if updated_private_packs:
successful_private_packs_dict = {
BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS: {pack_name: {} for pack_name in updated_private_packs}
}
packs_results[stage].update(successful_private_packs_dict)
logging.debug(f"Successful private packs {successful_private_packs_dict}")
if images_data:
packs_results[stage].update({BucketUploadFlow.IMAGES: images_data})
logging.debug(f"Images data {images_data}")
if packs_results:
json_write(packs_results_file_path, packs_results)
def load_json(file_path: str) -> dict:
""" Reads and loads json file.
Args:
file_path (str): full path to json file.
Returns:
dict: loaded json file.
"""
try:
if file_path and os.path.exists(file_path):
with open(file_path, 'r') as json_file:
result = json.load(json_file)
else:
result = {}
return result
except json.decoder.JSONDecodeError:
return {}
def json_write(file_path: str, data: Union[list, dict]):
""" Writes given data to a json file
Args:
file_path: The file path
data: The data to write
"""
with open(file_path, "w") as f:
f.write(json.dumps(data, indent=4))
def init_storage_client(service_account=None):
"""Initialize google cloud storage client.
    In case of local dev usage the client will be initialized with user default credentials.
    Otherwise, the client will be initialized from the service account json that is stored in CircleCI.
Args:
service_account (str): full path to service account json.
Return:
storage.Client: initialized google cloud storage client.
"""
if service_account:
storage_client = storage.Client.from_service_account_json(service_account)
logging.info("Created gcp service account")
return storage_client
else:
        # in case of local dev use, ignore the warning about not using a service account.
warnings.filterwarnings("ignore", message=google.auth._default._CLOUD_SDK_CREDENTIALS_WARNING)
credentials, project = google.auth.default()
storage_client = storage.Client(credentials=credentials, project=project)
logging.info("Created gcp private account")
return storage_client
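# Minimal usage sketch (the key path and bucket name are assumptions, not taken from this module):
#   client = init_storage_client('/secrets/gcs_service_account.json')
#   bucket = client.bucket('marketplace-dist')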
def input_to_list(input_data, capitalize_input=False):
""" Helper function for handling input list or str from the user.
Args:
input_data (list or str): input from the user to handle.
        capitalize_input (bool): whether to capitalize the input list data or not.
Returns:
list: returns the original list or list that was split by comma.
"""
input_data = input_data if input_data else []
input_data = input_data if isinstance(input_data, list) else [s for s in input_data.split(',') if s]
if capitalize_input:
return [" ".join([w.title() if w.islower() else w for w in i.split()]) for i in input_data]
else:
return input_data
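# Example behaviour (illustrative values): a comma-separated string is split, and with capitalize_input=True
# lowercase words are title-cased while already-uppercased words are left untouched:
#   input_to_list('malware,phishing IOC', capitalize_input=True)  # -> ['Malware', 'Phishing IOC']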
def get_valid_bool(bool_input):
""" Converts and returns valid bool.
Returns:
bool: converted bool input.
"""
return bool(strtobool(bool_input)) if isinstance(bool_input, str) else bool_input
def convert_price(pack_id, price_value_input=None):
""" Converts to integer value price input. In case no price input provided, return zero as price.
Args:
pack_id (str): pack unique identifier.
price_value_input (str): price string to convert.
Returns:
int: converted to int pack price.
"""
try:
if not price_value_input:
            return 0  # in case no price was supplied, return 0
else:
return int(price_value_input) # otherwise convert to int and return result
except Exception:
logging.exception(f"{pack_id} pack price is not valid. The price was set to 0.")
return 0
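# Example (illustrative pack id): a valid price string is converted, anything else falls back to 0:
#   convert_price('HelloWorld', '5')    # -> 5
#   convert_price('HelloWorld', None)   # -> 0
#   convert_price('HelloWorld', 'N/A')  # -> 0 (logged as an invalid price)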
def get_updated_server_version(current_string_version, compared_content_item, pack_name):
""" Compares two semantic server versions and returns the higher version between them.
Args:
current_string_version (str): current string version.
compared_content_item (dict): compared content item entity.
pack_name (str): the pack name (id).
Returns:
        str: the lower (minimal) version between the compared versions.
"""
lower_version_result = current_string_version
try:
compared_string_version = compared_content_item.get('fromversion') or compared_content_item.get(
'fromVersion') or "99.99.99"
current_version, compared_version = LooseVersion(current_string_version), LooseVersion(compared_string_version)
if current_version > compared_version:
lower_version_result = compared_string_version
except Exception:
content_item_name = compared_content_item.get('name') or compared_content_item.get(
'display') or compared_content_item.get('id') or compared_content_item.get('details', '')
logging.exception(f"{pack_name} failed in version comparison of content item {content_item_name}.")
finally:
return lower_version_result
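# Example (illustrative values): the helper keeps the minimal required server version across content items,
# so an item declaring fromversion 5.5.0 lowers a current value of 6.0.0:
#   get_updated_server_version('6.0.0', {'fromversion': '5.5.0', 'name': 'SomeScript'}, 'HelloWorld')  # -> '5.5.0'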
def get_content_git_client(content_repo_path: str):
""" Initializes content repo client.
Args:
content_repo_path (str): content repo full path
Returns:
git.repo.base.Repo: content repo object.
"""
return git.Repo(content_repo_path)
def get_recent_commits_data(content_repo: Any, index_folder_path: str, is_bucket_upload_flow: bool,
is_private_build: bool = False, circle_branch: str = "master"):
""" Returns recent commits hashes (of head and remote master)
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): the path to the local index folder
is_bucket_upload_flow (bool): indicates whether its a run of bucket upload flow or regular build
is_private_build (bool): indicates whether its a run of private build or not
circle_branch (str): CircleCi branch of current build
Returns:
str: last commit hash of head.
str: previous commit depending on the flow the script is running
"""
return content_repo.head.commit.hexsha, get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow,
is_private_build, circle_branch)
def get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow, is_private_build, circle_branch):
""" If running in bucket upload workflow we want to get the commit in the index which is the index
We've last uploaded to production bucket. Otherwise, we are in a commit workflow and the diff should be from the
head of origin/master
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): the path to the local index folder
is_bucket_upload_flow (bool): indicates whether its a run of bucket upload flow or regular build
is_private_build (bool): indicates whether its a run of private build or not
circle_branch (str): CircleCi branch of current build
Returns:
str: previous commit depending on the flow the script is running
"""
if is_bucket_upload_flow:
return get_last_upload_commit_hash(content_repo, index_folder_path)
elif is_private_build:
previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
logging.info(f"Using origin/master HEAD~1 commit hash {previous_master_head_commit} to diff with.")
return previous_master_head_commit
else:
if circle_branch == 'master':
head_str = "HEAD~1"
            # if the circle branch is master then the current commit is origin/master HEAD, so we need to diff with HEAD~1
previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
else:
head_str = "HEAD"
# else we are on a regular branch and the diff should be done with origin/master HEAD
previous_master_head_commit = content_repo.commit('origin/master').hexsha
logging.info(f"Using origin/master {head_str} commit hash {previous_master_head_commit} to diff with.")
return previous_master_head_commit
def get_last_upload_commit_hash(content_repo, index_folder_path):
"""
Returns the last origin/master commit hash that was uploaded to the bucket
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path: The path to the index folder
Returns:
The commit hash
"""
inner_index_json_path = os.path.join(index_folder_path, f'{GCPConfig.INDEX_NAME}.json')
if not os.path.exists(inner_index_json_path):
logging.critical(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
sys.exit(1)
else:
inner_index_json_file = load_json(inner_index_json_path)
if 'commit' in inner_index_json_file:
last_upload_commit_hash = inner_index_json_file['commit']
logging.info(f"Retrieved the last commit that was uploaded to production: {last_upload_commit_hash}")
else:
logging.critical(f"No commit field in {GCPConfig.INDEX_NAME}.json, content: {str(inner_index_json_file)}")
sys.exit(1)
try:
last_upload_commit = content_repo.commit(last_upload_commit_hash).hexsha
logging.info(f"Using commit hash {last_upload_commit} from index.json to diff with.")
return last_upload_commit
except Exception as e:
logging.critical(f'Commit {last_upload_commit_hash} in {GCPConfig.INDEX_NAME}.json does not exist in content '
f'repo. Additional info:\n {e}')
sys.exit(1)
def is_ignored_pack_file(modified_file_path_parts):
""" Indicates whether a pack file needs to be ignored or not.
Args:
modified_file_path_parts: The modified file parts, e.g. if file path is "a/b/c" then the
parts list is ["a", "b", "c"]
Returns:
(bool): True if the file should be ignored, False otherwise
"""
for file_suffix in PackIgnored.ROOT_FILES:
if file_suffix in modified_file_path_parts:
return True
for pack_folder, file_suffixes in PackIgnored.NESTED_FILES.items():
if pack_folder in modified_file_path_parts:
if not file_suffixes: # Ignore all pack folder files
return True
for file_suffix in file_suffixes:
if file_suffix in modified_file_path_parts[-1]:
return True
for pack_folder in PackIgnored.NESTED_DIRS:
if pack_folder in modified_file_path_parts:
pack_folder_path = os.sep.join(modified_file_path_parts[:modified_file_path_parts.index(pack_folder) + 1])
file_path = os.sep.join(modified_file_path_parts)
for folder_path in [f for f in glob.glob(os.path.join(pack_folder_path, '*/*')) if os.path.isdir(f)]:
# Checking for all 2nd level directories. e.g. test_data directory
if file_path.startswith(folder_path):
return True
return False
def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
"""
    Checks whether the given version's changelog entry is a non-aggregated block, i.e. its value in the changelog
    does not contain other release notes that have been aggregated in previous uploads.
    If that is the case, the adjacent previous release note in the changelog will be equal to the one in the
    release notes directory; otherwise (meaning there are versions in the release notes directory that are
    missing from the changelog and have therefore been aggregated) the two will differ, and this function checks that.
    Note: The comparison is done against the release notes directory to avoid cases where versions are missing from
    the changelog due to inconsistent version numbering, such as major version bumps. (For example, if the versions
    1.2.7 and 1.3.0 are two consecutive keys in the changelog, we need to determine whether 1.3.0 aggregated the
    versions 1.2.8-1.3.0, or whether 1.3.0 is simply the consecutive version right after 1.2.7 with a major bump.
    To check that, we compare against the files in the release notes directory.)
Args:
release_notes_dir: the path to the release notes dir.
version (str): the wanted version.
changelog (dict): the changelog from the production bucket.
Returns:
True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
"""
if not changelog.get(version):
return False
all_rn_versions = []
lowest_version = [LooseVersion('1.0.0')]
for filename in os.listdir(release_notes_dir):
current_version = release_notes_file_to_version(filename)
all_rn_versions.append(LooseVersion(current_version))
lower_versions_all_versions = [item for item in all_rn_versions if item < version] + lowest_version
lower_versions_in_changelog = [LooseVersion(item) for item in changelog.keys() if
LooseVersion(item) < version] + lowest_version
return max(lower_versions_all_versions) == max(lower_versions_in_changelog)
def release_notes_file_to_version(rn_file_name):
return rn_file_name.replace('.md', '').replace('_', '.')
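# Example (illustrative file name): a release notes file name maps back to its semantic version:
#   release_notes_file_to_version('1_2_3.md')  # -> '1.2.3'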
|
the-stack_0_27228
|
# coding: utf-8
# In[1]:
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
# In[2]:
train_df = pd.read_csv('C:/Users/Nikose/Desktop/Titanic/train.csv')
test_df = pd.read_csv('C:/Users/Nikose/Desktop/Titanic/test.csv')
combine = [train_df, test_df]
# In[3]:
print(train_df.columns.values)
# In[4]:
train_df.head(2)
# In[5]:
train_df.isnull().sum()
# In[6]:
train_df.describe()
# In[7]:
train_df.describe(include=['O'])
# In[8]:
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# In[9]:
train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# In[10]:
train_df[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# In[12]:
train_df[["Parch", "Survived"]].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# In[17]:
print("Before", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape)
train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]
"After", train_df.shape, test_df.shape, combine[0].shape, combine[1].shape
# In[18]:
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'])
# In[19]:
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
# In[20]:
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train_df.head()
# In[21]:
train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape
# In[22]:
for dataset in combine:
dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0} ).astype(int)
train_df.head()
# In[23]:
grid = sns.FacetGrid(train_df, row='Pclass', col='Sex', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend()
# In[24]:
guess_ages = np.zeros((2,3))
guess_ages
# In[25]:
for dataset in combine:
for i in range(0, 2):
for j in range(0, 3):
guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)]['Age'].dropna()
# age_mean = guess_df.mean()
# age_std = guess_df.std()
# age_guess = rnd.uniform(age_mean - age_std, age_mean + age_std)
age_guess = guess_df.median()
# Convert random age float to nearest .5 age
guess_ages[i,j] = int( age_guess/0.5 + 0.5 ) * 0.5
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1), 'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
train_df.head()
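# A vectorized alternative sketch for the same Sex/Pclass median-age imputation above (shown for comparison
# only, not used; it skips the .5 rounding step and assumes the raw 'Age' column is still present):
# for dataset in combine:
#     dataset['Age'] = dataset['Age'].fillna(
#         dataset.groupby(['Sex', 'Pclass'])['Age'].transform('median'))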
# In[26]:
train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True)
# In[27]:
for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train_df.head()
# In[28]:
train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
# In[29]:
for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# In[30]:
for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
# In[31]:
train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head()
# In[32]:
for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10)
# In[33]:
freq_port = train_df.Embarked.dropna().mode()[0]
freq_port
# In[34]:
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# In[35]:
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train_df.head()
# In[36]:
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
test_df.head()
# In[37]:
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
# In[38]:
for dataset in combine:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head(10)
# In[39]:
test_df.head(10)
# In[40]:
X_train = train_df.drop("Survived", axis=1)
Y_train = train_df["Survived"]
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# In[41]:
# Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train) * 100, 2)
acc_log
# In[42]:
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# In[43]:
# Support Vector Machines
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, Y_train) * 100, 2)
acc_svc
# In[44]:
knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
acc_knn = round(knn.score(X_train, Y_train) * 100, 2)
acc_knn
# In[45]:
# Gaussian Naive Bayes
gaussian = GaussianNB()
gaussian.fit(X_train, Y_train)
Y_pred = gaussian.predict(X_test)
acc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2)
acc_gaussian
# In[46]:
# Perceptron
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_test)
acc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2)
acc_perceptron
# In[47]:
# Linear SVC
linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
Y_pred = linear_svc.predict(X_test)
acc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)
acc_linear_svc
# In[48]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
sgd.fit(X_train, Y_train)
Y_pred = sgd.predict(X_test)
acc_sgd = round(sgd.score(X_train, Y_train) * 100, 2)
acc_sgd
# In[49]:
# Decision Tree
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
Y_pred = decision_tree.predict(X_test)
acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)
acc_decision_tree
# In[55]:
# Random Forest
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred_11 = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
# In[51]:
models = pd.DataFrame({
'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Decent', 'Linear SVC',
'Decision Tree'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree]})
models.sort_values(by='Score', ascending=False)
# In[52]:
from sklearn.ensemble import GradientBoostingClassifier
gradient_boost = GradientBoostingClassifier(n_estimators=100)
gradient_boost.fit(X_train,Y_train)
Y_pred_2 = gradient_boost.predict(X_test)
gradient_boost.score(X_train,Y_train)
# In[58]:
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred_2
})
# In[59]:
submission.to_csv('C:/Users/Nikose/Desktop/Titanic/submission2.csv', index=False)
|
the-stack_0_27229
|
from django.shortcuts import render, redirect
from app.forms import ClientesForm
from app.models import Clientes
from django.core.paginator import Paginator
# Create your views here.
def home(request):
data = {}
search = request.GET.get('search')
if search:
data['db'] = Clientes.objects.filter(email__icontains=search)
else:
data['db'] = Clientes.objects.all()
return render(request, 'index.html', data)
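# Illustrative sketch only: the Paginator imported above is not wired in yet; paginating the listing in
# home() could look roughly like this (a page size of 10 is an assumption):
#   paginator = Paginator(data['db'], 10)
#   data['db'] = paginator.get_page(request.GET.get('page'))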
def cadastro(request):
data = {}
data['cadastro'] = ClientesForm
return render(request, 'cadastro.html', data)
def create(request):
cadastro = ClientesForm(request.POST or None)
if cadastro.is_valid():
cadastro.save()
return redirect('home')
def view(request, pk):
data = {}
data['db'] = Clientes.objects.get(pk=pk)
return render(request, 'view.html', data)
def edit(request, pk):
data = {}
data['db'] = Clientes.objects.get(pk=pk)
data['cadastro'] = ClientesForm(instance=data['db'])
return render(request, 'cadastro.html', data)
def update(request, pk):
data = {}
data['db'] = Clientes.objects.get(pk=pk)
cadastro = ClientesForm(request.POST or None, instance=data['db'])
if cadastro.is_valid():
cadastro.save()
return redirect('home')
def delete(request, pk):
db = Clientes.objects.get(pk=pk)
db.delete()
return redirect('home')
|
the-stack_0_27230
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
import datetime
import decimal
import os
import random
from io import BytesIO
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.orc
import pyorc as po
import pytest
import cudf
from cudf.core.dtypes import Decimal64Dtype
from cudf.io.orc import ORCWriter
from cudf.testing._utils import (
assert_eq,
gen_rand_series,
supported_numpy_dtypes,
)
@pytest.fixture(scope="module")
def datadir(datadir):
return datadir / "orc"
@pytest.fixture
def path_or_buf(datadir):
fname = datadir / "TestOrcFile.test1.orc"
try:
with open(fname, "rb") as f:
buffer = BytesIO(f.read())
except Exception as excpr:
if type(excpr).__name__ == "FileNotFoundError":
pytest.skip(".parquet file is not found")
else:
print(type(excpr).__name__)
def _make_path_or_buf(src):
if src == "filepath":
return str(fname)
if src == "pathobj":
return fname
if src == "bytes_io":
return buffer
if src == "bytes":
return buffer.getvalue()
if src == "url":
return fname.as_uri()
raise ValueError("Invalid source type")
yield _make_path_or_buf
@pytest.mark.filterwarnings("ignore:Using CPU")
@pytest.mark.filterwarnings("ignore:Strings are not yet supported")
@pytest.mark.parametrize("engine", ["pyarrow", "cudf"])
@pytest.mark.parametrize("use_index", [False, True])
@pytest.mark.parametrize(
"inputfile, columns",
[
("TestOrcFile.emptyFile.orc", ["boolean1"]),
(
"TestOrcFile.test1.orc",
[
"boolean1",
"byte1",
"short1",
"int1",
"long1",
"float1",
"double1",
],
),
("TestOrcFile.RLEv2.orc", ["x", "y"]),
("TestOrcFile.testSnappy.orc", None),
("TestOrcFile.demo-12-zlib.orc", ["_col2", "_col3", "_col4", "_col5"]),
],
)
def test_orc_reader_basic(datadir, inputfile, columns, use_index, engine):
path = datadir / inputfile
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
expect = orcfile.read(columns=columns).to_pandas()
got = cudf.read_orc(
path, engine=engine, columns=columns, use_index=use_index
)
assert_eq(expect, got, check_categorical=False)
def test_orc_reader_filenotfound(tmpdir):
with pytest.raises(FileNotFoundError):
cudf.read_orc("TestMissingFile.orc")
with pytest.raises(FileNotFoundError):
cudf.read_orc(tmpdir.mkdir("cudf_orc"))
def test_orc_reader_local_filepath():
path = "~/TestLocalFile.orc"
if not os.path.isfile(path):
pytest.skip("Local .orc file is not found")
cudf.read_orc(path)
@pytest.mark.parametrize(
"src", ["filepath", "pathobj", "bytes_io", "bytes", "url"]
)
def test_orc_reader_filepath_or_buffer(path_or_buf, src):
cols = ["int1", "long1", "float1", "double1"]
orcfile = pa.orc.ORCFile(path_or_buf("filepath"))
expect = orcfile.read(columns=cols).to_pandas()
got = cudf.read_orc(path_or_buf(src), columns=cols)
assert_eq(expect, got)
def test_orc_reader_trailing_nulls(datadir):
path = datadir / "TestOrcFile.nulls-at-end-snappy.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
expect = orcfile.read().to_pandas().fillna(0)
got = cudf.read_orc(path, engine="cudf").fillna(0)
# PANDAS uses NaN to represent invalid data, which forces float dtype
# For comparison, we can replace NaN with 0 and cast to the cuDF dtype
for col in expect.columns:
expect[col] = expect[col].astype(got[col].dtype)
assert_eq(expect, got, check_categorical=False)
@pytest.mark.parametrize("use_index", [False, True])
@pytest.mark.parametrize(
"inputfile",
["TestOrcFile.testDate1900.orc", "TestOrcFile.testDate2038.orc"],
)
def test_orc_reader_datetimestamp(datadir, inputfile, use_index):
path = datadir / inputfile
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas(date_as_object=False)
gdf = cudf.read_orc(path, engine="cudf", use_index=use_index)
assert_eq(pdf, gdf, check_categorical=False)
def test_orc_reader_strings(datadir):
path = datadir / "TestOrcFile.testStringAndBinaryStatistics.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
expect = orcfile.read(columns=["string1"])
got = cudf.read_orc(path, engine="cudf", columns=["string1"])
assert_eq(expect, got, check_categorical=False)
def test_orc_read_statistics(datadir):
# Read in file containing 2 columns ("int1" and "string1") and 3 stripes
# (sizes 5000, 5000 and 1000 respectively). Each stripe has the same value
# in every one of its rows. The values the stripes have are 1, 2, and 3 in
# "int1" and "one", "two", and "three" in "string1".
path = datadir / "TestOrcFile.testStripeLevelStats.orc"
try:
(
file_statistics,
stripes_statistics,
) = cudf.io.orc.read_orc_statistics([path, path])
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
# Check numberOfValues
assert_eq(file_statistics[0]["int1"]["number_of_values"], 11_000)
assert_eq(
file_statistics[0]["int1"]["number_of_values"],
sum(
[
stripes_statistics[0]["int1"]["number_of_values"],
stripes_statistics[1]["int1"]["number_of_values"],
stripes_statistics[2]["int1"]["number_of_values"],
]
),
)
assert_eq(
stripes_statistics[1]["int1"]["number_of_values"],
stripes_statistics[1]["string1"]["number_of_values"],
)
assert_eq(stripes_statistics[2]["string1"]["number_of_values"], 1_000)
# Check other statistics
assert_eq(stripes_statistics[2]["string1"]["has_null"], False)
assert_eq(
file_statistics[0]["int1"]["minimum"],
min(
stripes_statistics[0]["int1"]["minimum"],
stripes_statistics[1]["int1"]["minimum"],
stripes_statistics[2]["int1"]["minimum"],
),
)
assert_eq(file_statistics[0]["int1"]["minimum"], 1)
assert_eq(file_statistics[0]["string1"]["minimum"], "one")
@pytest.mark.parametrize("engine", ["cudf", "pyarrow"])
@pytest.mark.parametrize(
"predicate,expected_len",
[
([[("int1", "==", 1)]], 5000),
([[("int1", "<=", 2)]], 10000),
([[("int1", "==", -1)]], 0),
([[("int1", "in", range(3))]], 10000),
([[("int1", "in", {1, 3})]], 6000),
([[("int1", "not in", {1, 3})]], 5000),
],
)
def test_orc_read_filtered(datadir, engine, predicate, expected_len):
path = datadir / "TestOrcFile.testStripeLevelStats.orc"
try:
df_filtered = cudf.read_orc(path, engine=engine, filters=predicate)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
# Assert # of rows after filtering
assert len(df_filtered) == expected_len
@pytest.mark.parametrize("engine", ["cudf", "pyarrow"])
def test_orc_read_stripes(datadir, engine):
path = datadir / "TestOrcFile.testDate1900.orc"
try:
pdf = cudf.read_orc(path, engine=engine)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
num_rows, stripes, col_names = cudf.io.read_orc_metadata(path)
# Read stripes one at a time
gdf = [
cudf.read_orc(path, engine=engine, stripes=[[i]])
for i in range(stripes)
]
gdf = cudf.concat(gdf).reset_index(drop=True)
assert_eq(pdf, gdf, check_categorical=False)
# Read stripes all at once
gdf = cudf.read_orc(
path, engine=engine, stripes=[[int(x) for x in range(stripes)]]
)
assert_eq(pdf, gdf, check_categorical=False)
# Read only some stripes
gdf = cudf.read_orc(path, engine=engine, stripes=[[0, 1]])
assert_eq(gdf, pdf.head(25000))
gdf = cudf.read_orc(path, engine=engine, stripes=[[0, stripes - 1]])
assert_eq(
gdf, cudf.concat([pdf.head(15000), pdf.tail(10000)], ignore_index=True)
)
@pytest.mark.parametrize("num_rows", [1, 100, 3000])
@pytest.mark.parametrize("skiprows", [0, 1, 3000])
def test_orc_read_rows(datadir, skiprows, num_rows):
path = datadir / "TestOrcFile.decimal.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(
path, engine="cudf", skiprows=skiprows, num_rows=num_rows
)
# Slice rows out of the whole dataframe for comparison as PyArrow doesn't
# have an API to read a subsection of rows from the file
pdf = pdf[skiprows : skiprows + num_rows]
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
def test_orc_read_skiprows(tmpdir):
buff = BytesIO()
df = pd.DataFrame(
{"a": [1, 0, 1, 0, None, 1, 1, 1, 0, None, 0, 0, 1, 1, 1, 1]},
dtype=pd.BooleanDtype(),
)
writer = po.Writer(buff, po.Struct(a=po.Boolean()))
tuples = list(
map(
lambda x: (None,) if x[0] is pd.NA else x,
list(df.itertuples(index=False, name=None)),
)
)
writer.writerows(tuples)
writer.close()
skiprows = 10
expected = cudf.read_orc(buff)[skiprows::].reset_index(drop=True)
got = cudf.read_orc(buff, skiprows=skiprows)
assert_eq(expected, got)
def test_orc_reader_uncompressed_block(datadir):
path = datadir / "uncompressed_snappy.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
expect = orcfile.read().to_pandas()
got = cudf.read_orc(path, engine="cudf")
assert_eq(expect, got, check_categorical=False)
def test_orc_reader_nodata_block(datadir):
path = datadir / "nodata.orc"
try:
orcfile = pa.orc.ORCFile(path)
except Exception as excpr:
if type(excpr).__name__ == "ArrowIOError":
pytest.skip(".orc file is not found")
else:
print(type(excpr).__name__)
expect = orcfile.read().to_pandas()
got = cudf.read_orc(path, engine="cudf", num_rows=1)
assert_eq(expect, got, check_categorical=False)
@pytest.mark.parametrize("compression", [None, "snappy"])
@pytest.mark.parametrize(
"reference_file, columns",
[
(
"TestOrcFile.test1.orc",
[
"boolean1",
"byte1",
"short1",
"int1",
"long1",
"float1",
"double1",
],
),
("TestOrcFile.demo-12-zlib.orc", ["_col1", "_col3", "_col5"]),
],
)
def test_orc_writer(datadir, tmpdir, reference_file, columns, compression):
pdf_fname = datadir / reference_file
gdf_fname = tmpdir.join("gdf.orc")
try:
orcfile = pa.orc.ORCFile(pdf_fname)
except Exception as excpr:
if type(excpr).__name__ == "ArrowIOError":
pytest.skip(".orc file is not found")
else:
print(type(excpr).__name__)
expect = orcfile.read(columns=columns).to_pandas()
cudf.from_pandas(expect).to_orc(gdf_fname.strpath, compression=compression)
got = pa.orc.ORCFile(gdf_fname).read(columns=columns).to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("compression", [None, "snappy"])
@pytest.mark.parametrize(
"reference_file, columns",
[
(
"TestOrcFile.test1.orc",
[
"boolean1",
"byte1",
"short1",
"int1",
"long1",
"float1",
"double1",
],
),
("TestOrcFile.demo-12-zlib.orc", ["_col1", "_col3", "_col5"]),
],
)
def test_chunked_orc_writer(
datadir, tmpdir, reference_file, columns, compression
):
pdf_fname = datadir / reference_file
gdf_fname = tmpdir.join("chunked_gdf.orc")
try:
orcfile = pa.orc.ORCFile(pdf_fname)
except Exception as excpr:
if type(excpr).__name__ == "ArrowIOError":
pytest.skip(".orc file is not found")
else:
print(type(excpr).__name__)
pdf = orcfile.read(columns=columns).to_pandas()
gdf = cudf.from_pandas(pdf)
expect = pd.concat([pdf, pdf]).reset_index(drop=True)
writer = ORCWriter(gdf_fname, compression=compression)
writer.write_table(gdf)
writer.write_table(gdf)
writer.close()
got = pa.orc.ORCFile(gdf_fname).read(columns=columns).to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize(
"dtypes",
[
{"c": str, "a": int},
{"c": int, "a": str},
{"c": int, "a": str, "b": float},
{"c": str, "a": object},
],
)
def test_orc_writer_strings(tmpdir, dtypes):
gdf_fname = tmpdir.join("gdf_strings.orc")
expect = cudf.datasets.randomdata(nrows=10, dtypes=dtypes, seed=1)
expect.to_orc(gdf_fname)
got = pa.orc.ORCFile(gdf_fname).read().to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize(
"dtypes",
[
{"c": str, "a": int},
{"c": int, "a": str},
{"c": int, "a": str, "b": float},
{"c": str, "a": object},
],
)
def test_chunked_orc_writer_strings(tmpdir, dtypes):
gdf_fname = tmpdir.join("chunked_gdf_strings.orc")
gdf = cudf.datasets.randomdata(nrows=10, dtypes=dtypes, seed=1)
pdf = gdf.to_pandas()
expect = pd.concat([pdf, pdf]).reset_index(drop=True)
writer = ORCWriter(gdf_fname)
writer.write_table(gdf)
writer.write_table(gdf)
writer.close()
got = pa.orc.ORCFile(gdf_fname).read().to_pandas()
assert_eq(expect, got)
def test_orc_writer_sliced(tmpdir):
cudf_path = tmpdir.join("cudf.orc")
df = pd.DataFrame()
df["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"])
df = cudf.from_pandas(df)
df_select = df.iloc[1:3]
df_select.to_orc(cudf_path)
assert_eq(cudf.read_orc(cudf_path), df_select.reset_index(drop=True))
@pytest.mark.parametrize(
"orc_file",
[
"TestOrcFile.decimal.orc",
"TestOrcFile.decimal.same.values.orc",
"TestOrcFile.decimal.multiple.values.orc",
        # For additional information, take a look at PR 7034
"TestOrcFile.decimal.runpos.issue.orc",
],
)
def test_orc_reader_decimal_type(datadir, orc_file):
file_path = datadir / orc_file
try:
orcfile = pa.orc.ORCFile(file_path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
df = cudf.read_orc(file_path).to_pandas()
assert_eq(pdf, df)
def test_orc_decimal_precision_fail(datadir):
file_path = datadir / "TestOrcFile.int_decimal.precision_19.orc"
try:
orcfile = pa.orc.ORCFile(file_path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
# Max precision supported is 18 (Decimal64Dtype limit)
# and the data has the precision 19. This test should be removed
# once Decimal128Dtype is introduced.
with pytest.raises(RuntimeError):
cudf.read_orc(file_path)
# Shouldn't cause failure if decimal column is not chosen to be read.
pdf = orcfile.read(columns=["int"]).to_pandas()
gdf = cudf.read_orc(file_path, columns=["int"])
assert_eq(pdf, gdf)
# For additional information, take a look at PRs 6636 and 6702
@pytest.mark.parametrize(
"orc_file",
[
"TestOrcFile.boolean_corruption_PR_6636.orc",
"TestOrcFile.boolean_corruption_PR_6702.orc",
],
)
def test_orc_reader_boolean_type(datadir, orc_file):
file_path = datadir / orc_file
pdf = pd.read_orc(file_path)
df = cudf.read_orc(file_path).to_pandas()
assert_eq(pdf, df)
@pytest.mark.filterwarnings("ignore:Using CPU")
def test_orc_reader_tzif_timestamps(datadir):
    # Contains timestamps in the range covered by the TZif file
    # Other datetime tests only cover "future" times
path = datadir / "TestOrcFile.lima_timezone.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(path, engine="cudf").to_pandas()
assert_eq(pdf, gdf)
def test_int_overflow(tmpdir):
file_path = tmpdir.join("gdf_overflow.orc")
# The number of rows and the large element trigger delta encoding
num_rows = 513
df = cudf.DataFrame({"a": [None] * num_rows}, dtype="int32")
df["a"][0] = 1024 * 1024 * 1024
df["a"][num_rows - 1] = 1
df.to_orc(file_path)
assert_eq(cudf.read_orc(file_path), df)
def normalized_equals(value1, value2):
if isinstance(value1, (datetime.datetime, np.datetime64)):
value1 = np.datetime64(value1, "ms")
if isinstance(value2, (datetime.datetime, np.datetime64)):
value2 = np.datetime64(value2, "ms")
    # Compare as floats if either value is a float
if isinstance(value1, float) or isinstance(value2, float):
return np.isclose(value1, value2)
return value1 == value2
@pytest.mark.parametrize("nrows", [1, 100, 6000000])
def test_orc_write_statistics(tmpdir, datadir, nrows):
supported_stat_types = supported_numpy_dtypes + ["str"]
# Can't write random bool columns until issue #6763 is fixed
if nrows == 6000000:
supported_stat_types.remove("bool")
# Make a dataframe
gdf = cudf.DataFrame(
{
"col_" + str(dtype): gen_rand_series(dtype, nrows, has_nulls=True)
for dtype in supported_stat_types
}
)
fname = tmpdir.join("gdf.orc")
# Write said dataframe to ORC with cuDF
gdf.to_orc(fname.strpath)
# Read back written ORC's statistics
orc_file = pa.orc.ORCFile(fname)
(file_stats, stripes_stats,) = cudf.io.orc.read_orc_statistics([fname])
# check file stats
for col in gdf:
if "minimum" in file_stats[0][col]:
stats_min = file_stats[0][col]["minimum"]
actual_min = gdf[col].min()
assert normalized_equals(actual_min, stats_min)
if "maximum" in file_stats[0][col]:
stats_max = file_stats[0][col]["maximum"]
actual_max = gdf[col].max()
assert normalized_equals(actual_max, stats_max)
# compare stripe statistics with actual min/max
for stripe_idx in range(0, orc_file.nstripes):
stripe = orc_file.read_stripe(stripe_idx)
# pandas is unable to handle min/max of string col with nulls
stripe_df = cudf.DataFrame(stripe.to_pandas())
for col in stripe_df:
if "minimum" in stripes_stats[stripe_idx][col]:
actual_min = stripe_df[col].min()
stats_min = stripes_stats[stripe_idx][col]["minimum"]
assert normalized_equals(actual_min, stats_min)
if "maximum" in stripes_stats[stripe_idx][col]:
actual_max = stripe_df[col].max()
stats_max = stripes_stats[stripe_idx][col]["maximum"]
assert normalized_equals(actual_max, stats_max)
@pytest.mark.parametrize("nrows", [1, 100, 6000000])
def test_orc_write_bool_statistics(tmpdir, datadir, nrows):
# Make a dataframe
gdf = cudf.DataFrame({"col_bool": gen_rand_series("bool", nrows)})
fname = tmpdir.join("gdf.orc")
# Write said dataframe to ORC with cuDF
gdf.to_orc(fname.strpath)
# Read back written ORC's statistics
orc_file = pa.orc.ORCFile(fname)
(file_stats, stripes_stats,) = cudf.io.orc.read_orc_statistics([fname])
# check file stats
col = "col_bool"
if "true_count" in file_stats[0][col]:
stats_true_count = file_stats[0][col]["true_count"]
actual_true_count = gdf[col].sum()
assert normalized_equals(actual_true_count, stats_true_count)
if "number_of_values" in file_stats[0][col]:
stats_valid_count = file_stats[0][col]["number_of_values"]
actual_valid_count = gdf[col].valid_count
assert normalized_equals(actual_valid_count, stats_valid_count)
    # compare stripe statistics with the actual true/valid counts
for stripe_idx in range(0, orc_file.nstripes):
stripe = orc_file.read_stripe(stripe_idx)
stripe_df = cudf.DataFrame(stripe.to_pandas())
if "true_count" in stripes_stats[stripe_idx][col]:
actual_true_count = stripe_df[col].sum()
stats_true_count = stripes_stats[stripe_idx][col]["true_count"]
assert normalized_equals(actual_true_count, stats_true_count)
if "number_of_values" in stripes_stats[stripe_idx][col]:
actual_valid_count = stripe_df[col].valid_count
stats_valid_count = stripes_stats[stripe_idx][col][
"number_of_values"
]
assert normalized_equals(actual_valid_count, stats_valid_count)
def test_orc_reader_gmt_timestamps(datadir):
path = datadir / "TestOrcFile.gmt.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(path, engine="cudf").to_pandas()
assert_eq(pdf, gdf)
def test_orc_bool_encode_fail():
np.random.seed(0)
buffer = BytesIO()
# Generate a boolean column longer than a single stripe
fail_df = cudf.DataFrame({"col": gen_rand_series("bool", 600000)})
# Invalidate the first row in the second stripe to break encoding
fail_df["col"][500000] = None
# Should throw instead of generating a file that is incompatible
# with other readers (see issue #6763)
with pytest.raises(RuntimeError):
fail_df.to_orc(buffer)
# Generate a boolean column that fits into a single stripe
okay_df = cudf.DataFrame({"col": gen_rand_series("bool", 500000)})
okay_df["col"][500000 - 1] = None
# Invalid row is in the last row group of the stripe;
# encoding is assumed to be correct
okay_df.to_orc(buffer)
# Also validate data
pdf = pa.orc.ORCFile(buffer).read().to_pandas()
assert_eq(okay_df, pdf)
def test_nanoseconds_overflow():
buffer = BytesIO()
# Use nanosecond values that take more than 32 bits to encode
s = cudf.Series([710424008, -1338482640], dtype="datetime64[ns]")
expected = cudf.DataFrame({"s": s})
expected.to_orc(buffer)
cudf_got = cudf.read_orc(buffer)
assert_eq(expected, cudf_got)
pyarrow_got = pa.orc.ORCFile(buffer).read()
assert_eq(expected.to_pandas(), pyarrow_got.to_pandas())
def test_empty_dataframe():
buffer = BytesIO()
expected = cudf.DataFrame()
expected.to_orc(buffer)
    # Raise an error if a requested column name does not exist.
with pytest.raises(RuntimeError):
cudf.read_orc(buffer, columns=["a"])
got_df = cudf.read_orc(buffer)
expected_pdf = pd.read_orc(buffer)
assert_eq(expected, got_df)
assert_eq(expected_pdf, got_df)
@pytest.mark.parametrize(
"data", [[None, ""], ["", None], [None, None], ["", ""]]
)
def test_empty_string_columns(data):
buffer = BytesIO()
expected = cudf.DataFrame({"string": data}, dtype="str")
expected.to_orc(buffer)
expected_pdf = pd.read_orc(buffer)
got_df = cudf.read_orc(buffer)
assert_eq(expected, got_df)
assert_eq(expected_pdf, got_df)
@pytest.mark.parametrize("scale", [-3, 0, 3])
def test_orc_writer_decimal(tmpdir, scale):
np.random.seed(0)
fname = tmpdir / "decimal.orc"
expected = cudf.DataFrame({"dec_val": gen_rand_series("i", 100)})
expected["dec_val"] = expected["dec_val"].astype(Decimal64Dtype(7, scale))
expected.to_orc(fname)
got = pd.read_orc(fname)
assert_eq(expected.to_pandas()["dec_val"], got["dec_val"])
@pytest.mark.parametrize("num_rows", [1, 100, 3000])
def test_orc_reader_multiple_files(datadir, num_rows):
path = datadir / "TestOrcFile.testSnappy.orc"
df_1 = pd.read_orc(path)
df_2 = pd.read_orc(path)
df = pd.concat([df_1, df_2], ignore_index=True)
gdf = cudf.read_orc(
[path, path], engine="cudf", num_rows=num_rows
).to_pandas()
# Slice rows out of the whole dataframe for comparison as PyArrow doesn't
# have an API to read a subsection of rows from the file
df = df[:num_rows]
df = df.reset_index(drop=True)
assert_eq(df, gdf)
def test_orc_reader_multi_file_single_stripe(datadir):
path = datadir / "TestOrcFile.testSnappy.orc"
# should raise an exception
with pytest.raises(ValueError):
cudf.read_orc([path, path], engine="cudf", stripes=[0])
def test_orc_reader_multi_file_multi_stripe(datadir):
path = datadir / "TestOrcFile.testStripeLevelStats.orc"
gdf = cudf.read_orc([path, path], engine="cudf", stripes=[[0, 1], [2]])
pdf = pd.read_orc(path)
assert_eq(pdf, gdf)
def test_orc_string_stream_offset_issue():
size = 30000
vals = {
str(x): [decimal.Decimal(1)] * size if x != 0 else ["XYZ"] * size
for x in range(0, 5)
}
df = cudf.DataFrame(vals)
buffer = BytesIO()
df.to_orc(buffer)
assert_eq(df, cudf.read_orc(buffer))
def generate_list_struct_buff(size=28000):
rd = random.Random(0)
np.random.seed(seed=0)
buff = BytesIO()
schema = {
"lvl3_list": po.Array(po.Array(po.Array(po.BigInt()))),
"lvl1_list": po.Array(po.BigInt()),
"lvl1_struct": po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}),
"lvl2_struct": po.Struct(
**{
"a": po.BigInt(),
"lvl1_struct": po.Struct(
**{"c": po.BigInt(), "d": po.BigInt()}
),
}
),
"list_nests_struct": po.Array(
po.Array(po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}))
),
"struct_nests_list": po.Struct(
**{
"struct": po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}),
"list": po.Array(po.BigInt()),
}
),
}
schema = po.Struct(**schema)
lvl3_list = [
[
[
[
rd.choice([None, np.random.randint(1, 3)])
for z in range(np.random.randint(1, 3))
]
for z in range(np.random.randint(0, 3))
]
for y in range(np.random.randint(0, 3))
]
for x in range(size)
]
lvl1_list = [
[
rd.choice([None, np.random.randint(0, 3)])
for y in range(np.random.randint(1, 4))
]
for x in range(size)
]
lvl1_struct = [
(np.random.randint(0, 3), np.random.randint(0, 3)) for x in range(size)
]
lvl2_struct = [
(
rd.choice([None, np.random.randint(0, 3)]),
(
rd.choice([None, np.random.randint(0, 3)]),
np.random.randint(0, 3),
),
)
for x in range(size)
]
list_nests_struct = [
[
[rd.choice(lvl1_struct), rd.choice(lvl1_struct)]
for y in range(np.random.randint(1, 4))
]
for x in range(size)
]
struct_nests_list = [(lvl1_struct[x], lvl1_list[x]) for x in range(size)]
df = pd.DataFrame(
{
"lvl3_list": lvl3_list,
"lvl1_list": lvl1_list,
"lvl1_struct": lvl1_struct,
"lvl2_struct": lvl2_struct,
"list_nests_struct": list_nests_struct,
"struct_nests_list": struct_nests_list,
}
)
writer = po.Writer(buff, schema, stripe_size=1024)
tuples = list(
map(
lambda x: (None,) if x[0] is pd.NA else x,
list(df.itertuples(index=False, name=None)),
)
)
writer.writerows(tuples)
writer.close()
return buff
list_struct_buff = generate_list_struct_buff()
@pytest.mark.parametrize(
"columns",
[
None,
["lvl3_list", "list_nests_struct", "lvl2_struct", "struct_nests_list"],
["lvl2_struct", "lvl1_struct"],
],
)
@pytest.mark.parametrize("num_rows", [0, 15, 1005, 10561, 28000])
@pytest.mark.parametrize("use_index", [True, False])
@pytest.mark.parametrize("skip_rows", [0, 101, 1007, 27000])
def test_lists_struct_nests(
columns, num_rows, use_index, skip_rows,
):
has_lists = (
any("list" in col_name for col_name in columns) if columns else True
)
if has_lists and skip_rows > 0:
with pytest.raises(
RuntimeError, match="skip_rows is not supported by list column"
):
cudf.read_orc(
list_struct_buff,
columns=columns,
num_rows=num_rows,
use_index=use_index,
skiprows=skip_rows,
)
else:
gdf = cudf.read_orc(
list_struct_buff,
columns=columns,
num_rows=num_rows,
use_index=use_index,
skiprows=skip_rows,
)
pyarrow_tbl = pyarrow.orc.ORCFile(list_struct_buff).read()
pyarrow_tbl = (
pyarrow_tbl[skip_rows : skip_rows + num_rows]
if columns is None
else pyarrow_tbl.select(columns)[skip_rows : skip_rows + num_rows]
)
if num_rows > 0:
assert_eq(True, pyarrow_tbl.equals(gdf.to_arrow()))
else:
assert_eq(pyarrow_tbl.to_pandas(), gdf)
@pytest.mark.parametrize(
"data", [["_col0"], ["FakeName", "_col0", "TerriblyFakeColumnName"]]
)
def test_orc_reader_decimal(datadir, data):
path = datadir / "TestOrcFile.decimal.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(
path, engine="cudf", decimal_cols_as_float=data
).to_pandas()
    # Convert the PyArrow decimal columns to float64 for comparison,
    # since cuDF returns these columns as float64 when decimal_cols_as_float is used
pdf = pdf.apply(pd.to_numeric)
assert_eq(pdf, gdf)
@pytest.mark.parametrize("data", [["InvalidColumnName"]])
def test_orc_reader_decimal_invalid_column(datadir, data):
path = datadir / "TestOrcFile.decimal.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(
path, engine="cudf", decimal_cols_as_float=data
).to_pandas()
    # Since the column name passed via `decimal_cols_as_float` is invalid,
    # the column should still be read as a decimal (no float conversion)
assert_eq(pdf, gdf)
# This test case validates the issue raised in #8665,
# please check the issue for more details.
def test_orc_timestamp_read(datadir):
path = datadir / "TestOrcFile.timestamp.issue.orc"
pdf = pd.read_orc(path)
gdf = cudf.read_orc(path)
assert_eq(pdf, gdf)
|
the-stack_0_27232
|
from .louvain_utilities import singlelayer_louvain, multilayer_louvain
from .parameter_estimation_utilities import louvain_part_with_membership, estimate_singlelayer_SBM_parameters, \
gamma_estimate_from_parameters, omega_function_from_model, estimate_multilayer_SBM_parameters
from .partition_utilities import in_degrees
import louvain
def iterative_monolayer_resolution_parameter_estimation(G, gamma=1.0, tol=1e-2, max_iter=25, verbose=False,
method="louvain"):
"""Monolayer variant of ALG. 1 from "Relating modularity maximization and stochastic block models in multilayer
networks." This is intended to determine an "optimal" value for gamma by repeatedly maximizing modularity and
estimating new values for the resolution parameter.
See https://doi.org/10.1137/18M1231304 for more details.
:param G: graph of interest
:type G: igraph.Graph
:param gamma: initialization gamma value
:type gamma: float
:param tol: convergence tolerance
:type tol: float
:param max_iter: maximum number of iterations
:type max_iter: int
:param verbose: whether or not to print verbose output
:type verbose: bool
:param method: community detection method to use
:type method: str
:return:
- gamma to which the iteration converged
- the resulting partition
:rtype: tuple[float, louvain.RBConfigurationVertexPartition]
"""
if 'weight' not in G.es:
G.es['weight'] = [1.0] * G.ecount()
m = sum(G.es['weight'])
if method == "louvain":
def maximize_modularity(resolution_param):
return singlelayer_louvain(G, resolution_param, return_partition=True)
elif method == "2-spinglass":
def maximize_modularity(resolution_param):
membership = G.community_spinglass(spins=2, gamma=resolution_param).membership
return louvain_part_with_membership(G, membership)
else:
raise ValueError(f"Community detection method {method} not supported")
def estimate_SBM_parameters(partition):
return estimate_singlelayer_SBM_parameters(G, partition, m=m)
def update_gamma(omega_in, omega_out):
return gamma_estimate_from_parameters(omega_in, omega_out)
part, last_gamma = None, None
for iteration in range(max_iter):
part = maximize_modularity(gamma)
omega_in, omega_out = estimate_SBM_parameters(part)
last_gamma = gamma
gamma = update_gamma(omega_in, omega_out)
if gamma is None:
raise ValueError(f"gamma={last_gamma:.3f} resulted in degenerate partition")
if verbose:
print(f"Iter {iteration:>2}: {len(part)} communities with Q={part.q:.3f} and "
f"gamma={last_gamma:.3f}->{gamma:.3f}")
if abs(gamma - last_gamma) < tol:
break # gamma converged
else:
if verbose:
print(f"Gamma failed to converge within {max_iter} iterations. "
f"Final move of {abs(gamma - last_gamma):.3f} was not within tolerance {tol}")
if verbose:
print(f"Returned {len(part)} communities with Q={part.q:.3f} and gamma={gamma:.3f}")
return gamma, part
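# A minimal usage sketch (illustrative, not part of the original module); the Zachary
# karate club graph is assumed here as a convenient single-layer test case:
#
#     import igraph as ig
#     G = ig.Graph.Famous("Zachary")
#     gamma, part = iterative_monolayer_resolution_parameter_estimation(G, gamma=1.0, verbose=True)
#     print(f"converged gamma={gamma:.3f} with {len(part)} communities")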
def check_multilayer_graph_consistency(G_intralayer, G_interlayer, layer_vec, model, m_t, T, N=None, Nt=None):
"""
Checks that the structures of the intralayer and interlayer graphs are consistent and match the given model.
:param G_intralayer: input graph containing all intra-layer edges
:param G_interlayer: input graph containing all inter-layer edges
:param layer_vec: vector of each vertex's layer membership
:param model: network layer topology (temporal, multilevel, multiplex)
:param m_t: vector of total edge weights per layer
:param T: number of layers in input graph
:param N: number of nodes per layer
:param Nt: vector of nodes per layer
"""
rules = [T > 1,
"Graph must have multiple layers",
G_interlayer.is_directed(),
"Interlayer graph should be directed",
G_interlayer.vcount() == G_intralayer.vcount(),
"Inter-layer and Intra-layer graphs must be of the same size",
len(layer_vec) == G_intralayer.vcount(),
"Layer membership vector must have length matching graph size",
all(m > 0 for m in m_t),
"All layers of graph must contain edges",
all(layer_vec[e.source] == layer_vec[e.target] for e in G_intralayer.es),
"Intralayer graph should not contain edges across layers",
model != 'temporal' or G_interlayer.ecount() == N * (T - 1),
"Interlayer temporal graph must contain (nodes per layer) * (number of layers - 1) edges",
model != 'temporal' or (G_interlayer.vcount() % T == 0 and G_intralayer.vcount() % T == 0),
"Vertex count of a temporal graph should be a multiple of the number of layers",
model != 'temporal' or all(nt == N for nt in Nt),
"Temporal networks must have the same number of nodes in every layer",
model != 'multilevel' or all(nt > 0 for nt in Nt),
"All layers of a multilevel graph must be consecutive and nonempty",
model != 'multilevel' or all(in_degree <= 1 for in_degree in in_degrees(G_interlayer)),
"Multilevel networks should have at most one interlayer in-edge per node",
model != 'multiplex' or all(nt == N for nt in Nt),
"Multiplex networks must have the same number of nodes in every layer",
model != 'multiplex' or G_interlayer.ecount() == N * T * (T - 1),
"Multiplex interlayer networks must contain edges between all pairs of layers"]
checks, messages = rules[::2], rules[1::2]
if not all(checks):
raise ValueError("Input graph is malformed\n" + "\n".join(m for c, m in zip(checks, messages) if not c))
def iterative_multilayer_resolution_parameter_estimation(G_intralayer, G_interlayer, layer_vec, gamma=1.0, omega=1.0,
gamma_tol=1e-2, omega_tol=5e-2, omega_max=1000, max_iter=25,
model='temporal', verbose=False):
"""
Multilayer variant of ALG. 1 from "Relating modularity maximization and stochastic block models in multilayer
networks." The nested functions here are just used to match the pseudocode in the paper.
:param G_intralayer: intralayer graph of interest
:type G_intralayer: igraph.Graph
:param G_interlayer: interlayer graph of interest
:type G_interlayer: igraph.Graph
:param layer_vec: list of each vertex's layer membership
:type layer_vec: list[int]
:param gamma: starting gamma value
:type gamma: float
:param omega: starting omega value
:type omega: float
:param gamma_tol: convergence tolerance for gamma
:type gamma_tol: float
:param omega_tol: convergence tolerance for omega
:type omega_tol: float
:param omega_max: maximum allowed value for omega
:type omega_max: float
:param max_iter: maximum number of iterations
:type max_iter: int
:param model: network layer topology (temporal, multilevel, multiplex)
:type model: str
:param verbose: whether or not to print verbose output
:type verbose: bool
:return:
- gamma to which the iteration converged
- omega to which the iteration converged
- the resulting partition
:rtype: tuple[float, float, tuple[int]]
"""
if 'weight' not in G_intralayer.es:
G_intralayer.es['weight'] = [1.0] * G_intralayer.ecount()
if 'weight' not in G_interlayer.es:
G_interlayer.es['weight'] = [1.0] * G_interlayer.ecount()
T = max(layer_vec) + 1 # layer count
optimiser = louvain.Optimiser()
# compute total edge weights per layer
m_t = [0] * T
for e in G_intralayer.es:
m_t[layer_vec[e.source]] += e['weight']
# compute total node counts per layer
N = G_intralayer.vcount() // T
Nt = [0] * T
for layer in layer_vec:
Nt[layer] += 1
check_multilayer_graph_consistency(G_intralayer, G_interlayer, layer_vec, model, m_t, T, N, Nt)
update_omega = omega_function_from_model(model, omega_max, T=T)
update_gamma = gamma_estimate_from_parameters
def maximize_modularity(intralayer_resolution, interlayer_resolution):
return multilayer_louvain(G_intralayer, G_interlayer, layer_vec, intralayer_resolution, interlayer_resolution,
optimiser=optimiser, return_partition=True)
def estimate_SBM_parameters(partition):
return estimate_multilayer_SBM_parameters(G_intralayer, G_interlayer, layer_vec, partition, model,
N=N, T=T, Nt=Nt, m_t=m_t)
part, K, last_gamma, last_omega = (None,) * 4
for iteration in range(max_iter):
part = maximize_modularity(gamma, omega)
theta_in, theta_out, p, K = estimate_SBM_parameters(part)
if not 0.0 <= p <= 1.0:
raise ValueError(f"gamma={gamma:.3f}, omega={omega:.3f} resulted in impossible estimate p={p:.3f}")
last_gamma, last_omega = gamma, omega
gamma = update_gamma(theta_in, theta_out)
if gamma is None:
raise ValueError(f"gamma={last_gamma:.3f}, omega={last_omega:.3f} resulted in degenerate partition")
omega = update_omega(theta_in, theta_out, p, K)
if verbose:
print(f"Iter {iteration:>2}: {K} communities with Q={part.q:.3f}, gamma={last_gamma:.3f}->{gamma:.3f}, "
f"omega={last_omega:.3f}->{omega:.3f}, and p={p:.3f}")
if abs(gamma - last_gamma) < gamma_tol and abs(omega - last_omega) < omega_tol:
break # gamma and omega converged
else:
if verbose:
print(f"Parameters failed to converge within {max_iter} iterations. "
f"Final move of ({abs(gamma - last_gamma):.3f}, {abs(omega - last_omega):.3f}) "
f"was not within tolerance ({gamma_tol}, {omega_tol})")
if verbose:
print(f"Returned {K} communities with Q={part.q:.3f}, gamma={gamma:.3f}, and omega={omega:.3f}")
return gamma, omega, part
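# A minimal sketch of driving the multilayer variant on a two-layer temporal network
# (illustrative assumptions: python-igraph is available, and the SBM construction below
# merely plants some community structure so the parameter estimates are informative):
#
#     import igraph as ig
#     T, N = 2, 100
#     pref = [[0.2, 0.02], [0.02, 0.2]]
#     layers = [ig.Graph.SBM(N, pref, [N // 2, N // 2]) for _ in range(T)]
#     G_intralayer = layers[0] + layers[1]  # disjoint union: N*T vertices
#     G_interlayer = ig.Graph(directed=True)
#     G_interlayer.add_vertices(N * T)
#     G_interlayer.add_edges([(v, v + N) for v in range(N * (T - 1))])  # temporal links
#     layer_vec = [v // N for v in range(N * T)]
#     gamma, omega, part = iterative_multilayer_resolution_parameter_estimation(
#         G_intralayer, G_interlayer, layer_vec, model='temporal')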
|
the-stack_0_27233
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelSummary
from pytorch_lightning.callbacks.timer import Timer
from pytorch_lightning.plugins.environments.torchelastic_environment import TorchElasticEnvironment
from pytorch_lightning.plugins.precision.native_amp import NativeMixedPrecisionPlugin
from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.parts.nlp_overrides import GradScaler, MegatronHalfPrecisionPlugin, NLPDDPPlugin
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import StatelessTimer, exp_manager
@hydra_runner(config_path="conf", config_name="megatron_t5_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = [
NLPDDPPlugin(
no_ddp_communication_hook=(
megatron_amp_o2 and cfg.trainer.precision == 'bf16'
), # Only bf16 uses fp32_grad_accum.
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
]
if cfg.trainer.precision in [16, 'bf16']:
scaler = None
if cfg.trainer.precision == 16:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler))
else:
plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, **cfg.trainer, callbacks=[ModelSummary(max_depth=3)])
exp_manager(trainer, cfg.exp_manager)
# update resume from checkpoint found by exp_manager
resume_from_checkpoint = trainer.checkpoint_connector.resume_from_checkpoint_fit_path
logging.info(f'Resuming training from checkpoint: {resume_from_checkpoint}')
trainer.checkpoint_connector = CheckpointConnector(trainer, resume_from_checkpoint=resume_from_checkpoint)
# Override timer callback to a stateless one
for idx, callback in enumerate(trainer.callbacks):
if isinstance(callback, Timer):
trainer.callbacks[idx] = StatelessTimer(cfg.trainer.max_time,)
# hydra interpolation does not work here as the interpolation key is lost when PTL saves hparams
with open_dict(cfg):
cfg.model.precision = cfg.trainer.precision
model = MegatronT5Model(cfg.model, trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
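# Example invocation (the script name and override values below are assumptions shown
# only to illustrate hydra-style overrides; the keys themselves appear in the config
# accesses above):
#
#   python megatron_t5_pretraining.py \
#       trainer.precision=bf16 \
#       model.megatron_amp_O2=True \
#       model.gradient_as_bucket_view=True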
|
the-stack_0_27234
|
#
# OtterTune - gp_tf.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
'''
Created on Aug 18, 2016
@author: Bohan Zhang, Dana Van Aken
'''
import gc
import numpy as np
import tensorflow as tf
from .util import get_analysis_logger
LOG = get_analysis_logger(__name__)
class GPRResult(object):
def __init__(self, ypreds=None, sigmas=None):
self.ypreds = ypreds
self.sigmas = sigmas
class GPRGDResult(GPRResult):
def __init__(self, ypreds=None, sigmas=None,
minl=None, minl_conf=None):
super(GPRGDResult, self).__init__(ypreds, sigmas)
self.minl = minl
self.minl_conf = minl_conf
class GPR(object):
MAX_TRAIN_SIZE = 7000
BATCH_SIZE = 3000
NUM_THREADS = 4
def __init__(self, length_scale=1.0, magnitude=1.0, check_numerics=True,
debug=False):
assert np.isscalar(length_scale)
assert np.isscalar(magnitude)
assert length_scale > 0 and magnitude > 0
self.length_scale = length_scale
self.magnitude = magnitude
self.check_numerics = check_numerics
self.debug = debug
self.X_train = None
self.y_train = None
self.xy_ = None
self.K = None
self.K_inv = None
self.graph = None
self.vars = None
self.ops = None
def build_graph(self):
self.vars = {}
self.ops = {}
self.graph = tf.Graph()
with self.graph.as_default():
mag_const = tf.constant(self.magnitude,
dtype=np.float32,
name='magnitude')
ls_const = tf.constant(self.length_scale,
dtype=np.float32,
name='length_scale')
# Nodes for distance computation
v1 = tf.placeholder(tf.float32, name="v1")
v2 = tf.placeholder(tf.float32, name="v2")
dist_op = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2), 1), name='dist_op')
if self.check_numerics:
dist_op = tf.check_numerics(dist_op, "dist_op: ")
self.vars['v1_h'] = v1
self.vars['v2_h'] = v2
self.ops['dist_op'] = dist_op
# Nodes for kernel computation
X_dists = tf.placeholder(tf.float32, name='X_dists')
ridge_ph = tf.placeholder(tf.float32, name='ridge')
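            # The kernel built below is magnitude * exp(-||x - x'|| / length_scale); since
            # dist_op uses the (unsquared) Euclidean distance, this is an exponential
            # (Matern-1/2) kernel rather than a squared-exponential one.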
K_op = mag_const * tf.exp(-X_dists / ls_const)
if self.check_numerics:
K_op = tf.check_numerics(K_op, "K_op: ")
K_ridge_op = K_op + tf.diag(ridge_ph)
if self.check_numerics:
K_ridge_op = tf.check_numerics(K_ridge_op, "K_ridge_op: ")
self.vars['X_dists_h'] = X_dists
self.vars['ridge_h'] = ridge_ph
self.ops['K_op'] = K_op
self.ops['K_ridge_op'] = K_ridge_op
# Nodes for xy computation
K = tf.placeholder(tf.float32, name='K')
K_inv = tf.placeholder(tf.float32, name='K_inv')
xy_ = tf.placeholder(tf.float32, name='xy_')
yt_ = tf.placeholder(tf.float32, name='yt_')
K_inv_op = tf.matrix_inverse(K)
if self.check_numerics:
K_inv_op = tf.check_numerics(K_inv_op, "K_inv: ")
xy_op = tf.matmul(K_inv, yt_)
if self.check_numerics:
xy_op = tf.check_numerics(xy_op, "xy_: ")
self.vars['K_h'] = K
self.vars['K_inv_h'] = K_inv
self.vars['xy_h'] = xy_
self.vars['yt_h'] = yt_
self.ops['K_inv_op'] = K_inv_op
self.ops['xy_op'] = xy_op
# Nodes for yhat/sigma computation
K2 = tf.placeholder(tf.float32, name="K2")
K3 = tf.placeholder(tf.float32, name="K3")
yhat_ = tf.cast(tf.matmul(tf.transpose(K2), xy_), tf.float32)
if self.check_numerics:
yhat_ = tf.check_numerics(yhat_, "yhat_: ")
sv1 = tf.matmul(tf.transpose(K2), tf.matmul(K_inv, K2))
if self.check_numerics:
sv1 = tf.check_numerics(sv1, "sv1: ")
sig_val = tf.cast((tf.sqrt(tf.diag_part(K3 - sv1))), tf.float32)
if self.check_numerics:
sig_val = tf.check_numerics(sig_val, "sig_val: ")
self.vars['K2_h'] = K2
self.vars['K3_h'] = K3
self.ops['yhat_op'] = yhat_
self.ops['sig_op'] = sig_val
# Compute y_best (min y)
y_best_op = tf.cast(tf.reduce_min(yt_, 0, True), tf.float32)
if self.check_numerics:
y_best_op = tf.check_numerics(y_best_op, "y_best_op: ")
self.ops['y_best_op'] = y_best_op
sigma = tf.placeholder(tf.float32, name='sigma')
yhat = tf.placeholder(tf.float32, name='yhat')
self.vars['sigma_h'] = sigma
self.vars['yhat_h'] = yhat
def __repr__(self):
rep = ""
        for k, v in sorted(self.__dict__.items()):
rep += "{} = {}\n".format(k, v)
return rep
def __str__(self):
return self.__repr__()
@staticmethod
def check_X_y(X, y):
from sklearn.utils.validation import check_X_y
if X.shape[0] > GPR.MAX_TRAIN_SIZE:
raise Exception("X_train size cannot exceed {} ({})"
.format(GPR.MAX_TRAIN_SIZE, X.shape[0]))
return check_X_y(X, y, multi_output=True,
allow_nd=True, y_numeric=True,
estimator="GPR")
def check_fitted(self):
if self.X_train is None or self.y_train is None \
or self.xy_ is None or self.K is None:
raise Exception("The model must be trained before making predictions!")
@staticmethod
def check_array(X):
from sklearn.utils.validation import check_array
return check_array(X, allow_nd=True, estimator="GPR")
@staticmethod
def check_output(X):
finite_els = np.isfinite(X)
if not np.all(finite_els):
raise Exception("Input contains non-finite values: {}"
.format(X[~finite_els]))
def fit(self, X_train, y_train, ridge=1.0):
self._reset()
X_train, y_train = GPR.check_X_y(X_train, y_train)
self.X_train = np.float32(X_train)
self.y_train = np.float32(y_train)
sample_size = self.X_train.shape[0]
if np.isscalar(ridge):
ridge = np.ones(sample_size) * ridge
assert isinstance(ridge, np.ndarray)
assert ridge.ndim == 1
X_dists = np.zeros((sample_size, sample_size), dtype=np.float32)
with tf.Session(graph=self.graph,
config=tf.ConfigProto(
intra_op_parallelism_threads=self.NUM_THREADS)) as sess:
dist_op = self.ops['dist_op']
v1, v2 = self.vars['v1_h'], self.vars['v2_h']
for i in range(sample_size):
X_dists[i] = sess.run(dist_op, feed_dict={v1: self.X_train[i], v2: self.X_train})
K_ridge_op = self.ops['K_ridge_op']
X_dists_ph = self.vars['X_dists_h']
ridge_ph = self.vars['ridge_h']
self.K = sess.run(K_ridge_op, feed_dict={X_dists_ph: X_dists, ridge_ph: ridge})
K_ph = self.vars['K_h']
K_inv_op = self.ops['K_inv_op']
self.K_inv = sess.run(K_inv_op, feed_dict={K_ph: self.K})
xy_op = self.ops['xy_op']
K_inv_ph = self.vars['K_inv_h']
yt_ph = self.vars['yt_h']
self.xy_ = sess.run(xy_op, feed_dict={K_inv_ph: self.K_inv,
yt_ph: self.y_train})
return self
def predict(self, X_test):
self.check_fitted()
X_test = np.float32(GPR.check_array(X_test))
test_size = X_test.shape[0]
sample_size = self.X_train.shape[0]
arr_offset = 0
yhats = np.zeros([test_size, 1])
sigmas = np.zeros([test_size, 1])
with tf.Session(graph=self.graph,
config=tf.ConfigProto(
intra_op_parallelism_threads=self.NUM_THREADS)) as sess:
# Nodes for distance operation
dist_op = self.ops['dist_op']
v1 = self.vars['v1_h']
v2 = self.vars['v2_h']
# Nodes for kernel computation
K_op = self.ops['K_op']
X_dists = self.vars['X_dists_h']
# Nodes to compute yhats/sigmas
yhat_ = self.ops['yhat_op']
K_inv_ph = self.vars['K_inv_h']
K2 = self.vars['K2_h']
K3 = self.vars['K3_h']
xy_ph = self.vars['xy_h']
while arr_offset < test_size:
if arr_offset + GPR.BATCH_SIZE > test_size:
end_offset = test_size
else:
end_offset = arr_offset + GPR.BATCH_SIZE
X_test_batch = X_test[arr_offset:end_offset]
batch_len = end_offset - arr_offset
dists1 = np.zeros([sample_size, batch_len])
for i in range(sample_size):
dists1[i] = sess.run(dist_op, feed_dict={v1: self.X_train[i],
v2: X_test_batch})
sig_val = self.ops['sig_op']
K2_ = sess.run(K_op, feed_dict={X_dists: dists1})
yhat = sess.run(yhat_, feed_dict={K2: K2_, xy_ph: self.xy_})
dists2 = np.zeros([batch_len, batch_len])
for i in range(batch_len):
dists2[i] = sess.run(dist_op, feed_dict={v1: X_test_batch[i], v2: X_test_batch})
K3_ = sess.run(K_op, feed_dict={X_dists: dists2})
sigma = np.zeros([1, batch_len], np.float32)
sigma[0] = sess.run(sig_val, feed_dict={K_inv_ph: self.K_inv, K2: K2_, K3: K3_})
sigma = np.transpose(sigma)
yhats[arr_offset: end_offset] = yhat
sigmas[arr_offset: end_offset] = sigma
arr_offset = end_offset
GPR.check_output(yhats)
GPR.check_output(sigmas)
return GPRResult(yhats, sigmas)
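    # A minimal usage sketch (shapes are illustrative; `ridge` may be a scalar or a
    # per-sample vector, as handled in fit()):
    #
    #     gpr = GPR(length_scale=2.0, magnitude=1.0)
    #     gpr.fit(X_train, y_train, ridge=1.0)
    #     res = gpr.predict(X_test)
    #     res.ypreds, res.sigmas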
def get_params(self, deep=True):
return {"length_scale": self.length_scale,
"magnitude": self.magnitude,
"X_train": self.X_train,
"y_train": self.y_train,
"xy_": self.xy_,
"K": self.K,
"K_inv": self.K_inv}
def set_params(self, **parameters):
        for param, val in parameters.items():
setattr(self, param, val)
return self
def _reset(self):
self.X_train = None
self.y_train = None
self.xy_ = None
self.K = None
self.K_inv = None
self.graph = None
self.build_graph()
gc.collect()
class GPRGD(GPR):
DEFAULT_LENGTH_SCALE = 1.0
DEFAULT_MAGNITUDE = 1.0
DEFAULT_RIDGE = 1.0
DEFAULT_LEARNING_RATE = 0.01
DEFAULT_EPSILON = 1e-6
DEFAULT_MAX_ITER = 100
DEFAULT_SIGMA_MULTIPLIER = 3.0
DEFAULT_MU_MULTIPLIER = 1.0
GP_BETA_UCB = "UCB"
GP_BETA_CONST = "CONST"
def __init__(self, length_scale=DEFAULT_LENGTH_SCALE,
magnitude=DEFAULT_MAGNITUDE,
learning_rate=DEFAULT_LEARNING_RATE,
epsilon=DEFAULT_EPSILON,
max_iter=DEFAULT_MAX_ITER,
sigma_multiplier=DEFAULT_SIGMA_MULTIPLIER,
mu_multiplier=DEFAULT_MU_MULTIPLIER):
super(GPRGD, self).__init__(length_scale, magnitude)
self.learning_rate = learning_rate
self.epsilon = epsilon
self.max_iter = max_iter
self.sigma_multiplier = sigma_multiplier
self.mu_multiplier = mu_multiplier
def fit(self, X_train, y_train, ridge=DEFAULT_RIDGE):
super(GPRGD, self).fit(X_train, y_train, ridge)
with tf.Session(graph=self.graph,
config=tf.ConfigProto(
intra_op_parallelism_threads=self.NUM_THREADS)) as sess:
xt_ = tf.Variable(self.X_train[0], tf.float32)
xt_ph = tf.placeholder(tf.float32)
xt_assign_op = xt_.assign(xt_ph)
init = tf.global_variables_initializer()
sess.run(init)
K2_mat = tf.transpose(tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.pow(
tf.subtract(xt_, self.X_train), 2), 1)), 0))
if self.check_numerics is True:
K2_mat = tf.check_numerics(K2_mat, "K2_mat: ")
K2__ = tf.cast(self.magnitude * tf.exp(-K2_mat / self.length_scale), tf.float32)
if self.check_numerics is True:
K2__ = tf.check_numerics(K2__, "K2__: ")
yhat_gd = tf.cast(tf.matmul(tf.transpose(K2__), self.xy_), tf.float32)
if self.check_numerics is True:
yhat_gd = tf.check_numerics(yhat_gd, message="yhat: ")
sig_val = tf.cast((tf.sqrt(self.magnitude - tf.matmul(
tf.transpose(K2__), tf.matmul(self.K_inv, K2__)))), tf.float32)
if self.check_numerics is True:
sig_val = tf.check_numerics(sig_val, message="sigma: ")
LOG.debug("\nyhat_gd : %s", str(sess.run(yhat_gd)))
LOG.debug("\nsig_val : %s", str(sess.run(sig_val)))
loss = tf.squeeze(tf.subtract(self.mu_multiplier * yhat_gd,
self.sigma_multiplier * sig_val))
if self.check_numerics is True:
loss = tf.check_numerics(loss, "loss: ")
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
epsilon=self.epsilon)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
train = optimizer.minimize(loss)
self.vars['xt_'] = xt_
self.vars['xt_ph'] = xt_ph
self.ops['xt_assign_op'] = xt_assign_op
self.ops['yhat_gd'] = yhat_gd
self.ops['sig_val2'] = sig_val
self.ops['loss_op'] = loss
self.ops['train_op'] = train
return self
def predict(self, X_test, constraint_helper=None, # pylint: disable=arguments-differ
categorical_feature_method='hillclimbing',
categorical_feature_steps=3):
self.check_fitted()
X_test = np.float32(GPR.check_array(X_test))
test_size = X_test.shape[0]
nfeats = self.X_train.shape[1]
arr_offset = 0
yhats = np.zeros([test_size, 1])
sigmas = np.zeros([test_size, 1])
minls = np.zeros([test_size, 1])
minl_confs = np.zeros([test_size, nfeats])
with tf.Session(graph=self.graph,
config=tf.ConfigProto(
intra_op_parallelism_threads=self.NUM_THREADS)) as sess:
while arr_offset < test_size:
if arr_offset + GPR.BATCH_SIZE > test_size:
end_offset = test_size
else:
end_offset = arr_offset + GPR.BATCH_SIZE
X_test_batch = X_test[arr_offset:end_offset]
batch_len = end_offset - arr_offset
xt_ = self.vars['xt_']
init = tf.global_variables_initializer()
sess.run(init)
sig_val = self.ops['sig_val2']
yhat_gd = self.ops['yhat_gd']
loss = self.ops['loss_op']
train = self.ops['train_op']
xt_ph = self.vars['xt_ph']
assign_op = self.ops['xt_assign_op']
yhat = np.empty((batch_len, 1))
sigma = np.empty((batch_len, 1))
minl = np.empty((batch_len, 1))
minl_conf = np.empty((batch_len, nfeats))
for i in range(batch_len):
if self.debug is True:
LOG.info("-------------------------------------------")
yhats_it = np.empty((self.max_iter + 1,)) * np.nan
sigmas_it = np.empty((self.max_iter + 1,)) * np.nan
losses_it = np.empty((self.max_iter + 1,)) * np.nan
confs_it = np.empty((self.max_iter + 1, nfeats)) * np.nan
sess.run(assign_op, feed_dict={xt_ph: X_test_batch[i]})
step = 0
for step in range(self.max_iter):
if self.debug is True:
LOG.info("Batch %d, iter %d:", i, step)
yhats_it[step] = sess.run(yhat_gd)[0][0]
sigmas_it[step] = sess.run(sig_val)[0][0]
losses_it[step] = sess.run(loss)
confs_it[step] = sess.run(xt_)
if self.debug is True:
LOG.info(" yhat: %s", str(yhats_it[step]))
LOG.info(" sigma: %s", str(sigmas_it[step]))
LOG.info(" loss: %s", str(losses_it[step]))
LOG.info(" conf: %s", str(confs_it[step]))
sess.run(train)
# if constraint_helper is not None:
# xt_valid = constraint_helper.apply_constraints(sess.run(xt_))
# sess.run(assign_op, feed_dict={xt_ph:xt_valid})
#
# if categorical_feature_method == 'hillclimbing':
# if step % categorical_feature_steps == 0:
# current_xt = sess.run(xt_)
# current_loss = sess.run(loss)
# new_xt = \
# constraint_helper.randomize_categorical_features(
# current_xt)
# sess.run(assign_op, feed_dict={xt_ph:new_xt})
# new_loss = sess.run(loss)
# if current_loss < new_loss:
# sess.run(assign_op, feed_dict={xt_ph:current_xt})
# else:
# raise Exception("Unknown categorical feature method: {}"
# .format(categorical_feature_method))
# except:
# break
if step == self.max_iter - 1:
# Record results from final iteration
yhats_it[-1] = sess.run(yhat_gd)[0][0]
sigmas_it[-1] = sess.run(sig_val)[0][0]
losses_it[-1] = sess.run(loss)
confs_it[-1] = sess.run(xt_)
assert np.all(np.isfinite(yhats_it))
assert np.all(np.isfinite(sigmas_it))
assert np.all(np.isfinite(losses_it))
assert np.all(np.isfinite(confs_it))
# Store info for conf with min loss from all iters
if np.all(~np.isfinite(losses_it)):
min_loss_idx = 0
else:
min_loss_idx = np.nanargmin(losses_it)
yhat[i] = yhats_it[min_loss_idx]
sigma[i] = sigmas_it[min_loss_idx]
minl[i] = losses_it[min_loss_idx]
minl_conf[i] = confs_it[min_loss_idx]
minls[arr_offset:end_offset] = minl
minl_confs[arr_offset:end_offset] = minl_conf
yhats[arr_offset:end_offset] = yhat
sigmas[arr_offset:end_offset] = sigma
arr_offset = end_offset
GPR.check_output(yhats)
GPR.check_output(sigmas)
GPR.check_output(minls)
GPR.check_output(minl_confs)
return GPRGDResult(yhats, sigmas, minls, minl_confs)
@staticmethod
def calculate_sigma_multiplier(t, ndim, bound=0.1):
assert t > 0
assert ndim > 0
assert bound > 0 and bound <= 1
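        # The schedule below mirrors the GP-UCB exploration term of Srinivas et al.,
        # beta_t = 2 * log(d * t^2 * pi^2 / (6 * delta)); note that here `bound` multiplies
        # the argument of the log rather than dividing it, exactly as written below.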
beta = 2 * np.log(ndim * (t**2) * (np.pi**2) / 6 * bound)
if beta > 0:
beta = np.sqrt(beta)
else:
beta = 1
return beta
# def gp_tf(X_train, y_train, X_test, ridge, length_scale, magnitude, batch_size=3000):
# with tf.Graph().as_default():
# y_best = tf.cast(tf.reduce_min(y_train, 0, True), tf.float32)
# sample_size = X_train.shape[0]
# train_size = X_test.shape[0]
# arr_offset = 0
# yhats = np.zeros([train_size, 1])
# sigmas = np.zeros([train_size, 1])
# eips = np.zeros([train_size, 1])
# X_train = np.float32(X_train)
# y_train = np.float32(y_train)
# X_test = np.float32(X_test)
# ridge = np.float32(ridge)
#
# v1 = tf.placeholder(tf.float32,name="v1")
# v2 = tf.placeholder(tf.float32,name="v2")
# dist_op = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2), 1))
# try:
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
#
# dists = np.zeros([sample_size,sample_size])
# for i in range(sample_size):
# dists[i] = sess.run(dist_op,feed_dict={v1:X_train[i], v2:X_train})
#
#
# dists = tf.cast(dists, tf.float32)
# K = magnitude * tf.exp(-dists/length_scale) + tf.diag(ridge);
#
# K2 = tf.placeholder(tf.float32, name="K2")
# K3 = tf.placeholder(tf.float32, name="K3")
#
# x = tf.matmul(tf.matrix_inverse(K), y_train)
# yhat_ = tf.cast(tf.matmul(tf.transpose(K2), x), tf.float32);
# sig_val = tf.cast((tf.sqrt(tf.diag_part(K3 - tf.matmul(tf.transpose(K2),
# tf.matmul(tf.matrix_inverse(K),
# K2))))),
# tf.float32)
#
# u = tf.placeholder(tf.float32, name="u")
# phi1 = 0.5 * tf.erf(u / np.sqrt(2.0)) + 0.5
# phi2 = (1.0 / np.sqrt(2.0 * np.pi)) * tf.exp(tf.square(u) * (-0.5));
# eip = (tf.multiply(u, phi1) + phi2);
#
# while arr_offset < train_size:
# if arr_offset + batch_size > train_size:
# end_offset = train_size
# else:
# end_offset = arr_offset + batch_size;
#
# xt_ = X_test[arr_offset:end_offset];
# batch_len = end_offset - arr_offset
#
# dists = np.zeros([sample_size, batch_len])
# for i in range(sample_size):
# dists[i] = sess.run(dist_op, feed_dict={v1:X_train[i], v2:xt_})
#
# K2_ = magnitude * tf.exp(-dists / length_scale);
# K2_ = sess.run(K2_)
#
# dists = np.zeros([batch_len, batch_len])
# for i in range(batch_len):
# dists[i] = sess.run(dist_op, feed_dict={v1:xt_[i], v2:xt_})
# K3_ = magnitude * tf.exp(-dists / length_scale);
# K3_ = sess.run(K3_)
#
# yhat = sess.run(yhat_, feed_dict={K2:K2_})
#
# sigma = np.zeros([1, batch_len], np.float32)
# sigma[0] = (sess.run(sig_val, feed_dict={K2:K2_, K3:K3_}))
# sigma = np.transpose(sigma)
#
# u_ = tf.cast(tf.div(tf.subtract(y_best, yhat), sigma), tf.float32)
# u_ = sess.run(u_)
# eip_p = sess.run(eip, feed_dict={u:u_})
# eip_ = tf.multiply(sigma, eip_p)
# yhats[arr_offset:end_offset] = yhat
# sigmas[arr_offset:end_offset] = sigma;
# eips[arr_offset:end_offset] = sess.run(eip_);
# arr_offset = end_offset
#
# finally:
# sess.close()
#
# return yhats, sigmas, eips
def euclidean_mat(X, y, sess):
x_n = X.shape[0]
y_n = y.shape[0]
z = np.zeros([x_n, y_n])
for i in range(x_n):
v1 = X[i]
tmp = []
for j in range(y_n):
v2 = y[j]
tmp.append(tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2))))
z[i] = (sess.run(tmp))
return z
def gd_tf(xs, ys, xt, ridge, length_scale=1.0, magnitude=1.0, max_iter=50):
LOG.debug("xs shape: %s", str(xs.shape))
LOG.debug("ys shape: %s", str(ys.shape))
LOG.debug("xt shape: %s", str(xt.shape))
with tf.Graph().as_default():
# y_best = tf.cast(tf.reduce_min(ys,0,True),tf.float32); #array
# yhat_gd = tf.check_numerics(yhat_gd, message="yhat: ")
sample_size = xs.shape[0]
nfeats = xs.shape[1]
test_size = xt.shape[0]
# arr_offset = 0
ini_size = xt.shape[0]
yhats = np.zeros([test_size, 1])
sigmas = np.zeros([test_size, 1])
minl = np.zeros([test_size, 1])
new_conf = np.zeros([test_size, nfeats])
xs = np.float32(xs)
ys = np.float32(ys)
xt_ = tf.Variable(xt[0], tf.float32)
sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=8))
init = tf.global_variables_initializer()
sess.run(init)
ridge = np.float32(ridge)
v1 = tf.placeholder(tf.float32, name="v1")
v2 = tf.placeholder(tf.float32, name="v2")
dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2), 1))
tmp = np.zeros([sample_size, sample_size])
for i in range(sample_size):
tmp[i] = sess.run(dist, feed_dict={v1: xs[i], v2: xs})
tmp = tf.cast(tmp, tf.float32)
K = magnitude * tf.exp(-tmp / length_scale) + tf.diag(ridge)
LOG.debug("K shape: %s", str(sess.run(K).shape))
K2_mat = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(xt_, xs), 2), 1))
K2_mat = tf.transpose(tf.expand_dims(K2_mat, 0))
K2 = tf.cast(tf.exp(-K2_mat / length_scale), tf.float32)
x = tf.matmul(tf.matrix_inverse(K), ys)
x = sess.run(x)
yhat_ = tf.cast(tf.matmul(tf.transpose(K2), x), tf.float32)
sig_val = tf.cast((tf.sqrt(magnitude - tf.matmul(
tf.transpose(K2), tf.matmul(tf.matrix_inverse(K), K2)))), tf.float32)
LOG.debug('yhat shape: %s', str(sess.run(yhat_).shape))
LOG.debug('sig_val shape: %s', str(sess.run(sig_val).shape))
yhat_ = tf.check_numerics(yhat_, message='yhat: ')
sig_val = tf.check_numerics(sig_val, message='sig_val: ')
loss = tf.squeeze(tf.subtract(yhat_, sig_val))
loss = tf.check_numerics(loss, message='loss: ')
# optimizer = tf.train.GradientDescentOptimizer(0.1)
LOG.debug('loss: %s', str(sess.run(loss)))
optimizer = tf.train.AdamOptimizer(0.1)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess.run(init)
for i in range(ini_size):
assign_op = xt_.assign(xt[i])
sess.run(assign_op)
for step in range(max_iter):
LOG.debug('sample #: %d, iter #: %d, loss: %s', i, step, str(sess.run(loss)))
sess.run(train)
yhats[i] = sess.run(yhat_)[0][0]
sigmas[i] = sess.run(sig_val)[0][0]
minl[i] = sess.run(loss)
new_conf[i] = sess.run(xt_)
return yhats, sigmas, minl, new_conf
def main():
pass
# check_gd_equivalence()
def create_random_matrices(n_samples=3000, n_feats=12, n_test=4444):
X_train = np.random.rand(n_samples, n_feats)
y_train = np.random.rand(n_samples, 1)
X_test = np.random.rand(n_test, n_feats)
length_scale = np.random.rand()
magnitude = np.random.rand()
ridge = np.ones(n_samples) * np.random.rand()
return X_train, y_train, X_test, length_scale, magnitude, ridge
# def check_equivalence():
# X_train, y_train, X_test, length_scale, magnitude, ridge = create_random_matrices()
#
# LOG.info("Running GPR method...")
# start = time()
# yhats1, sigmas1, eips1 = gp_tf(X_train, y_train, X_test, ridge,
# length_scale, magnitude)
# print "GPR method: {0:.3f} seconds".format(time() - start)
#
# print "Running GPR class..."
# start = time()
# gpr = GPR(length_scale, magnitude)
# gpr.fit(X_train, y_train, ridge)
# yhats2, sigmas2, eips2 = gpr.predict(X_test)
# print "GPR class: {0:.3f} seconds".format(time() - start)
#
# assert np.allclose(yhats1, yhats2)
# assert np.allclose(sigmas1, sigmas2)
# assert np.allclose(eips1, eips2)
# def check_gd_equivalence():
# X_train, y_train, X_test, length_scale, magnitude, ridge = \
# create_random_matrices(n_test=2)
# print "Running GPR method..."
# start = time()
# yhats3, sigmas3, _ = gp_tf(X_train, y_train, X_test, ridge,
# length_scale, magnitude)
# print "Done."
# print "GPR method: {0:.3f} seconds\n".format(time() - start)
# print "Running GD method..."
# start = time()
# yhats1, sigmas1, minl, minl_conf = gd_tf(X_train, y_train, X_test, ridge,
# length_scale, magnitude, max_iter=5)
# print "Done."
# print "GD method: {0:.3f} seconds\n".format(time() - start)
# print "Running GPR class..."
# start = time()
# gpr = GPR(length_scale, magnitude)
# gpr.fit(X_train, y_train, ridge)
# gpres1 = gpr.predict(X_test)
# print "GPR class: {0:.3f} seconds\n".format(time() - start)
#
# print "Running GPRGD class..."
# start = time()
# gpr_gd = GPRGD(length_scale, magnitude, max_iter=5)
# gpr_gd.fit(X_train, y_train, ridge)
# gpres2 = gpr_gd.predict(X_test)
# print "GPRGD class: {0:.3f} seconds\n".format(time() - start)
# assert np.allclose(yhats1, yhats3, atol=1e-4)
# assert np.allclose(sigmas1, sigmas3, atol=1e-4)
# assert np.allclose(yhats1, gpres1.ypreds, atol=1e-4)
# assert np.allclose(sigmas1, gpres1.sigmas, atol=1e-4)
# assert np.allclose(yhats1, gpres2.ypreds, atol=1e-4)
# assert np.allclose(sigmas1, gpres2.sigmas, atol=1e-4)
# assert np.allclose(minl, gpres2.minl, atol=1e-4)
# assert np.allclose(minl_conf, gpres2.minl_conf, atol=1e-4)
# def test_constraints():
# import os.path
# from .constraints import ParamConstraintHelper
# from .matrix import Matrix
# from .preprocessing import (DummyEncoder, dummy_encoder_helper,
# fix_scaler, get_min_max, MinMaxScaler)
# from .util import get_featured_knobs
# from dbms.param import ConfigManager
# from sklearn.preprocessing import StandardScaler
#
# n_feats = 12
# test_size = 5
#
# datadir = '/usr0/home/dvanaken/Dropbox/Apps/ottertune/data/'
# 'analysis_20160910-204945_exps_mysql_5.6_m3.xlarge_'
# 'ycsb_rr_sf18000_tr50_t300_runlimited_w50-0-0-50-0-0_s0.6'
# X_train = Matrix.load_matrix(os.path.join(datadir, "X_data_enc.npz"))
# y_train = Matrix.load_matrix(os.path.join(datadir, "y_data_enc.npz"))
# length_scale, magnitude, ridge_const = 10.0, 10.0, 7.15
# featured_knobs = get_featured_knobs("mysql", "m3.xlarge")[:n_feats]
# X_train = X_train.filter(featured_knobs, 'columns')
# y_train = y_train.filter(np.array(['99th_lat_ms']), 'columns')
#
# config_mgr = ConfigManager.get_config_manager('mysql')
# X_test = config_mgr.get_param_grid(X_train.columnlabels)
# X_test = X_test[np.random.choice(np.arange(X_test.shape[0]), test_size, replace=False)]
#
# cat_knob_indices, n_values = dummy_encoder_helper("mysql", X_train.columnlabels)
# encoder = DummyEncoder(n_values, cat_knob_indices)
# encoder.fit(X_train.data, columnlabels=X_train.columnlabels)
# X_train_enc = Matrix(encoder.transform(X_train.data),
# X_train.rowlabels,
# encoder.columnlabels)
# X_test_enc = encoder.transform(X_test)
#
# param_list = []
# for pname in X_train.columnlabels:
# param = config_mgr._find_param(pname)
# print param.name, param.data_type
# param_list.append(param)
# print len(param_list)
#
# mins, maxs = get_min_max(encoder, param_list)
# X_scaler = MinMaxScaler(mins, maxs)
# print mins
# print maxs
# X_scaler = StandardScaler()
# X_scaler.fit(X_train_enc.data)
# X_scaler.partial_fit(X_test_enc)
# premean = np.array(X_scaler.mean_)
# fix_scaler(X_scaler, encoder, param_list)
# assert not np.array_equal(premean, X_scaler.mean_)
# X_train_data = X_scaler.transform(X_train_enc.data)
# X_test_data = X_scaler.transform(X_test_enc)
#
# y_scaler = StandardScaler()
# y_train_data = y_scaler.fit_transform(y_train.data)
#
# print X_train_data
# print X_train_enc.columnlabels
#
# constraint_helper = ParamConstraintHelper(param_list, X_scaler, encoder)
#
# ridge = np.ones(X_train_data.shape[0])* ridge_const
# print "Running GPRGD class..."
# start = time()
# gpr_gd = GPRGD(length_scale, magnitude, max_iter=30)
# gpr_gd.fit(X_train_data, y_train_data, ridge)
# gpres2 = gpr_gd.predict(X_test_data, constraint_helper)
# print "GPRGD class: {0:.3f} seconds\n".format(time() - start)
#
# best_idx = np.argmin(gpres2.minl)
# print ""
# best_conf = constraint_helper.get_valid_config(gpres2.minl_conf[best_idx], rescale=False)
# for n,v in zip(X_train.columnlabels, best_conf):
# print "{}: {}".format(n,v)
if __name__ == "__main__":
main()
|
the-stack_0_27236
|
import pyro
import torch
import pyro.distributions as dist
import pyro.distributions.transforms as T
from .BaseSampler import BaseSampler
class NFSampler(BaseSampler):
def __init__(self, name, splines: int, sigmoid: bool, lambd: float, p: float, device: torch.device):
super().__init__(name)
self.sigmoid = sigmoid
self.splines_n = splines
self.device = device
self.init = False
self.lambd = lambd
self.p = p
def _init_node(self, N):
self.base_dist = dist.Normal(torch.zeros(N).to(self.device), torch.ones(N).to(self.device))
self.splines = []
for _ in range(self.splines_n):
self.splines.append(T.spline(N).to(self.device))
        self.flow_dist = dist.TransformedDistribution(self.base_dist, self.splines)
        self.init = True  # mark as initialized so the splines are only built once
def sample_model(self, X, y, explainer):
if not self.init:
self._init_node(explainer.edge_index_adj.shape[1])
m_sub = self.flow_dist.rsample(torch.Size([250, ]))
if self.sigmoid:
m_sub = m_sub.sigmoid().clamp(0, 1).mean(dim=0)
else:
m_sub = m_sub.clamp(0, 1).mean(dim=0)
m = pyro.sample("m", dist.Bernoulli(m_sub).to_event(1))
mean = explainer.model(X, explainer.edge_index_adj[:, m == 1])[explainer.mapping].reshape(-1).exp()
y_sample = pyro.sample("y_sample", dist.Categorical(probs=y/y.sum()))
_ = pyro.sample("y_hat", dist.Categorical(probs=mean/mean.sum()), obs=y_sample)
def sample_guide(self, X, y, explainer):
if not self.init:
self._init_node(explainer.edge_index_adj.shape[1])
modules = []
for (i, spline) in enumerate(self.splines):
modules.append(pyro.module(f"spline{i}", spline))
m_sub = self.flow_dist.rsample(torch.Size([250, ]))
if self.sigmoid:
m_sub = m_sub.sigmoid().clamp(0, 1).mean(dim=0)
else:
m_sub = m_sub.clamp(0, 1).mean(dim=0)
m = pyro.sample("m", dist.Bernoulli(m_sub).to_event(1))
mean = explainer.model(X, explainer.edge_index_adj[:, m == 1])[explainer.mapping].reshape(-1).exp()
y_sample = pyro.sample("y_sample", dist.Categorical(probs=y))
_ = pyro.sample("y_hat", dist.Categorical(probs=mean), obs=y_sample)
def edge_mask(self, explainer):
sample = self.flow_dist.rsample(torch.Size([10000, ]))
sample = sample.sigmoid() if self.sigmoid else sample
sample = sample.clamp(0, 1)
return sample.mean(dim=0)
def L(self, p):
sample = self.flow_dist.rsample(torch.Size([250, ]))
sample = sample.sigmoid() if self.sigmoid else sample.clamp(0, 1)
sample = sample.pow(p)
sample = sample / sample.max()
return sample.mean()
def loss_fn(self, model, guide, *args, **kwargs):
return pyro.infer.Trace_ELBO().differentiable_loss(model, guide, *args) + self.lambd * self.L(self.p)
def run_name(self):
return f"{self.name}_splines-{len(self.splines)}_sig-{self.sigmoid}_lambd-{self.lambd}_p-{self.p}"
|
the-stack_0_27237
|
"""Provide info to system health."""
from aiogithubapi.common.const import BASE_API_URL
from homeassistant.components import system_health
from homeassistant.core import HomeAssistant, callback
from .base import HacsBase
from .const import DOMAIN
GITHUB_STATUS = "https://www.githubstatus.com/"
@callback
def async_register(hass: HomeAssistant, register: system_health.SystemHealthRegistration) -> None:
"""Register system health callbacks."""
register.domain = "Home Assistant Community Store"
register.async_register_info(system_health_info, "/hacs")
async def system_health_info(hass):
"""Get info for the info page."""
hacs: HacsBase = hass.data[DOMAIN]
response = await hacs.githubapi.rate_limit()
data = {
"GitHub API": system_health.async_check_can_reach_url(hass, BASE_API_URL, GITHUB_STATUS),
"Github API Calls Remaining": response.data.resources.core.remaining,
"Installed Version": hacs.version,
"Stage": hacs.stage,
"Available Repositories": len(hacs.repositories),
"Installed Repositories": len([repo for repo in hacs.repositories if repo.data.installed]),
}
if hacs.system.disabled:
data["Disabled"] = hacs.system.disabled_reason
return data
|
the-stack_0_27241
|
# -*- coding: utf-8 -*-
"""
lantz.qt.chart
~~~~~~~~~~~~~~
A chart frontend.
:copyright: 2018 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from lantz.core import Q_
# Import Qt modules from lantz (pyside and pyqt compatible)
from ..utils.qt import QtGui
# These classes simplify application development
from ..app import Frontend
class ChartUi(Frontend):
"""A frontend with a x,y plot (powered by pyqtgraph)"""
# a declarative way to indicate the user interface file to use.
# The file must be located next to the python file where this class
# is defined.
gui = 'placeholder.ui'
# connect widgets to instruments using connect_setup automatically.
auto_connect = True
_axis = {'x': 'bottom',
'y': 'left'}
def __init__(self, xlabel='', xunits='', ylabel='', yunits='', *args, **kwargs):
self._labels = {'x': xlabel, 'y': ylabel}
self._units = {'x': xunits, 'y': yunits}
self._x = []
self._y = []
super().__init__(*args, **kwargs)
@property
def xlabel(self):
"""x-axis label."""
return self._labels['x']
@xlabel.setter
def xlabel(self, value):
self._labels['x'] = value
self._relabel('x')
@property
def ylabel(self):
"""y-axis label."""
return self._labels['y']
@ylabel.setter
def ylabel(self, value):
self._labels['y'] = value
self._relabel('y')
@property
def xunits(self):
"""x-axis units as a string."""
return self._units['x']
@xunits.setter
def xunits(self, value):
self._units['x'] = value
self._relabel('x')
@property
def yunits(self):
"""y-axis units as a string."""
return self._units['y']
@yunits.setter
def yunits(self, value):
self._units['y'] = value
self._relabel('y')
def _relabel(self, axis):
"""Builds the actual label using the label and units for a given axis.
        Also builds a quantity to be used to normalize the data.
        Parameters
        ----------
        axis : str
            'x' or 'y'
Returns
-------
"""
label = self._labels[axis]
units = self._units[axis]
if label and units:
label = '%s [%s]' % (label, units)
elif units:
label = '[%s]' % units
self.pw.setLabel(self._axis[axis], label)
if units:
setattr(self, '_q' + axis, Q_(1, units))
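    # Note: the Q_(1, units) quantities stored as self._qx / self._qy above are
    # what plot() uses to convert incoming pint quantities into plain magnitudes.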
def setupUi(self):
import pyqtgraph as pg
pg.setConfigOptions(antialias=True)
# This method is called after gui has been loaded (referenced in self.widget)
# to customize gui building. In this case, we are adding a plot widget.
self.pw = pg.PlotWidget()
self.curve = self.pw.plot(pen='y')
layout = QtGui.QVBoxLayout()
layout.addWidget(self.pw)
self.widget.placeholder.setLayout(layout)
def plot(self, x, y):
"""Add a pair of points to the plot.
"""
x = x.to(self._qx).m
y = y.to(self._qy).m
self._x.append(x)
self._y.append(y)
self.curve.setData(self._x, self._y)
def clear(self, *args):
"""Clear the plot.
"""
self._x.clear()
self._y.clear()
|
the-stack_0_27242
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import ray
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph, \
LearningRateSchedule
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils.explained_variance import explained_variance
class PPOLoss(object):
def __init__(self,
action_space,
value_targets,
advantages,
actions,
logits,
vf_preds,
curr_action_dist,
value_fn,
cur_kl_coeff,
valid_mask,
entropy_coeff=0,
clip_param=0.1,
vf_clip_param=0.1,
vf_loss_coeff=1.0,
use_gae=True):
"""Constructs the loss for Proximal Policy Objective.
Arguments:
            action_space: Environment action space specification.
            value_targets (Placeholder): Placeholder for target values; used
                for GAE.
            advantages (Placeholder): Placeholder for calculated advantages
                from previous model evaluation.
            actions (Placeholder): Placeholder for actions taken
                from previous model evaluation.
logits (Placeholder): Placeholder for logits output from
previous model evaluation.
vf_preds (Placeholder): Placeholder for value function output
from previous model evaluation.
curr_action_dist (ActionDistribution): ActionDistribution
of the current model.
value_fn (Tensor): Current value function output Tensor.
cur_kl_coeff (Variable): Variable holding the current PPO KL
coefficient.
valid_mask (Tensor): A bool mask of valid input elements (#2992).
entropy_coeff (float): Coefficient of the entropy regularizer.
            clip_param (float): Clip parameter for the policy ratio.
            vf_clip_param (float): Clip parameter for the value function.
            vf_loss_coeff (float): Coefficient of the value function loss.
use_gae (bool): If true, use the Generalized Advantage Estimator.
"""
def reduce_mean_valid(t):
return tf.reduce_mean(tf.boolean_mask(t, valid_mask))
dist_cls, _ = ModelCatalog.get_action_dist(action_space, {})
prev_dist = dist_cls(logits)
# Make loss functions.
logp_ratio = tf.exp(
curr_action_dist.logp(actions) - prev_dist.logp(actions))
action_kl = prev_dist.kl(curr_action_dist)
self.mean_kl = reduce_mean_valid(action_kl)
curr_entropy = curr_action_dist.entropy()
self.mean_entropy = reduce_mean_valid(curr_entropy)
surrogate_loss = tf.minimum(
advantages * logp_ratio,
advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,
1 + clip_param))
self.mean_policy_loss = reduce_mean_valid(-surrogate_loss)
if use_gae:
vf_loss1 = tf.square(value_fn - value_targets)
vf_clipped = vf_preds + tf.clip_by_value(
value_fn - vf_preds, -vf_clip_param, vf_clip_param)
vf_loss2 = tf.square(vf_clipped - value_targets)
vf_loss = tf.maximum(vf_loss1, vf_loss2)
self.mean_vf_loss = reduce_mean_valid(vf_loss)
loss = reduce_mean_valid(
-surrogate_loss + cur_kl_coeff * action_kl +
vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy)
else:
self.mean_vf_loss = tf.constant(0.0)
loss = reduce_mean_valid(-surrogate_loss +
cur_kl_coeff * action_kl -
entropy_coeff * curr_entropy)
self.loss = loss
class PPOPolicyGraph(LearningRateSchedule, TFPolicyGraph):
def __init__(self,
observation_space,
action_space,
config,
existing_inputs=None):
"""
Arguments:
observation_space: Environment observation space specification.
action_space: Environment action space specification.
config (dict): Configuration values for PPO graph.
existing_inputs (list): Optional list of tuples that specify the
placeholders upon which the graph should be built upon.
"""
config = dict(ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG, **config)
self.sess = tf.get_default_session()
self.action_space = action_space
self.config = config
self.kl_coeff_val = self.config["kl_coeff"]
self.kl_target = self.config["kl_target"]
dist_cls, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
if existing_inputs:
obs_ph, value_targets_ph, adv_ph, act_ph, \
logits_ph, vf_preds_ph, prev_actions_ph, prev_rewards_ph = \
existing_inputs[:8]
existing_state_in = existing_inputs[8:-1]
existing_seq_lens = existing_inputs[-1]
else:
obs_ph = tf.placeholder(
tf.float32,
name="obs",
shape=(None, ) + observation_space.shape)
adv_ph = tf.placeholder(
tf.float32, name="advantages", shape=(None, ))
act_ph = ModelCatalog.get_action_placeholder(action_space)
logits_ph = tf.placeholder(
tf.float32, name="logits", shape=(None, logit_dim))
vf_preds_ph = tf.placeholder(
tf.float32, name="vf_preds", shape=(None, ))
value_targets_ph = tf.placeholder(
tf.float32, name="value_targets", shape=(None, ))
prev_actions_ph = ModelCatalog.get_action_placeholder(action_space)
prev_rewards_ph = tf.placeholder(
tf.float32, [None], name="prev_reward")
existing_state_in = None
existing_seq_lens = None
self.observations = obs_ph
self.loss_in = [
("obs", obs_ph),
("value_targets", value_targets_ph),
("advantages", adv_ph),
("actions", act_ph),
("logits", logits_ph),
("vf_preds", vf_preds_ph),
("prev_actions", prev_actions_ph),
("prev_rewards", prev_rewards_ph),
]
self.model = ModelCatalog.get_model(
{
"obs": obs_ph,
"prev_actions": prev_actions_ph,
"prev_rewards": prev_rewards_ph
},
observation_space,
logit_dim,
self.config["model"],
state_in=existing_state_in,
seq_lens=existing_seq_lens)
# KL Coefficient
self.kl_coeff = tf.get_variable(
initializer=tf.constant_initializer(self.kl_coeff_val),
name="kl_coeff",
shape=(),
trainable=False,
dtype=tf.float32)
self.logits = self.model.outputs
curr_action_dist = dist_cls(self.logits)
self.sampler = curr_action_dist.sample()
if self.config["use_gae"]:
if self.config["vf_share_layers"]:
self.value_function = self.model.value_function()
else:
vf_config = self.config["model"].copy()
# Do not split the last layer of the value function into
# mean parameters and standard deviation parameters and
# do not make the standard deviations free variables.
vf_config["free_log_std"] = False
vf_config["use_lstm"] = False
with tf.variable_scope("value_function"):
self.value_function = ModelCatalog.get_model({
"obs": obs_ph,
"prev_actions": prev_actions_ph,
"prev_rewards": prev_rewards_ph
}, observation_space, 1, vf_config).outputs
self.value_function = tf.reshape(self.value_function, [-1])
else:
self.value_function = tf.zeros(shape=tf.shape(obs_ph)[:1])
if self.model.state_in:
max_seq_len = tf.reduce_max(self.model.seq_lens)
mask = tf.sequence_mask(self.model.seq_lens, max_seq_len)
mask = tf.reshape(mask, [-1])
else:
mask = tf.ones_like(adv_ph)
self.loss_obj = PPOLoss(
action_space,
value_targets_ph,
adv_ph,
act_ph,
logits_ph,
vf_preds_ph,
curr_action_dist,
self.value_function,
self.kl_coeff,
mask,
entropy_coeff=self.config["entropy_coeff"],
clip_param=self.config["clip_param"],
vf_clip_param=self.config["vf_clip_param"],
vf_loss_coeff=self.config["vf_loss_coeff"],
use_gae=self.config["use_gae"])
LearningRateSchedule.__init__(self, self.config["lr"],
self.config["lr_schedule"])
TFPolicyGraph.__init__(
self,
observation_space,
action_space,
self.sess,
obs_input=obs_ph,
action_sampler=self.sampler,
loss=self.loss_obj.loss,
loss_inputs=self.loss_in,
state_inputs=self.model.state_in,
state_outputs=self.model.state_out,
prev_action_input=prev_actions_ph,
prev_reward_input=prev_rewards_ph,
seq_lens=self.model.seq_lens,
max_seq_len=config["model"]["max_seq_len"])
self.sess.run(tf.global_variables_initializer())
self.explained_variance = explained_variance(value_targets_ph,
self.value_function)
self.stats_fetches = {
"cur_lr": tf.cast(self.cur_lr, tf.float64),
"total_loss": self.loss_obj.loss,
"policy_loss": self.loss_obj.mean_policy_loss,
"vf_loss": self.loss_obj.mean_vf_loss,
"vf_explained_var": self.explained_variance,
"kl": self.loss_obj.mean_kl,
"entropy": self.loss_obj.mean_entropy
}
def copy(self, existing_inputs):
"""Creates a copy of self using existing input placeholders."""
return PPOPolicyGraph(
self.observation_space,
self.action_space,
self.config,
existing_inputs=existing_inputs)
def extra_compute_action_fetches(self):
return {"vf_preds": self.value_function, "logits": self.logits}
def extra_compute_grad_fetches(self):
return self.stats_fetches
def update_kl(self, sampled_kl):
if sampled_kl > 2.0 * self.kl_target:
self.kl_coeff_val *= 1.5
elif sampled_kl < 0.5 * self.kl_target:
self.kl_coeff_val *= 0.5
self.kl_coeff.load(self.kl_coeff_val, session=self.sess)
return self.kl_coeff_val
def value(self, ob, *args):
feed_dict = {self.observations: [ob], self.model.seq_lens: [1]}
assert len(args) == len(self.model.state_in), \
(args, self.model.state_in)
for k, v in zip(self.model.state_in, args):
feed_dict[k] = v
vf = self.sess.run(self.value_function, feed_dict)
return vf[0]
def postprocess_trajectory(self,
sample_batch,
other_agent_batches=None,
episode=None):
completed = sample_batch["dones"][-1]
if completed:
last_r = 0.0
else:
next_state = []
for i in range(len(self.model.state_in)):
next_state.append([sample_batch["state_out_{}".format(i)][-1]])
last_r = self.value(sample_batch["new_obs"][-1], *next_state)
batch = compute_advantages(
sample_batch,
last_r,
self.config["gamma"],
self.config["lambda"],
use_gae=self.config["use_gae"])
return batch
def gradients(self, optimizer):
return optimizer.compute_gradients(
self._loss, colocate_gradients_with_ops=True)
def get_initial_state(self):
return self.model.state_init
|
the-stack_0_27243
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool re-org scenarios.
Test re-org scenarios with a mempool that contains transactions
that spend (directly or indirectly) coinbase transactions.
"""
from test_framework.blocktools import create_raw_transaction
from test_framework.test_framework import TOPAZTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class MempoolCoinbaseTest(TOPAZTestFramework):
def set_test_params(self):
self.num_nodes = 2
alert_filename = None # Set by setup_network
def run_test(self):
# Start with a 200 block chain
assert_equal(self.nodes[0].getblockcount(), 200)
        # Mine four blocks. After this, the coinbases of nodes[0]'s blocks
        # 101 through 104 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_raw_transaction(self.nodes[0], coinbase_txids[1], node1_address, amount=49.99)
spend_102_raw = create_raw_transaction(self.nodes[0], coinbase_txids[2], node0_address, amount=49.99)
spend_103_raw = create_raw_transaction(self.nodes[0], coinbase_txids[3], node0_address, amount=49.99)
# Create a transaction which is time-locked to two blocks in the future
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
# Set the time lock
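        # Replacing the input's nSequence "ffffffff" with a lower value opts the
        # transaction into lock-time enforcement; the last four bytes of the raw
        # hex (little-endian nLockTime) are then overwritten with height + 2.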
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransactionwithwallet(timelock_tx)["hex"]
# This will raise an exception because the timelock transaction is too immature to spend
assert_raises_rpc_error(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Time-locked transaction is still too immature to spend
assert_raises_rpc_error(-26, 'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_raw_transaction(self.nodes[0], spend_102_id, node1_address, amount=49.98)
spend_103_1_raw = create_raw_transaction(self.nodes[0], spend_103_id, node1_address, amount=49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
# Time-locked transaction can now be spent
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
# Time-locked transaction is now too immature and has been removed from the mempool
# spend_103_1 has been re-orged out of the chain and is back in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
the-stack_0_27246
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import torch
import torch.nn.parallel
from torch.autograd import Variable
import torch.optim as optim
import random
import sys
import os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append('../..')
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, 'models')))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, 'dataloaders')))
import shapenet_part_loader
from utils import PointLoss
from utils import distance_squre
from Linear_model import Linear_autoencoder,CMLP_autoencoder
import data_utils as d_utils
parser = argparse.ArgumentParser()
parser.add_argument('--model_choose',type=int, default=0, help='0 test linear,1 test CMLP')
parser.add_argument('--batch_size', type=int, default=36, help='input batch size')
parser.add_argument('--n_epochs', type=int, default=201, help='number of epochs to train for')
parser.add_argument('--workers', type=int,default=2, help='number of data loading workers')
parser.add_argument('--num_points', type=int, default=2048, help='input point set size')
parser.add_argument('--crop_point_num',type=int,default=512,help='number of crop points ')
parser.add_argument('--outf', type=str, default='tmp_checkpoints', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--dataset', type=str, default='shapenet_part', help='dataset: shapenet_part, shapenet_core13, shapenet_core55')
opt = parser.parse_args()
resume_epoch=0
USE_CUDA = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if opt.model_choose == 0:
Autoencoder = Linear_autoencoder(opt.num_points-opt.crop_point_num,opt.crop_point_num)
else:
Autoencoder = CMLP_autoencoder(opt.num_points-opt.crop_point_num,opt.crop_point_num)
if opt.model != '':
Autoencoder.load_state_dict(torch.load(opt.model,map_location=lambda storage, location: storage)['state_dict'])
resume_epoch = torch.load(opt.model)['epoch']
if USE_CUDA:
print("Let's use", torch.cuda.device_count(), "GPUs!")
Autoencoder.to(device)
Autoencoder = torch.nn.DataParallel(Autoencoder)
dset = shapenet_part_loader.PartDataset( root='../../dataset/shapenetcore_partanno_segmentation_benchmark_v0/',classification=True, class_choice=None, npoints=opt.num_points, split='train')
assert dset
dataloader = torch.utils.data.DataLoader(dset, batch_size=opt.batch_size,
shuffle=True,num_workers = int(opt.workers))
print(len(dataloader))
test_dset = shapenet_part_loader.PartDataset( root='../../dataset/shapenetcore_partanno_segmentation_benchmark_v0/',classification=True, class_choice=None, npoints=opt.num_points, split='test')
test_dataloader = torch.utils.data.DataLoader(test_dset, batch_size=opt.batch_size,
shuffle=True,num_workers = int(opt.workers))
print(Autoencoder)
criterion_PointLoss = PointLoss().to(device)
optimizer = optim.Adam(Autoencoder.parameters(), lr=0.0001,betas=(0.9, 0.999))
schedulerG = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
for epoch in range(resume_epoch,opt.n_epochs):
for batch_id, data in enumerate(dataloader):
Autoencoder.train()
real_point, target = data
real_point = torch.unsqueeze(real_point,1)
p_origin = [0,0,0]
batch_size = real_point.size()[0]
real_center = torch.FloatTensor(batch_size, 1, opt.crop_point_num, 3)
input_cropped =torch.FloatTensor(batch_size, 1, opt.num_points-opt.crop_point_num, 3)
choice =[torch.Tensor([1,0,0]),torch.Tensor([0,0,1]),torch.Tensor([1,0,1]),torch.Tensor([-1,0,0]),torch.Tensor([-1,1,0])]
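        # For every shape in the batch: pick one of the five fixed viewpoints above,
        # sort all points by squared distance to it, and split the cloud into the
        # crop_point_num nearest points (real_center, the ground-truth missing part)
        # and the remaining points (input_cropped, the partial input to the network).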
for m in range(batch_size):
index = random.sample(choice,1)
distance_list = []
p_center = index[0]
for n in range(opt.num_points):
distance_list.append(distance_squre(real_point[m,0,n],p_center))
distance_order = sorted(enumerate(distance_list), key = lambda x:x[1])
for sp in range(opt.crop_point_num):
real_center.data[m,0,sp] = real_point[m,0,distance_order[sp][0]]
crop_num_list = []
for num in range(opt.num_points-opt.crop_point_num):
crop_num_list.append(distance_order[num+opt.crop_point_num][0])
indices = torch.LongTensor(crop_num_list)
input_cropped[m,0]=torch.index_select(real_point[m,0],0,indices)
real_center = torch.squeeze(real_center,1)
input_cropped = torch.squeeze(input_cropped,1)
real_center = Variable(real_center,requires_grad=True)
input_cropped = Variable(input_cropped,requires_grad=True)
real_center = real_center.to(device)
input_cropped = input_cropped.to(device)
optimizer.zero_grad()
reconstruction=Autoencoder(input_cropped)
errG = criterion_PointLoss(reconstruction,real_center)
errG.backward()
optimizer.step()
print('[%d/%d][%d/%d] Loss_G: %.4f '
% (epoch, opt.n_epochs, batch_id, len(dataloader), errG))
f=open('Linear.txt','a')
f.write('\n'+'[%d/%d][%d/%d] Loss_G: %.4f '
% (epoch, opt.n_epochs, batch_id, len(dataloader), errG))
f.close()
if epoch% 10 == 0:
torch.save({'epoch':epoch+1,
'state_dict':Autoencoder.module.state_dict()},
'Trained_Recon_Model_Linear/Linear_ae'+str(epoch)+'.pth' )
schedulerG.step()
|
the-stack_0_27248
|
#!/usr/bin/env python3
# Note:
# - Hardlinks are copied
# - The size of symlinks and directories is meaningless, it depends on whatever
# the filesystem/tar file reports
import argparse
import json
import os
import stat
import sys
import itertools
import logging
import hashlib
import tarfile
VERSION = 3
IDX_NAME = 0
IDX_SIZE = 1
IDX_MTIME = 2
IDX_MODE = 3
IDX_UID = 4
IDX_GID = 5
# target for symbolic links
# child nodes for directories
# filename for files
IDX_TARGET = 6
IDX_FILENAME = 6
HASH_LENGTH = 8
S_IFLNK = 0xA000
S_IFREG = 0x8000
S_IFDIR = 0x4000
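# Each filesystem node is a flat list addressed by the indices above. For
# example, a regular file might be encoded as
#   ["hello.txt", 12, 1610000000, 33188, 1000, 1000, "ab12cd34.bin"]
# where the last entry is the content-hash based filename; a directory stores a
# list of child nodes in slot IDX_TARGET, a symlink stores its target string,
# and trailing None entries are dropped before serialisation.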
def hash_file(filename) -> str:
with open(filename, "rb", buffering=0) as f:
return hash_fileobj(f)
def hash_fileobj(f) -> str:
h = hashlib.sha256()
for b in iter(lambda: f.read(128*1024), b""):
h.update(b)
return h.hexdigest()
def main():
logging.basicConfig(format="%(message)s")
logger = logging.getLogger("fs2json")
logger.setLevel(logging.DEBUG)
args = argparse.ArgumentParser(description="Create filesystem JSON. Example:\n"
" ./fs2xml.py --exclude /boot/ --out fs.json /mnt/",
formatter_class=argparse.RawTextHelpFormatter
)
args.add_argument("--exclude",
action="append",
metavar="path",
help="Path to exclude (relative to base path). Can be specified multiple times.")
args.add_argument("--out",
metavar="out",
nargs="?",
type=argparse.FileType("w"),
help="File to write to (defaults to stdout)",
default=sys.stdout)
args.add_argument("path",
metavar="path-or-tar",
help="Base path or tar file to include in JSON")
args = args.parse_args()
path = os.path.normpath(args.path)
try:
tar = tarfile.open(path, "r")
except IsADirectoryError:
tar = None
if tar:
(root, total_size) = handle_tar(logger, tar)
else:
(root, total_size) = handle_dir(logger, path, args.exclude)
if False:
# normalize the order of children, useful to debug differences between
# the tar and filesystem reader
def sort_children(children):
for c in children:
if isinstance(c[IDX_TARGET], list):
sort_children(c[IDX_TARGET])
children.sort()
sort_children(root)
result = {
"fsroot": root,
"version": VERSION,
"size": total_size,
}
logger.info("Creating json ...")
json.dump(result, args.out, check_circular=False, separators=(',', ':'))
def handle_dir(logger, path, exclude):
path = path + "/"
exclude = exclude or []
exclude = [os.path.join("/", os.path.normpath(p)) for p in exclude]
exclude = set(exclude)
def onerror(oserror):
logger.warning(oserror)
rootdepth = path.count("/")
files = os.walk(path, onerror=onerror)
prevpath = []
mainroot = []
filename_to_hash = {}
total_size = 0
rootstack = [mainroot]
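    # prevpath / rootstack mirror the directory stack while walking: the common
    # prefix with the previous path decides how far to pop the stack, and each new
    # directory node is appended to the node on top of the stack, so children end
    # up under the correct parent in the tree.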
def make_node(st, name):
obj = [None] * 7
obj[IDX_NAME] = name
obj[IDX_SIZE] = st.st_size
obj[IDX_MTIME] = int(st.st_mtime)
obj[IDX_MODE] = int(st.st_mode)
obj[IDX_UID] = st.st_uid
obj[IDX_GID] = st.st_gid
nonlocal total_size
total_size += st.st_size
# Missing:
# int(st.st_atime),
# int(st.st_ctime),
return obj
logger.info("Creating file tree ...")
for f in files:
dirpath, dirnames, filenames = f
pathparts = dirpath.split("/")
pathparts = pathparts[rootdepth:]
fullpath = os.path.join("/", *pathparts)
if fullpath in exclude:
dirnames[:] = []
continue
depth = 0
for this, prev in zip(pathparts, prevpath):
if this != prev:
break
depth += 1
for _name in prevpath[depth:]:
rootstack.pop()
oldroot = rootstack[-1]
assert len(pathparts[depth:]) == 1
openname = pathparts[-1]
if openname == "":
root = mainroot
else:
root = []
st = os.stat(dirpath)
rootobj = make_node(st, openname)
rootobj[IDX_TARGET] = root
oldroot.append(rootobj)
rootstack.append(root)
for filename in itertools.chain(filenames, dirnames):
absname = os.path.join(dirpath, filename)
st = os.lstat(absname)
isdir = stat.S_ISDIR(st.st_mode)
islink = stat.S_ISLNK(st.st_mode)
isfile = stat.S_ISREG(st.st_mode)
if isdir and not islink:
continue
obj = make_node(st, filename)
if islink:
target = os.readlink(absname)
obj[IDX_TARGET] = target
elif isfile:
file_hash = hash_file(absname)
filename = file_hash[0:HASH_LENGTH] + ".bin"
existing = filename_to_hash.get(filename)
assert existing is None or existing == file_hash, "Collision in short hash (%s and %s)" % (existing, file_hash)
filename_to_hash[filename] = file_hash
obj[IDX_FILENAME] = filename
while obj[-1] is None:
obj.pop()
root.append(obj)
prevpath = pathparts
return (mainroot, total_size)
def handle_tar(logger, tar):
mainroot = []
filename_to_hash = {}
total_size = 0
for member in tar.getmembers():
parts = member.name.split("/")
name = parts.pop()
dir = mainroot
for p in parts:
for c in dir:
if c[IDX_NAME] == p:
dir = c[IDX_TARGET]
obj = [None] * 7
obj[IDX_NAME] = name
obj[IDX_SIZE] = member.size
obj[IDX_MTIME] = member.mtime
obj[IDX_MODE] = member.mode
obj[IDX_UID] = member.uid
obj[IDX_GID] = member.gid
if member.isfile() or member.islnk():
obj[IDX_MODE] |= S_IFREG
f = tar.extractfile(member)
file_hash = hash_fileobj(f)
filename = file_hash[0:HASH_LENGTH] + ".bin"
existing = filename_to_hash.get(filename)
assert existing is None or existing == file_hash, "Collision in short hash (%s and %s)" % (existing, file_hash)
filename_to_hash[filename] = file_hash
obj[IDX_FILENAME] = filename
if member.islnk():
# fix size for hard links
f.seek(0, os.SEEK_END)
obj[IDX_SIZE] = int(f.tell())
elif member.isdir():
obj[IDX_MODE] |= S_IFDIR
obj[IDX_TARGET] = []
elif member.issym():
obj[IDX_MODE] |= S_IFLNK
obj[IDX_TARGET] = member.linkname
else:
logger.error("Unsupported type: {} ({})".format(member.type, name))
total_size += obj[IDX_SIZE]
while obj[-1] is None:
obj.pop()
dir.append(obj)
return mainroot, total_size
if __name__ == "__main__":
main()
|
the-stack_0_27250
|
# Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Pack and copy files for standalone mode.
This is still under heavy evolution, but expected to work for
macOS, Windows, and Linux. Patches for other platforms are
very welcome.
"""
from __future__ import print_function
import hashlib
import inspect
import marshal
import os
import pkgutil
import shutil
import subprocess
import sys
from nuitka import Options, SourceCodeReferences
from nuitka.__past__ import iterItems
from nuitka.Bytecodes import compileSourceToBytecode
from nuitka.containers.odict import OrderedDict
from nuitka.containers.oset import OrderedSet
from nuitka.importing import ImportCache
from nuitka.importing.StandardLibrary import (
getStandardLibraryPaths,
isStandardLibraryPath,
)
from nuitka.nodes.ModuleNodes import (
PythonShlibModule,
makeUncompiledPythonModule,
)
from nuitka.plugins.Plugins import Plugins
from nuitka.PythonVersions import python_version
from nuitka.Tracing import general, inclusion_logger, printError
from nuitka.tree.SourceReading import readSourceCodeFromFilename
from nuitka.utils import Utils
from nuitka.utils.AppDirs import getCacheDir
from nuitka.utils.Execution import getNullInput, withEnvironmentPathAdded
from nuitka.utils.FileOperations import (
areSamePaths,
copyTree,
getDirectoryRealPath,
getFileContentByLine,
getFileContents,
getFileList,
getSubDirectories,
haveSameFileContents,
isPathBelow,
listDir,
makePath,
putTextFileContents,
relpath,
resolveShellPatternToFilenames,
withFileLock,
)
from nuitka.utils.Importing import getSharedLibrarySuffixes
from nuitka.utils.ModuleNames import ModuleName
from nuitka.utils.SharedLibraries import (
callInstallNameTool,
getPyWin32Dir,
getSharedLibraryRPATH,
getWindowsDLLVersion,
removeMacOSCodeSignature,
removeSharedLibraryRPATH,
removeSxsFromDLL,
)
from nuitka.utils.ThreadedExecutor import ThreadPoolExecutor, waitWorkers
from nuitka.utils.Timing import TimerReport
from .DependsExe import detectDLLsWithDependencyWalker
from .IncludedDataFiles import IncludedDataFile, makeIncludedDataFile
def loadCodeObjectData(precompiled_filename):
# Ignoring magic numbers, etc. which we don't have to care for much as
# CPython already checked them (would have rejected it otherwise).
with open(precompiled_filename, "rb") as f:
return f.read()[8:]
module_names = set()
def _detectedPrecompiledFile(filename, module_name, result, user_provided, technical):
if filename.endswith(".pyc"):
if os.path.isfile(filename[:-1]):
return _detectedSourceFile(
filename=filename[:-1],
module_name=module_name,
result=result,
user_provided=user_provided,
technical=technical,
)
if module_name in module_names:
return
if Options.isShowInclusion():
inclusion_logger.info(
"Freezing module '%s' (from '%s')." % (module_name, filename)
)
uncompiled_module = makeUncompiledPythonModule(
module_name=module_name,
bytecode=loadCodeObjectData(precompiled_filename=filename),
is_package="__init__" in filename,
filename=filename,
user_provided=user_provided,
technical=technical,
)
ImportCache.addImportedModule(uncompiled_module)
result.append(uncompiled_module)
module_names.add(module_name)
def _detectedSourceFile(filename, module_name, result, user_provided, technical):
if module_name in module_names:
return
if module_name == "collections.abc":
_detectedSourceFile(
filename=filename,
module_name=ModuleName("_collections_abc"),
result=result,
user_provided=user_provided,
technical=technical,
)
source_code = readSourceCodeFromFilename(module_name, filename)
if module_name == "site":
if source_code.startswith("def ") or source_code.startswith("class "):
source_code = "\n" + source_code
source_code = """\
__file__ = (__nuitka_binary_dir + '%s%s') if '__nuitka_binary_dir' in dict(__builtins__ ) else '<frozen>';%s""" % (
os.path.sep,
os.path.basename(filename),
source_code,
)
# Debian stretch site.py
source_code = source_code.replace(
"PREFIXES = [sys.prefix, sys.exec_prefix]", "PREFIXES = []"
)
# Anaconda3 4.1.2 site.py
source_code = source_code.replace(
"def main():", "def main():return\n\nif 0:\n def _unused():"
)
if Options.isShowInclusion():
inclusion_logger.info(
"Freezing module '%s' (from '%s')." % (module_name, filename)
)
is_package = os.path.basename(filename) == "__init__.py"
# Plugins can modify source code:
source_code = Plugins.onFrozenModuleSourceCode(
module_name=module_name, is_package=is_package, source_code=source_code
)
bytecode = compileSourceToBytecode(
source_code=source_code,
filename=module_name.replace(".", os.path.sep) + ".py",
)
# Plugins can modify bytecode code:
bytecode = Plugins.onFrozenModuleBytecode(
module_name=module_name, is_package=is_package, bytecode=bytecode
)
uncompiled_module = makeUncompiledPythonModule(
module_name=module_name,
bytecode=marshal.dumps(bytecode),
is_package=is_package,
filename=filename,
user_provided=user_provided,
technical=technical,
)
ImportCache.addImportedModule(uncompiled_module)
result.append(uncompiled_module)
module_names.add(module_name)
def _detectedShlibFile(filename, module_name):
# That is not a shared library, but looks like one.
if module_name == "__main__":
return
# Cyclic dependency
from nuitka import ModuleRegistry
if ModuleRegistry.hasRootModule(module_name):
return
source_ref = SourceCodeReferences.fromFilename(filename=filename)
shlib_module = PythonShlibModule(module_name=module_name, source_ref=source_ref)
ModuleRegistry.addRootModule(shlib_module)
ImportCache.addImportedModule(shlib_module)
module_names.add(module_name)
def _detectImports(command, user_provided, technical):
# This is pretty complicated stuff, with variants to deal with.
# pylint: disable=too-many-branches,too-many-locals,too-many-statements
# Print statements for stuff to show, the modules loaded.
if python_version >= 0x300:
command += """
print("\\n".join(sorted(
"import %s # sourcefile %s" % (module.__name__, module.__file__)
for module in sys.modules.values()
if getattr(module, "__file__", None) not in (None, "<frozen>"
))), file = sys.stderr)"""
reduced_path = [
path_element
for path_element in sys.path
if not areSamePaths(path_element, ".")
if not areSamePaths(
path_element, os.path.dirname(sys.modules["__main__"].__file__)
)
]
# Make sure the right import path (the one Nuitka binary is running with)
# is used.
command = (
"import sys; sys.path = %s; sys.real_prefix = sys.prefix;" % repr(reduced_path)
) + command
import tempfile
tmp_file, tmp_filename = tempfile.mkstemp()
try:
if python_version >= 0x300:
command = command.encode("utf8")
os.write(tmp_file, command)
os.close(tmp_file)
process = subprocess.Popen(
args=[sys.executable, "-s", "-S", "-v", tmp_filename],
stdin=getNullInput(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=dict(os.environ, PYTHONIOENCODING="utf_8"),
)
_stdout, stderr = process.communicate()
finally:
os.unlink(tmp_filename)
# Don't let errors here go unnoticed.
if process.returncode != 0:
general.warning("There is a problem with detecting imports, CPython said:")
for line in stderr.split(b"\n"):
printError(line)
general.sysexit("Error, please report the issue with above output.")
result = []
detections = []
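    # Each stderr line of the form "import <module> # <origin> ..." is classified
    # below by its origin: "precompiled from <path>" (stdlib .pyc files),
    # "sourcefile <path>" (stdlib .py files, as printed by the code injected above
    # for Python3) and "dynamically loaded from <path>" (early loaded shared
    # libraries).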
for line in stderr.replace(b"\r", b"").split(b"\n"):
if line.startswith(b"import "):
# print(line)
parts = line.split(b" # ", 2)
module_name = parts[0].split(b" ", 2)[1]
origin = parts[1].split()[0]
if python_version >= 0x300:
module_name = module_name.decode("utf-8")
module_name = ModuleName(module_name)
if origin == b"precompiled":
# This is a ".pyc" file that was imported, even before we have a
# chance to do anything, we need to preserve it.
filename = parts[1][len(b"precompiled from ") :]
if python_version >= 0x300:
filename = filename.decode("utf-8")
# Do not leave standard library when freezing.
if not isStandardLibraryPath(filename):
continue
detections.append((module_name, 3, "precompiled", filename))
elif origin == b"sourcefile":
filename = parts[1][len(b"sourcefile ") :]
if python_version >= 0x300:
filename = filename.decode("utf-8")
# Do not leave standard library when freezing.
if not isStandardLibraryPath(filename):
continue
if filename.endswith(".py"):
detections.append((module_name, 2, "sourcefile", filename))
elif filename.endswith(".pyc"):
detections.append((module_name, 3, "precompiled", filename))
elif not filename.endswith("<frozen>"):
# Python3 started lying in "__name__" for the "_decimal"
# calls itself "decimal", which then is wrong and also
# clashes with "decimal" proper
if python_version >= 0x300:
if module_name == "decimal":
module_name = ModuleName("_decimal")
detections.append((module_name, 2, "shlib", filename))
elif origin == b"dynamically":
# Shared library in early load, happens on RPM based systems and
# or self compiled Python installations.
filename = parts[1][len(b"dynamically loaded from ") :]
if python_version >= 0x300:
filename = filename.decode("utf-8")
# Do not leave standard library when freezing.
if not isStandardLibraryPath(filename):
continue
detections.append((module_name, 1, "shlib", filename))
for module_name, _prio, kind, filename in sorted(detections):
if kind == "precompiled":
_detectedPrecompiledFile(
filename=filename,
module_name=module_name,
result=result,
user_provided=user_provided,
technical=technical,
)
elif kind == "sourcefile":
_detectedSourceFile(
filename=filename,
module_name=module_name,
result=result,
user_provided=user_provided,
technical=technical,
)
elif kind == "shlib":
_detectedShlibFile(filename=filename, module_name=module_name)
else:
assert False, kind
return result
# Some modules we want to exclude.
_excluded_stdlib_modules = ["__main__.py", "__init__.py", "antigravity.py"]
if os.name != "nt":
    # On posix systems, and posix Python variants on Windows, this won't
    # work.
_excluded_stdlib_modules.append("wintypes.py")
_excluded_stdlib_modules.append("cp65001.py")
def scanStandardLibraryPath(stdlib_dir):
    # There is a lot of filtering here, done in branches, so there are many of
    # them, but that's acceptable, pylint: disable=too-many-branches,too-many-statements
for root, dirs, filenames in os.walk(stdlib_dir):
import_path = root[len(stdlib_dir) :].strip("/\\")
import_path = import_path.replace("\\", ".").replace("/", ".")
if import_path == "":
if "site-packages" in dirs:
dirs.remove("site-packages")
if "dist-packages" in dirs:
dirs.remove("dist-packages")
if "test" in dirs:
dirs.remove("test")
if "idlelib" in dirs:
dirs.remove("idlelib")
if "turtledemo" in dirs:
dirs.remove("turtledemo")
if "ensurepip" in filenames:
filenames.remove("ensurepip")
if "ensurepip" in dirs:
dirs.remove("ensurepip")
# Ignore "lib-dynload" and "lib-tk" and alike.
dirs[:] = [
dirname
for dirname in dirs
if not dirname.startswith("lib-")
if dirname != "Tools"
if not dirname.startswith("plat-")
]
if import_path in (
"tkinter",
"importlib",
"ctypes",
"unittest",
"sqlite3",
"distutils",
"email",
"bsddb",
):
if "test" in dirs:
dirs.remove("test")
if import_path == "distutils.command":
# Misbehaving and crashing while importing the world.
if "bdist_conda.py" in filenames:
filenames.remove("bdist_conda.py")
if import_path in ("lib2to3", "json", "distutils"):
if "tests" in dirs:
dirs.remove("tests")
if import_path == "asyncio":
if "test_utils.py" in filenames:
filenames.remove("test_utils.py")
if python_version >= 0x340 and Utils.isWin32Windows():
if import_path == "multiprocessing":
filenames.remove("popen_fork.py")
filenames.remove("popen_forkserver.py")
filenames.remove("popen_spawn_posix.py")
if python_version >= 0x300 and Utils.isPosixWindows():
if import_path == "curses":
filenames.remove("has_key.py")
if Utils.getOS() == "NetBSD":
if import_path == "xml.sax":
filenames.remove("expatreader.py")
for filename in filenames:
if filename.endswith(".py") and filename not in _excluded_stdlib_modules:
module_name = filename[:-3]
if import_path == "":
yield module_name
else:
yield import_path + "." + module_name
if python_version >= 0x300:
if "__pycache__" in dirs:
dirs.remove("__pycache__")
for dirname in dirs:
if import_path == "":
yield dirname
else:
yield import_path + "." + dirname
def _detectEarlyImports():
encoding_names = [
m[1] for m in pkgutil.iter_modules(sys.modules["encodings"].__path__)
]
if os.name != "nt":
        # On posix systems, and posix Python variants on Windows, these won't
        # work and fail to import.
for encoding_name in ("mbcs", "cp65001", "oem"):
if encoding_name in encoding_names:
encoding_names.remove(encoding_name)
import_code = ";".join(
"import encodings.%s" % encoding_name
for encoding_name in sorted(encoding_names)
)
import_code += ";import locale;"
# For Python3 we patch inspect without knowing if it is used.
if python_version >= 0x300:
import_code += "import inspect;"
result = _detectImports(command=import_code, user_provided=False, technical=True)
if Options.shallFreezeAllStdlib():
stdlib_modules = set()
        # Scan the standard library paths (multiple in case of a virtualenv).
for stdlib_dir in getStandardLibraryPaths():
for module_name in scanStandardLibraryPath(stdlib_dir):
stdlib_modules.add(module_name)
# Put here ones that should be imported first.
first_ones = ("Tkinter",)
# We have to fight zombie modules in this, some things, e.g. Tkinter
# on newer Python 2.7, comes back after failure without a second error
# being raised, leading to other issues. So we fight it after each
# module that was tried, and prevent re-try by adding a meta path
# based loader that will never load it again, and remove it from the
# "sys.modules" over and over, once it sneaks back. The root cause is
# that extension modules sometimes only raise an error when first
# imported, not the second time around.
# Otherwise this just makes imports of everything so we can see where
# it comes from and what it requires.
import_code = """
imports = %r
failed = set()
class ImportBlocker(object):
def find_module(self, fullname, path = None):
if fullname in failed:
return self
return None
def load_module(self, name):
raise ImportError("%%s has failed before" %% name)
sys.meta_path.insert(0, ImportBlocker())
for imp in imports:
try:
__import__(imp)
except (ImportError, SyntaxError):
failed.add(imp)
except Exception:
sys.stderr("PROBLEM with '%%s'\\n" %% imp)
raise
for fail in failed:
if fail in sys.modules:
del sys.modules[fail]
""" % sorted(
stdlib_modules, key=lambda name: (name not in first_ones, name)
)
early_names = [module.getFullName() for module in result]
result += [
module
for module in _detectImports(
command=import_code, user_provided=False, technical=False
)
if module.getFullName() not in early_names
]
return result
def detectEarlyImports():
# Cyclic dependency
from nuitka import ModuleRegistry
early_modules = tuple(_detectEarlyImports())
for module in early_modules:
ModuleRegistry.addUncompiledModule(module)
return early_modules
_detected_python_rpath = None
ldd_result_cache = {}
def _detectBinaryPathDLLsPosix(dll_filename):
# This is complex, as it also includes the caching mechanism
# pylint: disable=too-many-branches
if ldd_result_cache.get(dll_filename):
return ldd_result_cache[dll_filename]
# Ask "ldd" about the libraries being used by the created binary, these
# are the ones that interest us.
result = set()
# This is the rpath of the Python binary, which will be effective when
# loading the other DLLs too. This happens at least for Python installs
# on Travis. pylint: disable=global-statement
global _detected_python_rpath
if _detected_python_rpath is None and not Utils.isPosixWindows():
_detected_python_rpath = getSharedLibraryRPATH(sys.executable) or False
if _detected_python_rpath:
_detected_python_rpath = _detected_python_rpath.replace(
"$ORIGIN", os.path.dirname(sys.executable)
)
with withEnvironmentPathAdded("LD_LIBRARY_PATH", _detected_python_rpath):
process = subprocess.Popen(
args=["ldd", dll_filename],
stdin=getNullInput(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
stderr = b"\n".join(
line
for line in stderr.splitlines()
if not line.startswith(
b"ldd: warning: you do not have execution permission for"
)
)
inclusion_logger.debug("ldd output for %s is:\n%s" % (dll_filename, stdout))
if stderr:
inclusion_logger.debug("ldd error for %s is:\n%s" % (dll_filename, stderr))
for line in stdout.split(b"\n"):
if not line:
continue
if b"=>" not in line:
continue
part = line.split(b" => ", 2)[1]
if b"(" in part:
filename = part[: part.rfind(b"(") - 1]
else:
filename = part
if not filename:
continue
if python_version >= 0x300:
filename = filename.decode("utf-8")
# Sometimes might use stuff not found or supplied by ldd itself.
if filename in ("not found", "ldd"):
continue
# Do not include kernel / glibc specific libraries. This list has been
# assembled by looking what are the most common .so files provided by
# glibc packages from ArchLinux, Debian Stretch and CentOS.
#
# Online sources:
# - https://centos.pkgs.org/7/puias-computational-x86_64/glibc-aarch64-linux-gnu-2.24-2.sdl7.2.noarch.rpm.html
# - https://centos.pkgs.org/7/centos-x86_64/glibc-2.17-222.el7.x86_64.rpm.html
# - https://archlinux.pkgs.org/rolling/archlinux-core-x86_64/glibc-2.28-5-x86_64.pkg.tar.xz.html
# - https://packages.debian.org/stretch/amd64/libc6/filelist
#
# Note: This list may still be incomplete. Some additional libraries
# might be provided by glibc - it may vary between the package versions
# and between Linux distros. It might or might not be a problem in the
# future, but it should be enough for now.
if os.path.basename(filename).startswith(
(
"ld-linux-x86-64.so",
"libc.so.",
"libpthread.so.",
"libm.so.",
"libdl.so.",
"libBrokenLocale.so.",
"libSegFault.so",
"libanl.so.",
"libcidn.so.",
"libcrypt.so.",
"libmemusage.so",
"libmvec.so.",
"libnsl.so.",
"libnss_compat.so.",
"libnss_db.so.",
"libnss_dns.so.",
"libnss_files.so.",
"libnss_hesiod.so.",
"libnss_nis.so.",
"libnss_nisplus.so.",
"libpcprofile.so",
"libresolv.so.",
"librt.so.",
"libthread_db-1.0.so",
"libthread_db.so.",
"libutil.so.",
)
):
continue
result.add(filename)
ldd_result_cache[dll_filename] = result
sub_result = set(result)
for sub_dll_filename in result:
sub_result = sub_result.union(_detectBinaryPathDLLsPosix(sub_dll_filename))
return sub_result
def _detectBinaryPathDLLsMacOS(original_dir, binary_filename, keep_unresolved):
result = set()
process = subprocess.Popen(
args=["otool", "-L", binary_filename],
stdin=getNullInput(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _stderr = process.communicate()
system_paths = (b"/usr/lib/", b"/System/Library/Frameworks/")
for line in stdout.split(b"\n")[1:]:
if not line:
continue
filename = line.split(b" (")[0].strip()
stop = False
for w in system_paths:
if filename.startswith(w):
stop = True
break
if not stop:
if python_version >= 0x300:
filename = filename.decode("utf-8")
# print("adding", filename)
result.add(filename)
resolved_result = _resolveBinaryPathDLLsMacOS(
original_dir, binary_filename, result, keep_unresolved
)
return resolved_result
def _resolveBinaryPathDLLsMacOS(original_dir, binary_filename, paths, keep_unresolved):
if keep_unresolved:
result = {}
else:
result = set()
rpaths = _detectBinaryRPathsMacOS(original_dir, binary_filename)
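    # "@rpath/<name>" entries are tried against each LC_RPATH directory recorded
    # in the binary (falling back to original_dir), "@loader_path/<name>" entries
    # resolve relative to original_dir, and absolute paths are kept as-is.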
for path in paths:
if path.startswith("@rpath/"):
for rpath in rpaths:
if os.path.isfile(os.path.join(rpath, path[7:])):
resolved_path = os.path.join(rpath, path[7:])
break
else:
resolved_path = os.path.join(original_dir, path[7:])
elif path.startswith("@loader_path/"):
resolved_path = os.path.join(original_dir, path[13:])
else:
resolved_path = path
if keep_unresolved:
result.update({resolved_path: path})
else:
result.add(resolved_path)
return result
def _detectBinaryRPathsMacOS(original_dir, binary_filename):
result = set()
process = subprocess.Popen(
args=["otool", "-l", binary_filename],
stdin=getNullInput(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _stderr = process.communicate()
lines = stdout.split(b"\n")
for i, o in enumerate(lines):
if o.endswith(b"cmd LC_RPATH"):
line = lines[i + 2]
if python_version >= 0x300:
line = line.decode("utf-8")
line = line.split("path ")[1]
line = line.split(" (offset")[0]
if line.startswith("@loader_path"):
line = os.path.join(original_dir, line[13:])
elif line.startswith("@executable_path"):
continue
result.add(line)
return result
def _getCacheFilename(
dependency_tool, is_main_executable, source_dir, original_dir, binary_filename
):
original_filename = os.path.join(original_dir, os.path.basename(binary_filename))
original_filename = os.path.normcase(original_filename)
if is_main_executable:
# Normalize main program name for caching as well, but need to use the
# scons information to distinguish different compilers, so we use
# different libs there.
hashed_value = getFileContents(os.path.join(source_dir, "scons-report.txt"))
else:
hashed_value = original_filename
# Have different values for different Python major versions.
hashed_value += sys.version + sys.executable
if str is not bytes:
hashed_value = hashed_value.encode("utf8")
cache_dir = os.path.join(getCacheDir(), "library_deps", dependency_tool)
makePath(cache_dir)
return os.path.join(cache_dir, hashlib.md5(hashed_value).hexdigest())
_scan_dir_cache = {}
def getScanDirectories(package_name, original_dir):
# Many cases, pylint: disable=too-many-branches
cache_key = package_name, original_dir
if cache_key in _scan_dir_cache:
return _scan_dir_cache[cache_key]
scan_dirs = [sys.prefix]
if package_name is not None:
from nuitka.importing.Importing import findModule
package_dir = findModule(None, package_name, None, 0, False)[1]
if os.path.isdir(package_dir):
scan_dirs.append(package_dir)
scan_dirs.extend(getSubDirectories(package_dir))
if original_dir is not None:
scan_dirs.append(original_dir)
scan_dirs.extend(getSubDirectories(original_dir))
if (
Utils.isWin32Windows()
and package_name is not None
and package_name.isBelowNamespace("win32com")
):
pywin32_dir = getPyWin32Dir()
if pywin32_dir is not None:
scan_dirs.append(pywin32_dir)
for path_dir in os.environ["PATH"].split(";"):
if not os.path.isdir(path_dir):
continue
if areSamePaths(path_dir, os.path.join(os.environ["SYSTEMROOT"])):
continue
if areSamePaths(path_dir, os.path.join(os.environ["SYSTEMROOT"], "System32")):
continue
if areSamePaths(path_dir, os.path.join(os.environ["SYSTEMROOT"], "SysWOW64")):
continue
scan_dirs.append(path_dir)
result = []
# Remove directories that hold no DLLs.
for scan_dir in scan_dirs:
sys.stdout.flush()
# These are useless, but plenty.
if os.path.basename(scan_dir) == "__pycache__":
continue
scan_dir = getDirectoryRealPath(scan_dir)
# No DLLs, no use.
if not any(entry[1].lower().endswith(".dll") for entry in listDir(scan_dir)):
continue
result.append(os.path.realpath(scan_dir))
_scan_dir_cache[cache_key] = result
return result
def detectBinaryPathDLLsWindowsDependencyWalker(
is_main_executable,
source_dir,
original_dir,
binary_filename,
package_name,
use_cache,
update_cache,
):
# This is the caching mechanism and plugin handling for DLL imports.
if use_cache or update_cache:
cache_filename = _getCacheFilename(
dependency_tool="depends.exe",
is_main_executable=is_main_executable,
source_dir=source_dir,
original_dir=original_dir,
binary_filename=binary_filename,
)
if use_cache:
with withFileLock():
if not os.path.exists(cache_filename):
use_cache = False
if use_cache:
result = OrderedSet()
for line in getFileContentByLine(cache_filename):
line = line.strip()
result.add(line)
return result
if Options.isShowProgress():
general.info("Analysing dependencies of '%s'." % binary_filename)
scan_dirs = getScanDirectories(package_name, original_dir)
result = detectDLLsWithDependencyWalker(binary_filename, scan_dirs)
if update_cache:
putTextFileContents(filename=cache_filename, contents=result)
return result
def detectBinaryDLLs(
is_main_executable,
source_dir,
original_filename,
binary_filename,
package_name,
use_cache,
update_cache,
):
"""Detect the DLLs used by a binary.
Using "ldd" (Linux), "depends.exe" (Windows), or
"otool" (macOS) the list of used DLLs is retrieved.
"""
if (
Utils.getOS() in ("Linux", "NetBSD", "FreeBSD", "OpenBSD")
or Utils.isPosixWindows()
):
return _detectBinaryPathDLLsPosix(dll_filename=original_filename)
elif Utils.isWin32Windows():
with TimerReport(
message="Running depends.exe for %s took %%.2f seconds" % binary_filename,
decider=Options.isShowProgress,
):
return detectBinaryPathDLLsWindowsDependencyWalker(
is_main_executable=is_main_executable,
source_dir=source_dir,
original_dir=os.path.dirname(original_filename),
binary_filename=binary_filename,
package_name=package_name,
use_cache=use_cache,
update_cache=update_cache,
)
elif Utils.getOS() == "Darwin":
return _detectBinaryPathDLLsMacOS(
original_dir=os.path.dirname(original_filename),
binary_filename=original_filename,
keep_unresolved=False,
)
else:
# Support your platform above.
assert False, Utils.getOS()
_unfound_dlls = set()
def detectUsedDLLs(source_dir, standalone_entry_points, use_cache, update_cache):
def addDLLInfo(count, source_dir, original_filename, binary_filename, package_name):
used_dlls = detectBinaryDLLs(
is_main_executable=count == 0,
source_dir=source_dir,
original_filename=original_filename,
binary_filename=binary_filename,
package_name=package_name,
use_cache=use_cache,
update_cache=update_cache,
)
# Allow plugins to prevent inclusion, this may discard things from used_dlls.
Plugins.removeDllDependencies(
dll_filename=binary_filename, dll_filenames=used_dlls
)
for dll_filename in sorted(tuple(used_dlls)):
if not os.path.isfile(dll_filename):
                if dll_filename not in _unfound_dlls:
general.warning(
"Dependency '%s' could not be found, you might need to copy it manually."
% dll_filename
)
_unfound_dlls.add(dll_filename)
used_dlls.remove(dll_filename)
return binary_filename, used_dlls
result = OrderedDict()
with ThreadPoolExecutor(max_workers=Utils.getCoreCount() * 3) as worker_pool:
workers = []
for count, standalone_entry_point in enumerate(standalone_entry_points):
workers.append(
worker_pool.submit(
addDLLInfo,
count,
source_dir,
standalone_entry_point.source_path,
standalone_entry_point.dest_path,
standalone_entry_point.package_name,
)
)
for binary_filename, used_dlls in waitWorkers(workers):
for dll_filename in used_dlls:
# We want these to be absolute paths. Solve that in the parts
# where detectBinaryDLLs is platform specific.
assert os.path.isabs(dll_filename), dll_filename
if dll_filename not in result:
result[dll_filename] = []
result[dll_filename].append(binary_filename)
return result
def fixupBinaryDLLPathsMacOS(binary_filename, dll_map, original_location):
""" For macOS, the binary needs to be told to use relative DLL paths """
# There may be nothing to do, in case there are no DLLs.
if not dll_map:
return
rpath_map = _detectBinaryPathDLLsMacOS(
original_dir=os.path.dirname(original_location),
binary_filename=original_location,
keep_unresolved=True,
)
for i, o in enumerate(dll_map):
dll_map[i] = (rpath_map.get(o[0], o[0]), o[1])
callInstallNameTool(
filename=binary_filename,
mapping=(
(original_path, "@executable_path/" + dist_path)
for (original_path, dist_path) in dll_map
),
rpath=None,
)
# These DLLs are run time DLLs from Microsoft, and packages will depend on different
# ones, but it will be OK to use the latest one.
ms_runtime_dlls = (
"msvcp140_1.dll",
"msvcp140.dll",
"vcruntime140_1.dll",
"concrt140.dll",
)
def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points):
# This is terribly complex, because we check the list of used DLLs
# trying to avoid duplicates, and detecting errors with them not
# being binary identical, so we can report them. And then of course
# we also need to handle OS specifics.
# pylint: disable=too-many-branches,too-many-locals,too-many-statements
used_dlls = detectUsedDLLs(
source_dir=source_dir,
standalone_entry_points=standalone_entry_points,
use_cache=not Options.shallNotUseDependsExeCachedResults()
and not Options.getWindowsDependencyTool() == "depends.exe",
update_cache=not Options.shallNotStoreDependsExeCachedResults()
and not Options.getWindowsDependencyTool() == "depends.exe",
)
removed_dlls = set()
warned_about = set()
    # First make checks and remove some.
for dll_filename1, sources1 in tuple(iterItems(used_dlls)):
if dll_filename1 in removed_dlls:
continue
for dll_filename2, sources2 in tuple(iterItems(used_dlls)):
if dll_filename1 == dll_filename2:
continue
if dll_filename2 in removed_dlls:
continue
# Colliding basenames are an issue to us.
if os.path.basename(dll_filename1) != os.path.basename(dll_filename2):
continue
# May already have been removed earlier
if dll_filename1 not in used_dlls:
continue
if dll_filename2 not in used_dlls:
continue
dll_name = os.path.basename(dll_filename1)
if Options.isShowInclusion():
inclusion_logger.info(
"""Colliding DLL names for %s, checking identity of \
'%s' <-> '%s'."""
% (dll_name, dll_filename1, dll_filename2)
)
# Check that if a DLL has the same name, if it's identical, then it's easy.
if haveSameFileContents(dll_filename1, dll_filename2):
del used_dlls[dll_filename2]
removed_dlls.add(dll_filename2)
continue
# For Win32 we can check out file versions.
if Utils.isWin32Windows():
dll_version1 = getWindowsDLLVersion(dll_filename1)
dll_version2 = getWindowsDLLVersion(dll_filename2)
if dll_version2 < dll_version1:
del used_dlls[dll_filename2]
removed_dlls.add(dll_filename2)
solved = True
elif dll_version1 < dll_version2:
del used_dlls[dll_filename1]
removed_dlls.add(dll_filename1)
solved = True
else:
solved = False
if solved:
if dll_name not in warned_about and dll_name not in ms_runtime_dlls:
warned_about.add(dll_name)
inclusion_logger.warning(
"Conflicting DLLs for '%s' in your installation, newest file version used, hoping for the best."
% dll_name
)
continue
# So we have conflicting DLLs, in which case we do report the fact.
inclusion_logger.warning(
"""\
Ignoring non-identical DLLs for '%s'.
%s used by:
%s
different from
%s used by
%s"""
% (
dll_name,
dll_filename1,
"\n ".join(sources1),
dll_filename2,
"\n ".join(sources2),
)
)
del used_dlls[dll_filename2]
removed_dlls.add(dll_filename2)
dll_map = []
for dll_filename, sources in iterItems(used_dlls):
dll_name = os.path.basename(dll_filename)
target_path = os.path.join(dist_dir, dll_name)
shutil.copyfile(dll_filename, target_path)
dll_map.append((dll_filename, dll_name))
if Options.isShowInclusion():
inclusion_logger.info(
"Included used shared library '%s' (used by %s)."
% (dll_filename, ", ".join(sources))
)
if Utils.getOS() == "Darwin":
# For macOS, the binary and the DLLs needs to be changed to reflect
# the relative DLL location in the ".dist" folder.
for standalone_entry_point in standalone_entry_points:
fixupBinaryDLLPathsMacOS(
binary_filename=standalone_entry_point.dest_path,
dll_map=dll_map,
original_location=standalone_entry_point.source_path,
)
for original_path, dll_filename in dll_map:
fixupBinaryDLLPathsMacOS(
binary_filename=os.path.join(dist_dir, dll_filename),
dll_map=dll_map,
original_location=original_path,
)
# Remove code signature from CPython installed library
candidate = os.path.join(
dist_dir,
"Python",
)
if os.path.exists(candidate):
removeMacOSCodeSignature(candidate)
# Remove rpath settings.
if Utils.getOS() in ("Linux", "Darwin"):
# For Linux, the "rpath" of libraries may be an issue and must be
# removed.
if Utils.getOS() == "Darwin":
start = 0
else:
start = 1
for standalone_entry_point in standalone_entry_points[start:]:
removeSharedLibraryRPATH(standalone_entry_point.dest_path)
for _original_path, dll_filename in dll_map:
removeSharedLibraryRPATH(os.path.join(dist_dir, dll_filename))
if Utils.isWin32Windows():
if python_version < 0x300:
# For Win32, we might have to remove SXS paths
for standalone_entry_point in standalone_entry_points[1:]:
removeSxsFromDLL(standalone_entry_point.dest_path)
for _original_path, dll_filename in dll_map:
removeSxsFromDLL(os.path.join(dist_dir, dll_filename))
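# Added illustration (not part of Nuitka): copyUsedDLLs above de-duplicates
# DLLs with colliding basenames by first checking whether the two files are
# byte-for-byte identical. A minimal sketch of such a content check, assuming
# nothing about the real haveSameFileContents helper, could look like this:
def _illustrative_same_contents(path1, path2):
    import hashlib

    def _digest(path):
        # Hash the file in chunks to keep memory usage flat for large DLLs.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                h.update(chunk)
        return h.digest()

    return _digest(path1) == _digest(path2)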
def _handleDataFile(dist_dir, tracer, included_datafile):
"""Handle a data file."""
if isinstance(included_datafile, IncludedDataFile):
if included_datafile.kind == "empty_dirs":
tracer.info(
"Included empty directories %s due to %s."
% (
",".join(included_datafile.dest_path),
included_datafile.reason,
)
)
for sub_dir in included_datafile.dest_path:
makePath(os.path.join(dist_dir, sub_dir))
elif included_datafile.kind == "data_file":
dest_path = os.path.join(dist_dir, included_datafile.dest_path)
tracer.info(
"Included data file %r due to %s."
% (
included_datafile.dest_path,
included_datafile.reason,
)
)
makePath(os.path.dirname(dest_path))
shutil.copyfile(included_datafile.source_path, dest_path)
elif included_datafile.kind == "data_dir":
dest_path = os.path.join(dist_dir, included_datafile.dest_path)
makePath(os.path.dirname(dest_path))
copied = copyTree(included_datafile.source_path, dest_path)
tracer.info(
"Included data dir %r with %d files due to %s."
% (
included_datafile.dest_path,
len(copied),
included_datafile.reason,
)
)
else:
assert False, included_datafile
else:
# TODO: Goal is to have this unused.
source_desc, target_filename = included_datafile
if not isPathBelow(dist_dir, target_filename):
target_filename = os.path.join(dist_dir, target_filename)
makePath(os.path.dirname(target_filename))
if inspect.isfunction(source_desc):
content = source_desc(target_filename)
if content is not None: # support creation of empty directories
with open(
target_filename, "wb" if type(content) is bytes else "w"
) as output:
output.write(content)
else:
shutil.copy2(source_desc, target_filename)
def copyDataFiles(dist_dir):
"""Copy the data files needed for standalone distribution.
Args:
dist_dir: The distribution folder under creation
Notes:
This is for data files only, not DLLs or even extension modules,
those must be registered as entry points, and would not go through
necessary handling if provided like this.
"""
# Many details to deal with, pylint: disable=too-many-branches,too-many-locals
for pattern, dest, arg in Options.getShallIncludeDataFiles():
filenames = resolveShellPatternToFilenames(pattern)
if not filenames:
inclusion_logger.warning("No match data file to be included: %r" % pattern)
for filename in filenames:
file_reason = "specified data file %r on command line" % arg
rel_path = dest
if rel_path.endswith(("/", os.path.sep)):
rel_path = os.path.join(rel_path, os.path.basename(filename))
_handleDataFile(
dist_dir,
inclusion_logger,
makeIncludedDataFile(filename, rel_path, file_reason),
)
for src, dest in Options.getShallIncludeDataDirs():
filenames = getFileList(src)
if not filenames:
inclusion_logger.warning("No files in directory" % src)
for filename in filenames:
relative_filename = relpath(filename, src)
file_reason = "specified data dir %r on command line" % src
rel_path = os.path.join(dest, relative_filename)
_handleDataFile(
dist_dir,
inclusion_logger,
makeIncludedDataFile(filename, rel_path, file_reason),
)
# Cyclic dependency
from nuitka import ModuleRegistry
for module in ModuleRegistry.getDoneModules():
for plugin, included_datafile in Plugins.considerDataFiles(module):
_handleDataFile(
dist_dir=dist_dir, tracer=plugin, included_datafile=included_datafile
)
for module in ModuleRegistry.getDoneModules():
if module.isCompiledPythonPackage() or module.isUncompiledPythonPackage():
package_name = module.getFullName()
match, reason = package_name.matchesToShellPatterns(
patterns=Options.getShallIncludePackageData()
)
if match:
package_directory = module.getCompileTimeDirectory()
pkg_filenames = getFileList(
package_directory,
ignore_dirs=("__pycache__",),
ignore_suffixes=(".py", ".pyw", ".pyc", ".pyo", ".dll")
+ getSharedLibrarySuffixes(),
)
if pkg_filenames:
file_reason = "package '%s' %s" % (package_name, reason)
for pkg_filename in pkg_filenames:
rel_path = os.path.join(
package_name.asPath(),
os.path.relpath(pkg_filename, package_directory),
)
_handleDataFile(
dist_dir,
inclusion_logger,
makeIncludedDataFile(pkg_filename, rel_path, file_reason),
)
# assert False, (module.getCompileTimeDirectory(), pkg_files)
|
the-stack_0_27251
|
from lennoxs30api.metrics import Metrics
from lennoxs30api.s30api_async import (
LENNOX_HVAC_EMERGENCY_HEAT,
lennox_zone,
s30api_async,
lennox_system,
)
import json
import os
import asyncio
import pytest
from unittest.mock import patch
from lennoxs30api.s30exception import S30Exception
def loadfile(name) -> dict:
script_dir = os.path.dirname(__file__) + "/messages/"
file_path = os.path.join(script_dir, name)
with open(file_path) as f:
data = json.load(f)
return data
@pytest.fixture
def metrics() -> Metrics:
return Metrics()
@pytest.fixture
def api(single_setpoint: bool = False) -> s30api_async:
asyncio.set_event_loop(asyncio.new_event_loop())
api = s30api_async("[email protected]", "mypassword", None)
data = loadfile("login_response.json")
api.process_login_response(data)
data = loadfile("config_response_system_01.json")
api.processMessage(data)
data = loadfile("config_response_system_02.json")
api.processMessage(data)
data = loadfile("config_system_03_heatpump_and_furnace.json")
api.processMessage(data)
if single_setpoint:
data = loadfile("equipments_lcc_singlesetpoint.json")
data["SenderID"] = "0000000-0000-0000-0000-000000000001"
else:
data = loadfile("equipments_lcc_splitsetpoint.json")
data["SenderID"] = "0000000-0000-0000-0000-000000000001"
api.processMessage(data)
return api
|
the-stack_0_27253
|
# -*- coding: utf-8 -*-
import datetime
import pytest
import numpy as np
from ..probability import PDA, JPDA
from ...types.detection import Detection, MissedDetection
from ...types.state import GaussianState
from ...types.track import Track
@pytest.fixture(params=[PDA, JPDA])
def associator(request, probability_hypothesiser):
if request.param is PDA:
return request.param(probability_hypothesiser)
elif request.param is JPDA:
return request.param(probability_hypothesiser)
def test_probability(associator):
timestamp = datetime.datetime.now()
t1 = Track([GaussianState(np.array([[0, 0, 0, 0]]), np.diag([1, 0.1, 1, 0.1]), timestamp)])
t2 = Track([GaussianState(np.array([[3, 0, 3, 0]]), np.diag([1, 0.1, 1, 0.1]), timestamp)])
d1 = Detection(np.array([[0, 0]]), timestamp)
d2 = Detection(np.array([[5, 5]]), timestamp)
tracks = {t1, t2}
detections = {d1, d2}
associations = associator.associate(tracks, detections, timestamp)
# There should be 2 associations
assert len(associations) == 2
# verify association probabilities are correct
prob_t1_d1_association = [hyp.probability for hyp in associations[t1]
if hyp.measurement is d1]
prob_t1_d2_association = [hyp.probability for hyp in associations[t1]
if hyp.measurement is d2]
prob_t2_d1_association = [hyp.probability for hyp in associations[t2]
if hyp.measurement is d1]
prob_t2_d2_association = [hyp.probability for hyp in associations[t2]
if hyp.measurement is d2]
assert prob_t1_d1_association[0] > prob_t1_d2_association[0]
assert prob_t2_d1_association[0] < prob_t2_d2_association[0]
def test_missed_detection_probability(associator):
timestamp = datetime.datetime.now()
t1 = Track([GaussianState(np.array([[0, 0, 0, 0]]), np.diag([1, 0.1, 1, 0.1]), timestamp)])
t2 = Track([GaussianState(np.array([[3, 0, 3, 0]]), np.diag([1, 0.1, 1, 0.1]), timestamp)])
d1 = Detection(np.array([[20, 20]]), timestamp)
tracks = {t1, t2}
detections = {d1}
associations = associator.associate(tracks, detections, timestamp)
# Best hypothesis should be missed detection hypothesis
max_track1_prob = max([hyp.probability for hyp in associations[t1]])
max_track2_prob = max([hyp.probability for hyp in associations[t2]])
track1_missed_detect_prob = max(
[hyp.probability for hyp in associations[t1]
if isinstance(hyp.measurement, MissedDetection)])
track2_missed_detect_prob = max(
[hyp.probability for hyp in associations[t2]
if isinstance(hyp.measurement, MissedDetection)])
assert max_track1_prob == track1_missed_detect_prob
assert max_track2_prob == track2_missed_detect_prob
def test_no_detections_probability(associator):
timestamp = datetime.datetime.now()
t1 = Track([GaussianState(np.array([[0, 0, 0, 0]]), np.diag([1, 0.1, 1, 0.1]), timestamp)])
t2 = Track([GaussianState(np.array([[3, 0, 3, 0]]), np.diag([1, 0.1, 1, 0.1]), timestamp)])
tracks = {t1, t2}
detections = set()
associations = associator.associate(tracks, detections, timestamp)
# All hypotheses should be missed detection hypothesis
assert all(isinstance(hypothesis.measurement, MissedDetection)
for multihyp in associations.values()
for hypothesis in multihyp)
def test_no_tracks_probability(associator):
timestamp = datetime.datetime.now()
d1 = Detection(np.array([[2, 2]]), timestamp)
d2 = Detection(np.array([[5, 5]]), timestamp)
tracks = set()
detections = {d1, d2}
associations = associator.associate(tracks, detections, timestamp)
# Since no Tracks went in, there should be no associations
assert not associations
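# Added note (illustrative): these tests only rely on the associator returning
# a track -> hypotheses mapping, where each hypothesis exposes `probability`
# and `measurement` (a Detection or a MissedDetection). Assuming the mapping
# supports .items(), a minimal inspection pattern would be:
#
#   for track, hypotheses in associations.items():
#       best = max(hypotheses, key=lambda hyp: hyp.probability)
#       print(track, best.measurement)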
|
the-stack_0_27254
|
from __future__ import print_function
import os
import re
import sys
from talib import abstract
# FIXME: initialize once, then shutdown at the end, rather than each call?
# FIXME: should we pass startIdx and endIdx into function?
# FIXME: don't return number of elements since it always equals allocation?
functions = []
include_paths = ['/usr/include', '/usr/local/include', '/opt/include', '/opt/local/include', '/opt/homebrew/include']
if sys.platform == 'win32':
include_paths = [r'c:\ta-lib\c\include']
header_found = False
for path in include_paths:
ta_func_header = os.path.join(path, 'ta-lib', 'ta_func.h')
if os.path.exists(ta_func_header):
header_found = True
break
if not header_found:
print('Error: ta-lib/ta_func.h not found', file=sys.stderr)
sys.exit(1)
with open(ta_func_header) as f:
tmp = []
for line in f:
line = line.strip()
if tmp or \
line.startswith('TA_RetCode TA_') or \
line.startswith('int TA_'):
line = re.sub(r'/\*[^\*]+\*/', '', line) # strip comments
tmp.append(line)
if not line:
s = ' '.join(tmp)
s = re.sub(r'\s+', ' ', s)
functions.append(s)
tmp = []
# strip "float" functions
functions = [s for s in functions if not s.startswith('TA_RetCode TA_S_')]
# strip non-indicators
functions = [s for s in functions if not s.startswith('TA_RetCode TA_Set')]
functions = [s for s in functions if not s.startswith('TA_RetCode TA_Restore')]
# print headers
print("""\
cimport numpy as np
from cython import boundscheck, wraparound
cimport _ta_lib as lib
from _ta_lib cimport TA_RetCode
# NOTE: _ta_check_success, NaN are defined in common.pxi
# NumPy C API is initialize in _func.pxi
""")
# cleanup variable names to make them more pythonic
def cleanup(name):
if name.startswith('in'):
return name[2:].lower()
elif name.startswith('optIn'):
return name[5:].lower()
else:
return name.lower()
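# Added sanity checks (illustrative, not part of the original generator);
# they exercise the three branches of cleanup() above and are silent when run:
assert cleanup('inReal') == 'real'
assert cleanup('optInTimePeriod') == 'timeperiod'
assert cleanup('startIdx') == 'startidx'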
# print functions
names = []
for f in functions:
if 'Lookback' in f: # skip lookback functions
continue
i = f.index('(')
name = f[:i].split()[1]
args = f[i:].split(',')
args = [re.sub(r'[\(\);]', '', s).strip() for s in args]
shortname = name[3:]
names.append(shortname)
func_info = abstract.Function(shortname).info
defaults, documentation = abstract._get_defaults_and_docs(func_info)
print('@wraparound(False) # turn off relative indexing from end of lists')
print('@boundscheck(False) # turn off bounds-checking for entire function')
print('def stream_%s(' % shortname, end=' ')
docs = [' %s(' % shortname]
i = 0
for arg in args:
var = arg.split()[-1]
if var in ('startIdx', 'endIdx'):
continue
elif 'out' in var:
break
if i > 0:
print(',', end=' ')
i += 1
if var.endswith('[]'):
var = cleanup(var[:-2])
assert arg.startswith('const double'), arg
print('np.ndarray %s not None' % var, end=' ')
docs.append(var)
docs.append(', ')
elif var.startswith('opt'):
var = cleanup(var)
default_arg = arg.split()[-1][len('optIn'):] # chop off typedef and 'optIn'
default_arg = default_arg[0].lower() + default_arg[1:] # lowercase first letter
if arg.startswith('double'):
if default_arg in defaults:
print('double %s=%s' % (var, defaults[default_arg]), end=' ')
else:
print('double %s=-4e37' % var, end=' ') # TA_REAL_DEFAULT
elif arg.startswith('int'):
if default_arg in defaults:
print('int %s=%s' % (var, defaults[default_arg]), end=' ')
else:
print('int %s=-2**31' % var, end=' ') # TA_INTEGER_DEFAULT
elif arg.startswith('TA_MAType'):
print('int %s=0' % var, end=' ') # TA_MAType_SMA
else:
assert False, arg
if '[, ' not in docs:
docs[-1] = ('[, ')
docs.append('%s=?' % var)
docs.append(', ')
docs[-1] = '])' if '[, ' in docs else ')'
if documentation:
tmp_docs = []
lower_case = False
documentation = documentation.split('\n')[2:] # discard abstract calling definition
for line in documentation:
line = line.replace('Substraction', 'Subtraction')
if 'prices' not in line and 'price' in line:
line = line.replace('price', 'real')
if not line or line.isspace():
tmp_docs.append('')
else:
tmp_docs.append(' %s' % line) # add an indent of 4 spaces
docs.append('\n\n')
docs.append('\n'.join(tmp_docs))
docs.append('\n ')
print('):')
print(' """%s"""' % ''.join(docs))
print(' cdef:')
print(' np.npy_intp length')
print(' TA_RetCode retCode')
for arg in args:
var = arg.split()[-1]
if 'out' in var:
break
if var.endswith('[]'):
var = cleanup(var[:-2])
if 'double' in arg:
print(' double* %s_data' % var)
elif 'int' in arg:
print(' int* %s_data' % var)
else:
assert False, args
for arg in args:
var = arg.split()[-1]
if 'out' not in var:
continue
if var.endswith('[]'):
var = cleanup(var[:-2])
if 'double' in arg:
print(' double %s' % var)
elif 'int' in arg:
print(' int %s' % var)
else:
assert False, args
elif var.startswith('*'):
var = cleanup(var[1:])
print(' int %s' % var)
else:
assert False, arg
for arg in args:
var = arg.split()[-1]
if 'out' in var:
break
if var.endswith('[]'):
var = cleanup(var[:-2])
if 'double' in arg:
cast = '<double*>'
else:
assert False, arg
print(' %s = check_array(%s)' % (var, var))
print(' %s_data = %s%s.data' % (var, cast, var))
# check all input array lengths are the same
inputs = []
for arg in args:
var = arg.split()[-1]
if 'out' in var:
break
if var.endswith('[]'):
var = cleanup(var[:-2])
inputs.append(var)
if len(inputs) == 1:
print(' length = %s.shape[0]' % inputs[0])
else:
print(' length = check_length%s(%s)' % (len(inputs), ', '.join(inputs)))
for arg in args:
var = arg.split()[-1]
if 'out' not in var:
continue
if var.endswith('[]'):
var = cleanup(var[:-2])
if 'double' in arg:
print(' %s = NaN' % var)
elif 'int' in arg:
print(' %s = 0' % var)
else:
assert False, args
print(' retCode = lib.%s(' % name, end=' ')
for i, arg in enumerate(args):
if i > 0:
print(',', end=' ')
var = arg.split()[-1]
if var.endswith('[]'):
var = cleanup(var[:-2])
if 'out' in var:
print('&%s' % var, end=' ')
else:
print('%s_data' % var, end=' ')
elif var.startswith('*'):
var = cleanup(var[1:])
print('&%s' % var, end=' ')
elif var in ('startIdx', 'endIdx'):
print('<int>(length) - 1', end= ' ')
else:
cleaned = cleanup(var)
print(cleaned, end=' ')
print(')')
print(' _ta_check_success("%s", retCode)' % name)
print(' return ', end='')
i = 0
for arg in args:
var = arg.split()[-1]
if var.endswith('[]'):
var = var[:-2]
elif var.startswith('*'):
var = var[1:]
if var.startswith('out'):
if var not in ("outNBElement", "outBegIdx"):
if i > 0:
print(',', end=' ')
i += 1
print(cleanup(var), end=' ')
else:
assert re.match('.*(void|startIdx|endIdx|opt|in)/*', arg), arg
print('')
print('')
|
the-stack_0_27255
|
# Copyright (C) 2019 The Raphielscape Company LLC.; Licensed under the Raphielscape Public License, Version 1.d (the "License"); you may not use this file except in compliance with the License.
""" Userbot module containing commands related to the Information Superhighway (yes, Internet). """
from datetime import datetime
from speedtest import Speedtest
from telethon import functions
from userbot import CMD_HELP
from userbot.events import register
from userbot.utils import humanbytes
@register(outgoing=True, pattern=r"^\.speedtest$")
async def speedtest(event):
""" For .speed command, use SpeedTest to check server speeds. """
await event.edit("`Running speed test...`")
test = Speedtest()
await event.edit("`Choosing best server...`")
test.get_best_server()
await event.edit("`Testing download speed...`")
test.download()
await event.edit("`Testing upload speed...`")
test.upload()
result = test.results.dict()
msg = (
f"`--Started at {result['timestamp']}--`\n\n"
f"`Ping:` `{result['ping']}`\n"
f"`Upload:` `{humanbytes(result['upload'])}/s`\n"
f"`Download:` `{humanbytes(result['download'])}/s`\n"
f"`ISP:` `{result['client']['isp']}`\n"
f"`Country:` `{result['client']['country']}`\n"
f"`Name:` `{result['server']['name']}`\n"
f"`Country:` `{result['server']['country']}`\n"
f"`Sponsor:` `{result['server']['sponsor']}`\n\n"
)
await event.edit(msg)
def speed_convert(size):
"""
Hi human, you can't read bytes?
"""
power = 2 ** 10
zero = 0
units = {0: "", 1: "Kb/s", 2: "Mb/s", 3: "Gb/s", 4: "Tb/s"}
while size > power:
size /= power
zero += 1
return f"{round(size, 2)} {units[zero]}"
@register(outgoing=True, pattern="^.dc$")
async def neardc(event):
""" For .dc command, get the nearest datacenter information. """
result = await event.client(functions.help.GetNearestDcRequest())
await event.edit(
f"Country : `{result.country}`\n"
f"Nearest Datacenter : `{result.nearest_dc}`\n"
f"This Datacenter : `{result.this_dc}`"
)
@register(outgoing=True, pattern="^.ping$")
async def pingme(pong):
""" For .ping command, ping the userbot from any chat. """
start = datetime.now()
await pong.edit("`Pong!`")
end = datetime.now()
duration = (end - start).microseconds / 1000
await pong.edit("`Pong!\n%sms`" % (duration))
CMD_HELP.update(
{
"dc": ".dc\
\nUsage: Finds the nearest datacenter from your server."
}
)
CMD_HELP.update(
{
"ping": ".ping\
\nUsage: Shows how long it takes to ping your bot."
}
)
CMD_HELP.update(
{
"speedtest": ".speedtest\
\nUsage: Does a speedtest and shows results."
}
)
|
the-stack_0_27256
|
import os
import json
defaultPackageName = "package.json"
def getPackageVersions():
result = []
if os.path.exists(defaultPackageName):
with open(defaultPackageName, "r") as packageFile:
packageData = json.loads(packageFile.read())
if "dependencies" in packageData:
for item in packageData["dependencies"]:
result.append((item, packageData["dependencies"][item], "-", "-"))
return result
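# Added usage sketch (illustrative; assumes a package.json with a
# "dependencies" section exists in the current working directory):
if __name__ == "__main__":
    for name, version, _, _ in getPackageVersions():
        print(name, version)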
|
the-stack_0_27258
|
#!/usr/bin/env python
"""Parsers for Clustal and related formats (e.g. MUSCLE).
Implementation Notes:
Currently, does not check whether sequences are the same length and are in
order. Skips any line that starts with a blank.
ClustalParser preserves the order of the sequences from the original file.
However, it does use a dict as an intermediate, so two sequences can't have
the same label. This is probably OK since Clustal will refuse to run on a
FASTA file in which two sequences have the same label, but could potentially
cause trouble with manually edited files (all the segments of the conflicting
sequences would be interleaved, possibly in an unpredictable way).
If the lines have trailing numbers (i.e. Clustal was run with -LINENOS=ON),
silently deletes them. Does not check that the numbers actually correspond to
the number of chars in the sequence printed so far.
"""
from cogent3.parse.record import DelimitedSplitter, RecordError
__author__ = "Rob Knight"
__copyright__ = "Copyright 2007-2022, The Cogent Project"
__credits__ = ["Rob Knight", "Sandra Smit", "Gavin Huttley", "Peter Maxwell"]
__license__ = "BSD-3"
__version__ = "2022.4.20a1"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Development"
strip = str.strip
def LabelLineParser(record, splitter, strict=True):
"""Returns dict mapping list of data to labels, plus list with field order.
Field order contains labels in order encountered in file.
NOTE: doesn't care if lines are out of order in different blocks. This
should never happen anyway, but it's possible that this behavior should
be changed to tighten up validation.
"""
labels = []
result = {}
for line in record:
try:
key, val = splitter(line.rstrip())
except:
if strict:
raise RecordError(f"Failed to extract key and value from line {line}")
else:
continue # just skip the line if not strict
if key in result:
result[key].append(val)
else:
result[key] = [val]
labels.append(key)
return result, labels
def is_clustal_seq_line(line):
"""Returns True if line starts with a non-blank character but not 'CLUSTAL'.
Useful for filtering other lines out of the file.
"""
return (
line
and (not line[0].isspace())
and (not line.startswith("CLUSTAL"))
and (not line.startswith("MUSCLE"))
)
last_space = DelimitedSplitter(None, -1)
def delete_trailing_number(line):
"""Deletes trailing number from a line.
WARNING: does not preserve internal whitespace when a number is removed!
(converts each whitespace run to a single space). Returns the original
line if it didn't end in a number.
"""
pieces = line.split()
try:
int(pieces[-1])
return " ".join(pieces[:-1])
except ValueError: # no trailing numbers
return line
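# Added examples (illustrative):
#   delete_trailing_number("seq1 ACGT-- 60") -> "seq1 ACGT--"
#   delete_trailing_number("seq1 ACGT--")    -> "seq1 ACGT--"  (unchanged)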
def MinimalClustalParser(record, strict=True):
"""Returns (data, label_order) tuple.
Data is dict of label -> sequence (pieces not joined).
"""
return LabelLineParser(
list(map(delete_trailing_number, list(filter(is_clustal_seq_line, record)))),
last_space,
strict,
)
def ClustalParser(record, strict=True):
seqs, labels = MinimalClustalParser(record, strict)
for l in labels:
yield l, "".join(seqs[l])
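# Added usage sketch (illustrative): parsing a tiny in-memory record, assuming
# the usual CLUSTAL layout of "label sequence" lines:
#
#   record = [
#       "CLUSTAL W (1.83) multiple sequence alignment",
#       "",
#       "seq1   ACGT",
#       "seq2   AC-T",
#   ]
#   dict(ClustalParser(record))  # -> {"seq1": "ACGT", "seq2": "AC-T"}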
|
the-stack_0_27259
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows platform implementation."""
import errno
import os
import sys
from psutil import _common
from psutil._common import conn_tmap, usage_percent, isfile_strict
from psutil._compat import PY3, xrange, wraps, lru_cache, namedtuple
import _psutil_windows as cext
# process priority constants, import from __init__.py:
# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
__extra__all__ = ["ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
"HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
"NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS",
#
"CONN_DELETE_TCB",
]
# --- module level constants (gets pushed up to psutil module)
CONN_DELETE_TCB = "DELETE_TCB"
WAIT_TIMEOUT = 0x00000102 # 258 in decimal
ACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,
cext.ERROR_ACCESS_DENIED])
TCP_STATUSES = {
cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
}
scputimes = namedtuple('scputimes', ['user', 'system', 'idle'])
svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
pextmem = namedtuple(
'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
'pagefile', 'peak_pagefile', 'private'])
pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# set later from __init__.py
NoSuchProcess = None
AccessDenied = None
TimeoutExpired = None
@lru_cache(maxsize=512)
def _win32_QueryDosDevice(s):
return cext.win32_QueryDosDevice(s)
def _convert_raw_path(s):
# convert paths using native DOS format like:
# "\Device\HarddiskVolume1\Windows\systemew\file.txt"
# into: "C:\Windows\systemew\file.txt"
if PY3 and not isinstance(s, str):
s = s.decode('utf8')
rawdrive = '\\'.join(s.split('\\')[:3])
driveletter = _win32_QueryDosDevice(rawdrive)
return os.path.join(driveletter, s[len(rawdrive):])
# --- public functions
def virtual_memory():
"""System virtual memory as a namedtuple."""
mem = cext.virtual_mem()
totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
#
total = totphys
avail = availphys
free = availphys
used = total - avail
percent = usage_percent((total - avail), total, _round=1)
return svmem(total, avail, percent, used, free)
def swap_memory():
"""Swap system memory as a (total, used, free, sin, sout) tuple."""
mem = cext.virtual_mem()
total = mem[2]
free = mem[3]
used = total - free
percent = usage_percent(used, total, _round=1)
return _common.sswap(total, used, free, percent, 0, 0)
def disk_usage(path):
"""Return disk usage associated with path."""
try:
total, free = cext.disk_usage(path)
except WindowsError:
if not os.path.exists(path):
msg = "No such file or directory: '%s'" % path
raise OSError(errno.ENOENT, msg)
raise
used = total - free
percent = usage_percent(used, total, _round=1)
return _common.sdiskusage(total, used, free, percent)
def disk_partitions(all):
"""Return disk partitions."""
rawlist = cext.disk_partitions(all)
return [_common.sdiskpart(*x) for x in rawlist]
def cpu_times():
"""Return system CPU times as a named tuple."""
user, system, idle = cext.cpu_times()
return scputimes(user, system, idle)
def per_cpu_times():
"""Return system per-CPU times as a list of named tuples."""
ret = []
for cpu_t in cext.per_cpu_times():
user, system, idle = cpu_t
item = scputimes(user, system, idle)
ret.append(item)
return ret
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
return cext.cpu_count_logical()
def cpu_count_physical():
"""Return the number of physical CPUs in the system."""
return cext.cpu_count_phys()
def boot_time():
"""The system boot time expressed in seconds since the epoch."""
return cext.boot_time()
def net_connections(kind, _pid=-1):
"""Return socket connections. If pid == -1 return system-wide
connections (as opposed to connections opened by one process only).
"""
if kind not in conn_tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in conn_tmap])))
families, types = conn_tmap[kind]
rawlist = cext.net_connections(_pid, families, types)
ret = []
for item in rawlist:
fd, fam, type, laddr, raddr, status, pid = item
status = TCP_STATUSES[status]
if _pid == -1:
nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
else:
nt = _common.pconn(fd, fam, type, laddr, raddr, status)
ret.append(nt)
return ret
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
for item in rawlist:
user, hostname, tstamp = item
nt = _common.suser(user, None, hostname, tstamp)
retlist.append(nt)
return retlist
pids = cext.pids
pid_exists = cext.pid_exists
net_io_counters = cext.net_io_counters
disk_io_counters = cext.disk_io_counters
ppid_map = cext.ppid_map # not meant to be public
def wrap_exceptions(fun):
"""Decorator which translates bare OSError and WindowsError
exceptions into NoSuchProcess and AccessDenied.
"""
@wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
err = sys.exc_info()[1]
if err.errno in ACCESS_DENIED_SET:
raise AccessDenied(self.pid, self._name)
if err.errno == errno.ESRCH:
raise NoSuchProcess(self.pid, self._name)
raise
return wrapper
class Process(object):
"""Wrapper class around underlying C implementation."""
__slots__ = ["pid", "_name"]
def __init__(self, pid):
self.pid = pid
self._name = None
@wrap_exceptions
def name(self):
"""Return process name, which on Windows is always the final
part of the executable.
"""
# This is how PIDs 0 and 4 are always represented in taskmgr
# and process-hacker.
if self.pid == 0:
return "System Idle Process"
elif self.pid == 4:
return "System"
else:
return os.path.basename(self.exe())
@wrap_exceptions
def exe(self):
# Note: os.path.exists(path) may return False even if the file
# is there, see:
# http://stackoverflow.com/questions/3112546/os-path-exists-lies
return _convert_raw_path(cext.proc_exe(self.pid))
@wrap_exceptions
def cmdline(self):
return cext.proc_cmdline(self.pid)
def ppid(self):
try:
return ppid_map()[self.pid]
except KeyError:
raise NoSuchProcess(self.pid, self._name)
def _get_raw_meminfo(self):
try:
return cext.proc_memory_info(self.pid)
except OSError:
err = sys.exc_info()[1]
if err.errno in ACCESS_DENIED_SET:
return cext.proc_memory_info_2(self.pid)
raise
@wrap_exceptions
def memory_info(self):
# on Windows RSS == WorkingSetSize and VSM == PagefileUsage
# fields of PROCESS_MEMORY_COUNTERS struct:
# http://msdn.microsoft.com/en-us/library/windows/desktop/
# ms684877(v=vs.85).aspx
t = self._get_raw_meminfo()
return _common.pmem(t[2], t[7])
@wrap_exceptions
def memory_info_ex(self):
return pextmem(*self._get_raw_meminfo())
def memory_maps(self):
try:
raw = cext.proc_memory_maps(self.pid)
except OSError:
# XXX - can't use wrap_exceptions decorator as we're
# returning a generator; probably needs refactoring.
err = sys.exc_info()[1]
if err.errno in ACCESS_DENIED_SET:
raise AccessDenied(self.pid, self._name)
if err.errno == errno.ESRCH:
raise NoSuchProcess(self.pid, self._name)
raise
else:
for addr, perm, path, rss in raw:
path = _convert_raw_path(path)
addr = hex(addr)
yield (addr, perm, path, rss)
@wrap_exceptions
def kill(self):
return cext.proc_kill(self.pid)
@wrap_exceptions
def wait(self, timeout=None):
if timeout is None:
timeout = cext.INFINITE
else:
# WaitForSingleObject() expects time in milliseconds
timeout = int(timeout * 1000)
ret = cext.proc_wait(self.pid, timeout)
if ret == WAIT_TIMEOUT:
# support for private module import
if TimeoutExpired is None:
raise RuntimeError("timeout expired")
raise TimeoutExpired(timeout, self.pid, self._name)
return ret
@wrap_exceptions
def username(self):
if self.pid in (0, 4):
return 'NT AUTHORITY\\SYSTEM'
return cext.proc_username(self.pid)
@wrap_exceptions
def create_time(self):
# special case for kernel process PIDs; return system boot time
if self.pid in (0, 4):
return boot_time()
try:
return cext.proc_create_time(self.pid)
except OSError:
err = sys.exc_info()[1]
if err.errno in ACCESS_DENIED_SET:
return cext.proc_create_time_2(self.pid)
raise
@wrap_exceptions
def num_threads(self):
return cext.proc_num_threads(self.pid)
@wrap_exceptions
def threads(self):
rawlist = cext.proc_threads(self.pid)
retlist = []
for thread_id, utime, stime in rawlist:
ntuple = _common.pthread(thread_id, utime, stime)
retlist.append(ntuple)
return retlist
@wrap_exceptions
def cpu_times(self):
try:
ret = cext.proc_cpu_times(self.pid)
except OSError:
err = sys.exc_info()[1]
if err.errno in ACCESS_DENIED_SET:
ret = cext.proc_cpu_times_2(self.pid)
else:
raise
return _common.pcputimes(*ret)
@wrap_exceptions
def suspend(self):
return cext.proc_suspend(self.pid)
@wrap_exceptions
def resume(self):
return cext.proc_resume(self.pid)
@wrap_exceptions
def cwd(self):
if self.pid in (0, 4):
raise AccessDenied(self.pid, self._name)
# return a normalized pathname since the native C function appends
# "\\" at the and of the path
path = cext.proc_cwd(self.pid)
return os.path.normpath(path)
@wrap_exceptions
def open_files(self):
if self.pid in (0, 4):
return []
retlist = []
# Filenames come in native format like:
# "\Device\HarddiskVolume1\Windows\systemew\file.txt"
# Convert the first part in the corresponding drive letter
# (e.g. "C:\") by using Windows's QueryDosDevice()
raw_file_names = cext.proc_open_files(self.pid)
for file in raw_file_names:
file = _convert_raw_path(file)
if isfile_strict(file) and file not in retlist:
ntuple = _common.popenfile(file, -1)
retlist.append(ntuple)
return retlist
@wrap_exceptions
def connections(self, kind='inet'):
return net_connections(kind, _pid=self.pid)
@wrap_exceptions
def nice_get(self):
return cext.proc_priority_get(self.pid)
@wrap_exceptions
def nice_set(self, value):
return cext.proc_priority_set(self.pid, value)
# available on Windows >= Vista
if hasattr(cext, "proc_io_priority_get"):
@wrap_exceptions
def ionice_get(self):
return cext.proc_io_priority_get(self.pid)
@wrap_exceptions
def ionice_set(self, value, _):
if _:
raise TypeError("set_proc_ionice() on Windows takes only "
"1 argument (2 given)")
if value not in (2, 1, 0):
raise ValueError("value must be 2 (normal), 1 (low) or 0 "
"(very low); got %r" % value)
return cext.proc_io_priority_set(self.pid, value)
@wrap_exceptions
def io_counters(self):
try:
ret = cext.proc_io_counters(self.pid)
except OSError:
err = sys.exc_info()[1]
if err.errno in ACCESS_DENIED_SET:
ret = cext.proc_io_counters_2(self.pid)
else:
raise
return _common.pio(*ret)
@wrap_exceptions
def status(self):
suspended = cext.proc_is_suspended(self.pid)
if suspended:
return _common.STATUS_STOPPED
else:
return _common.STATUS_RUNNING
@wrap_exceptions
def cpu_affinity_get(self):
from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
bitmask = cext.proc_cpu_affinity_get(self.pid)
return from_bitmask(bitmask)
@wrap_exceptions
def cpu_affinity_set(self, value):
def to_bitmask(l):
if not l:
raise ValueError("invalid argument %r" % l)
out = 0
for b in l:
out |= 2 ** b
return out
# SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER
# is returned for an invalid CPU but this seems not to be true,
# therefore we check CPU validity beforehand.
allcpus = list(range(len(per_cpu_times())))
for cpu in value:
if cpu not in allcpus:
raise ValueError("invalid CPU %r" % cpu)
bitmask = to_bitmask(value)
cext.proc_cpu_affinity_set(self.pid, bitmask)
@wrap_exceptions
def num_handles(self):
try:
return cext.proc_num_handles(self.pid)
except OSError:
err = sys.exc_info()[1]
if err.errno in ACCESS_DENIED_SET:
return cext.proc_num_handles_2(self.pid)
raise
@wrap_exceptions
def num_ctx_switches(self):
tupl = cext.proc_num_ctx_switches(self.pid)
return _common.pctxsw(*tupl)
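# Added usage sketch (illustrative): this module is normally consumed through
# the public psutil API, but on Windows the wrapper above can be exercised
# directly, e.g.:
#
#   import os
#   proc = Process(os.getpid())
#   print(proc.name(), proc.num_threads())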
|
the-stack_0_27261
|
#!/usr/bin/python
# Copyright
# DESCRIPTION
# This is toaster automation base class and test cases file
# History:
# 2015.03.09 initial version
# 2015.03.23 adding toaster_test.cfg, run_toastertest.py so we can run case by case from outside
# Briefs:
# This file is comprised of 3 parts:
# I: common utils like sorting, getting attribute.. etc
# II: base class part, which complies with the unittest framework and
# contains selenium-based helper functions
# III: test cases
# to add new case: just implement new test_xxx() function in class toaster_cases
# NOTES for cases:
# case 946:
# steps 6 - 8 need to be observed using screenshots
# case 956:
# steps 2 - 3 need to be run manually
import unittest, time, re, sys, getopt, os, logging, string, errno, exceptions
import shutil, argparse, ConfigParser, platform, json
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
from selenium import selenium
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
import sqlite3 as sqlite
###########################################
# #
# PART I: utils stuff #
# #
###########################################
class Listattr(object):
"""
Set of list attributes. This is used to determine what the list content is.
Later on we may add more attributes here.
"""
NULL = "null"
NUMBERS = "numbers"
STRINGS = "strings"
PERCENT = "percentage"
SIZE = "size"
UNKNOWN = "unknown"
def get_log_root_dir():
max_depth = 5
parent_dir = '../'
for number in range(0, max_depth):
if os.path.isdir(sys.path[0] + os.sep + (os.pardir + os.sep)*number + 'log'):
log_root_dir = os.path.abspath(sys.path[0] + os.sep + (os.pardir + os.sep)*number + 'log')
break
if number == (max_depth - 1):
print('No log dir found. Please check')
raise Exception
return log_root_dir
def mkdir_p(dir):
try:
os.makedirs(dir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(dir):
pass
else:
raise
def get_list_attr(testlist):
"""
To determine the list content
"""
if not testlist:
return Listattr.NULL
listtest = testlist[:]
try:
listtest.remove('')
except ValueError:
pass
pattern_percent = re.compile(r"^([0-9])+(\.)?([0-9])*%$")
pattern_size = re.compile(r"^([0-9])+(\.)?([0-9])*( )*(K)*(M)*(G)*B$")
pattern_number = re.compile(r"^([0-9])+(\.)?([0-9])*$")
def get_patterned_number(pattern, tlist):
count = 0
for item in tlist:
if re.search(pattern, item):
count += 1
return count
if get_patterned_number(pattern_percent, listtest) == len(listtest):
return Listattr.PERCENT
elif get_patterned_number(pattern_size, listtest) == len(listtest):
return Listattr.SIZE
elif get_patterned_number(pattern_number, listtest) == len(listtest):
return Listattr.NUMBERS
else:
return Listattr.STRINGS
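# Added examples (illustrative) of how lists are classified:
#   get_list_attr(['1.2', '3'])      -> Listattr.NUMBERS
#   get_list_attr(['10%', '99.9%'])  -> Listattr.PERCENT
#   get_list_attr(['12 KB', '3 MB']) -> Listattr.SIZE
#   get_list_attr(['abc', 'xyz'])    -> Listattr.STRINGS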
def is_list_sequenced(testlist):
"""
Function to tell if list is sequenced
Currently we may have list made up of: Strings ; numbers ; percentage ; time; size
Each has respective way to determine if it's sequenced.
"""
test_list = testlist[:]
try:
test_list.remove('')
except ValueError:
pass
if get_list_attr(testlist) == Listattr.NULL :
return True
elif get_list_attr(testlist) == Listattr.STRINGS :
return (sorted(test_list) == test_list)
elif get_list_attr(testlist) == Listattr.NUMBERS :
list_number = []
for item in test_list:
list_number.append(eval(item))
return (sorted(list_number) == list_number)
elif get_list_attr(testlist) == Listattr.PERCENT :
list_number = []
for item in test_list:
list_number.append(eval(item.strip('%')))
return (sorted(list_number) == list_number)
elif get_list_attr(testlist) == Listattr.SIZE :
list_number = []
# currently SIZE is split by space
for item in test_list:
if item.split()[1].upper() == "KB":
list_number.append(1024 * eval(item.split()[0]))
elif item.split()[1].upper() == "MB":
list_number.append(1024 * 1024 * eval(item.split()[0]))
elif item.split()[1].upper() == "GB":
list_number.append(1024 * 1024 * 1024 * eval(item.split()[0]))
else:
list_number.append(eval(item.split()[0]))
return (sorted(list_number) == list_number)
else:
print('Unrecognized list type, please check')
return False
def is_list_inverted(testlist):
"""
Function to tell if list is inverted
Currently we may have list made up of: Strings ; numbers ; percentage ; time; size
Each has respective way to determine if it's inverted.
"""
test_list = testlist[:]
try:
test_list.remove('')
except ValueError:
pass
if get_list_attr(testlist) == Listattr.NULL :
return True
elif get_list_attr(testlist) == Listattr.STRINGS :
return (sorted(test_list, reverse = True) == test_list)
elif get_list_attr(testlist) == Listattr.NUMBERS :
list_number = []
for item in test_list:
list_number.append(eval(item))
return (sorted(list_number, reverse = True) == list_number)
elif get_list_attr(testlist) == Listattr.PERCENT :
list_number = []
for item in test_list:
list_number.append(eval(item.strip('%')))
return (sorted(list_number, reverse = True) == list_number)
elif get_list_attr(testlist) == Listattr.SIZE :
list_number = []
# currently SIZE is split by space, such as 0 B; 1 KB; 2 MB
for item in test_list:
if item.split()[1].upper() == "KB":
list_number.append(1024 * eval(item.split()[0]))
elif item.split()[1].upper() == "MB":
list_number.append(1024 * 1024 * eval(item.split()[0]))
elif item.split()[1].upper() == "GB":
list_number.append(1024 * 1024 * 1024 * eval(item.split()[0]))
else:
list_number.append(eval(item.split()[0]))
return (sorted(list_number, reverse = True) == list_number)
else:
print('Unrecognized list type, please check')
return False
def replace_file_content(filename, item, option):
f = open(filename)
lines = f.readlines()
f.close()
output = open(filename, 'w')
for line in lines:
if line.startswith(item):
output.write(item + " = '" + option + "'\n")
else:
output.write(line)
output.close()
def extract_number_from_string(s):
"""
extract the numbers in a string. return type is 'list'
"""
return re.findall(r'([0-9]+)', s)
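# Added example (illustrative):
#   extract_number_from_string("build-905-step12") -> ['905', '12']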
# Below is decorator derived from toaster backend test code
class NoParsingFilter(logging.Filter):
def filter(self, record):
return record.levelno == 100
def LogResults(original_class):
orig_method = original_class.run
from time import strftime, gmtime
caller = 'toaster'
timestamp = strftime('%Y%m%d%H%M%S',gmtime())
logfile = os.path.join(os.getcwd(),'results-'+caller+'.'+timestamp+'.log')
linkfile = os.path.join(os.getcwd(),'results-'+caller+'.log')
#rewrite the run method of unittest.TestCase to add testcase logging
def run(self, result, *args, **kws):
orig_method(self, result, *args, **kws)
passed = True
testMethod = getattr(self, self._testMethodName)
# if the test case is decorated then use its number, else use its name
try:
test_case = testMethod.test_case
except AttributeError:
test_case = self._testMethodName
class_name = str(testMethod.im_class).split("'")[1]
#create custom logging level for filtering.
custom_log_level = 100
logging.addLevelName(custom_log_level, 'RESULTS')
def results(self, message, *args, **kws):
if self.isEnabledFor(custom_log_level):
self.log(custom_log_level, message, *args, **kws)
logging.Logger.results = results
logging.basicConfig(filename=logfile,
filemode='w',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%H:%M:%S',
level=custom_log_level)
for handler in logging.root.handlers:
handler.addFilter(NoParsingFilter())
local_log = logging.getLogger(caller)
#check status of tests and record it
for (name, msg) in result.errors:
if (self._testMethodName == str(name).split(' ')[0]) and (class_name in str(name).split(' ')[1]):
local_log.results("Testcase "+str(test_case)+": ERROR")
local_log.results("Testcase "+str(test_case)+":\n"+msg)
passed = False
for (name, msg) in result.failures:
if (self._testMethodName == str(name).split(' ')[0]) and (class_name in str(name).split(' ')[1]):
local_log.results("Testcase "+str(test_case)+": FAILED")
local_log.results("Testcase "+str(test_case)+":\n"+msg)
passed = False
for (name, msg) in result.skipped:
if (self._testMethodName == str(name).split(' ')[0]) and (class_name in str(name).split(' ')[1]):
local_log.results("Testcase "+str(test_case)+": SKIPPED")
passed = False
if passed:
local_log.results("Testcase "+str(test_case)+": PASSED")
# Create symlink to the current log
if os.path.exists(linkfile):
os.remove(linkfile)
os.symlink(logfile, linkfile)
original_class.run = run
return original_class
###########################################
# #
# PART II: base class #
# #
###########################################
@LogResults
class toaster_cases_base(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.log = cls.logger_create()
def setUp(self):
self.screenshot_sequence = 1
self.verificationErrors = []
self.accept_next_alert = True
self.host_os = platform.system().lower()
if os.getenv('TOASTER_SUITE'):
self.target_suite = os.getenv('TOASTER_SUITE')
else:
self.target_suite = self.host_os
self.parser = ConfigParser.SafeConfigParser()
self.parser.read('toaster_test.cfg')
self.base_url = eval(self.parser.get('toaster_test_' + self.target_suite, 'toaster_url'))
# create log dir. Currently, we put log files in log/tmp. After all
# test cases are done, move them to log/$datetime dir
self.log_tmp_dir = os.path.abspath(sys.path[0]) + os.sep + 'log' + os.sep + 'tmp'
try:
mkdir_p(self.log_tmp_dir)
except OSError :
logging.error("Cannot create tmp dir under log, please check your permissions")
# self.log = self.logger_create()
# driver setup
self.setup_browser()
@staticmethod
def logger_create():
log_file = "toaster-auto-" + time.strftime("%Y%m%d%H%M%S") + ".log"
if os.path.exists("toaster-auto.log"): os.remove("toaster-auto.log")
os.symlink(log_file, "toaster-auto.log")
log = logging.getLogger("toaster")
log.setLevel(logging.DEBUG)
fh = logging.FileHandler(filename=log_file, mode='w')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)
return log
def setup_browser(self, *browser_path):
self.browser = eval(self.parser.get('toaster_test_' + self.target_suite, 'test_browser'))
print(self.browser)
if self.browser == "firefox":
driver = webdriver.Firefox()
elif self.browser == "chrome":
driver = webdriver.Chrome()
elif self.browser == "ie":
driver = webdriver.Ie()
else:
driver = None
print("unrecognized browser type, please check")
self.driver = driver
self.driver.implicitly_wait(30)
return self.driver
def save_screenshot(self, **log_args):
"""
This function is used to save screen either by os interface or selenium interface.
How to use:
self.save_screenshot(screenshot_type = 'native'/'selenium', log_sub_dir = 'xxx',
append_name = 'stepx')
where native means screenshot func provided by OS,
selenium means screenshot func provided by selenium webdriver
"""
types = [log_args.get('screenshot_type')]
# when no screenshot_type is specified
if types == [None]:
types = ['native', 'selenium']
# normally append_name is used to specify which step..
add_name = log_args.get('append_name')
if not add_name:
add_name = '-'
# normally there's no need to specify sub_dir
sub_dir = log_args.get('log_sub_dir')
if not sub_dir:
# use casexxx as sub_dir name
sub_dir = 'case' + str(self.case_no)
for item in types:
log_dir = self.log_tmp_dir + os.sep + sub_dir
mkdir_p(log_dir)
log_path = log_dir + os.sep + self.browser + '-' +\
item + '-' + add_name + '-' + str(self.screenshot_sequence) + '.png'
if item == 'native':
if self.host_os == "linux":
os.system("scrot " + log_path)
elif self.host_os == "darwin":
os.system("screencapture -x " + log_path)
elif item == 'selenium':
self.driver.get_screenshot_as_file(log_path)
self.screenshot_sequence += 1
def browser_delay(self):
"""
currently this is a workaround for some chrome test.
Sometimes we need a delay to accomplish some operation.
But for firefox, mostly we don't need this.
To be discussed
"""
if self.browser == "chrome":
time.sleep(1)
return
# these functions are not contained in WebDriver class..
def find_element_by_text(self, string):
return self.driver.find_element_by_xpath("//*[text()='" + string + "']")
def find_elements_by_text(self, string):
return self.driver.find_elements_by_xpath("//*[text()='" + string + "']")
def find_element_by_text_in_table(self, table_id, text_string):
"""
This is used to search some certain 'text' in certain table
"""
try:
table_element = self.get_table_element(table_id)
element = table_element.find_element_by_xpath("//*[text()='" + text_string + "']")
except NoSuchElementException as e:
print('no element found')
raise
return element
def find_element_by_link_text_in_table(self, table_id, link_text):
"""
Assume there're multiple suitable "find_element_by_link_text".
In this circumstance we need to specify "table".
"""
try:
table_element = self.get_table_element(table_id)
element = table_element.find_element_by_link_text(link_text)
except NoSuchElementException as e:
print('no element found')
raise
return element
def find_elements_by_link_text_in_table(self, table_id, link_text):
"""
Search link-text in certain table. This helps to narrow down search area.
"""
try:
table_element = self.get_table_element(table_id)
element_list = table_element.find_elements_by_link_text(link_text)
except NoSuchElementException as e:
print('no element found')
raise
return element_list
def find_element_by_partial_link_text_in_table(self, table_id, link_text):
"""
Search element by partial link text in certain table.
"""
try:
table_element = self.get_table_element(table_id)
element = table_element.find_element_by_partial_link_text(link_text)
return element
except NoSuchElementException as e:
print('no element found')
raise
def find_elements_by_partial_link_text_in_table(self, table_id, link_text):
"""
Assume there're multiple suitable "find_partial_element_by_link_text".
"""
try:
table_element = self.get_table_element(table_id)
element_list = table_element.find_elements_by_partial_link_text(link_text)
return element_list
except NoSuchElementException as e:
print('no element found')
raise
def find_element_by_xpath_in_table(self, table_id, xpath):
"""
This helps to narrow down search area. Especially useful when dealing with pop-up form.
"""
try:
table_element = self.get_table_element(table_id)
element = table_element.find_element_by_xpath(xpath)
except NoSuchElementException as e:
print('no element found')
raise
return element
def find_elements_by_xpath_in_table(self, table_id, xpath):
"""
This helps to narrow down search area. Especially useful when dealing with pop-up form.
"""
try:
table_element = self.get_table_element(table_id)
element_list = table_element.find_elements_by_xpath(xpath)
except NoSuchElementException as e:
print('no elements found')
raise
return element_list
def shortest_xpath(self, pname, pvalue):
return "//*[@" + pname + "='" + pvalue + "']"
# usually elements in the same column share the same class name, for instance: class="outcome". TBD
def get_table_column_text(self, attr_name, attr_value):
c_xpath = self.shortest_xpath(attr_name, attr_value)
elements = self.driver.find_elements_by_xpath(c_xpath)
c_list = []
for element in elements:
c_list.append(element.text)
return c_list
def get_table_column_text_by_column_number(self, table_id, column_number):
c_xpath = "//*[@id='" + table_id + "']//td[" + str(column_number) + "]"
elements = self.driver.find_elements_by_xpath(c_xpath)
c_list = []
for element in elements:
c_list.append(element.text)
return c_list
def get_table_head_text(self, *table_id):
#now table_id is a tuple...
if table_id:
thead_xpath = "//*[@id='" + table_id[0] + "']//thead//th[text()]"
elements = self.driver.find_elements_by_xpath(thead_xpath)
c_list = []
for element in elements:
if element.text:
c_list.append(element.text)
return c_list
#default table on page
else:
return self.driver.find_element_by_xpath("//*/table/thead").text
def get_table_element(self, table_id, *coordinate):
if len(coordinate) == 0:
#return whole-table element
element_xpath = "//*[@id='" + table_id + "']"
try:
element = self.driver.find_element_by_xpath(element_xpath)
except NoSuchElementException as e:
raise
return element
row = coordinate[0]
if len(coordinate) == 1:
#return whole-row element
element_xpath = "//*[@id='" + table_id + "']/tbody/tr[" + str(row) + "]"
try:
element = self.driver.find_element_by_xpath(element_xpath)
except NoSuchElementException as e:
return False
return element
#now we are looking for an element with specified X and Y
column = coordinate[1]
element_xpath = "//*[@id='" + table_id + "']/tbody/tr[" + str(row) + "]/td[" + str(column) + "]"
try:
element = self.driver.find_element_by_xpath(element_xpath)
except NoSuchElementException as e:
return False
return element
def get_table_data(self, table_id, row_count, column_count):
row = 1
Lists = []
while row <= row_count:
column = 1
row_content=[]
while column <= column_count:
s= "//*[@id='" + table_id + "']/tbody/tr[" + str(row) +"]/td[" + str(column) + "]"
v = self.driver.find_element_by_xpath(s).text
row_content.append(v)
column = column + 1
print("row_content=",row_content)
Lists.extend(row_content)
print(Lists[row-1][0])
row = row + 1
return Lists
# The is_xxx_present functions only return True/False
# All the log work is done in test procedure, so we can easily trace back
# using logging
def is_text_present (self, patterns):
for pattern in patterns:
if str(pattern) not in self.driver.page_source:
print('Text "'+pattern+'" is missing')
return False
return True
def is_element_present(self, how, what):
try:
self.driver.find_element(how, what)
except NoSuchElementException as e:
print('Could not find element '+str(what)+' by ' + str(how))
return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def get_case_number(self):
"""
what case are we running now
"""
funcname = sys._getframe(1).f_code.co_name
caseno_str = funcname[len('test_'):]  # slice off the 'test_' prefix (str.strip removes characters, not a prefix)
try:
caseno = int(caseno_str)
except ValueError:
print("get case number error! please check if func name is test_xxx")
return False
return caseno
def tearDown(self):
self.log.info(' END: CASE %s log \n\n' % str(self.case_no))
self.driver.quit()
self.assertEqual([], self.verificationErrors)
###################################################################
# #
# PART III: test cases #
# please refer to #
# https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=xxx #
# #
###################################################################
# Note: to comply with the unittest framework, we call these test_xxx functions
# from run_toastercases.py to avoid calling setUp() and tearDown() multiple times
class toaster_cases(toaster_cases_base):
##############
# CASE 901 #
##############
def test_901(self):
# the reason why get_case_number is not in setUp function is that
# otherwise it returns "setUp" instead of "test_xxx"
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
# open all columns
self.driver.find_element_by_id("edit-columns-button").click()
# adding an explicit wait for chromedriver.. -_-
self.browser_delay()
self.driver.find_element_by_id("started_on").click()
self.browser_delay()
self.driver.find_element_by_id("time").click()
self.driver.find_element_by_id("edit-columns-button").click()
# dict: {link text name : actual class name}
table_head_dict = {'Outcome':'outcome', 'Recipe':'target', 'Machine':'machine', 'Started on':'started_on', 'Completed on':'completed_on', \
'Errors':'errors_no', 'Warnings':'warnings_no', 'Time':'time'}
for key in table_head_dict:
try:
self.driver.find_element_by_link_text(key).click()
except Exception as e:
self.log.error("%s cannot be found on page" % key)
raise
column_list = self.get_table_column_text("class", table_head_dict[key])
# after 1st click, the list should be either sequenced or inverted, but we don't have a "default order" here
# the point is, after another click, it should be another order
if is_list_inverted(column_list):
self.driver.find_element_by_link_text(key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_sequenced(column_list), msg=("%s column not in order" % key))
else:
self.assertTrue(is_list_sequenced(column_list), msg=("%s column not sequenced" % key))
self.driver.find_element_by_link_text(key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_inverted(column_list), msg=("%s column not inverted" % key))
self.log.info("case passed")
##############
# CASE 902 #
##############
def test_902(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
# Could add more test patterns here in the future. Also, could search some items other than target column in future..
patterns = ["minimal", "sato"]
for pattern in patterns:
ori_target_column_texts = self.get_table_column_text("class", "target")
print(ori_target_column_texts)
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys(pattern)
self.driver.find_element_by_id("search-button").click()
new_target_column_texts = self.get_table_column_text("class", "target")
# if nothing found, we still count it as "pass"
if new_target_column_texts:
for text in new_target_column_texts:
                    # str.find() returns 0 (a falsy value) for a match at the start, so test membership instead
                    self.assertTrue(pattern in text, msg=("%s item doesn't exist " % pattern))
self.driver.find_element_by_css_selector("i.icon-remove").click()
target_column_texts = self.get_table_column_text("class", "target")
self.assertTrue(ori_target_column_texts == target_column_texts, msg=("builds changed after operations"))
##############
# CASE 903 #
##############
def test_903(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
# when opening a new page, "started_on" is not displayed by default
self.driver.find_element_by_id("edit-columns-button").click()
        # currently all the delays are for the chrome driver -_-
self.browser_delay()
self.driver.find_element_by_id("started_on").click()
self.driver.find_element_by_id("edit-columns-button").click()
# step 4
items = ["Outcome", "Completed on", "Started on"]
for item in items:
try:
temp_element = self.find_element_by_text_in_table('otable', item)
# this is how we find "filter icon" in the same level as temp_element(where "a" means clickable, "i" means icon)
self.assertTrue(temp_element.find_element_by_xpath("..//*/a/i[@class='icon-filter filtered']"))
except Exception as e:
self.assertFalse(True, msg=(" %s cannot be found! %s" % (item, e)))
raise
# step 5-6
temp_element = self.find_element_by_link_text_in_table('otable', 'Outcome')
temp_element.find_element_by_xpath("..//*/a/i[@class='icon-filter filtered']").click()
self.browser_delay()
# the 2nd option, whatever it is
self.driver.find_element_by_xpath("(//input[@name='filter'])[2]").click()
# click "Apply" button
self.driver.find_element_by_xpath("//*[@id='filter_outcome']//*[text()='Apply']").click()
# save screen here
time.sleep(1)
self.save_screenshot(screenshot_type='selenium', append_name='step5')
temp_element = self.find_element_by_link_text_in_table('otable', 'Completed on')
temp_element.find_element_by_xpath("..//*/a/i[@class='icon-filter filtered']").click()
self.browser_delay()
self.driver.find_element_by_xpath("//*[@id='filter_completed_on']//*[text()='Apply']").click()
# save screen here to compare to previous one
# please note that for chrome driver, need a little break before saving
# screen here -_-
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step6')
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys("core-image")
self.driver.find_element_by_id("search-button").click()
##############
# CASE 904 #
##############
def test_904(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_partial_link_text("core-image").click()
self.driver.find_element_by_link_text("Tasks").click()
self.table_name = 'otable'
# This is how we find the "default" rows-number!
rows_displayed = int(Select(self.driver.find_element_by_css_selector("select.pagesize")).first_selected_option.text)
print(rows_displayed)
self.assertTrue(self.get_table_element(self.table_name, rows_displayed), msg=("not enough rows displayed"))
self.assertFalse(self.get_table_element(self.table_name, rows_displayed + 1), \
msg=("more rows displayed than expected"))
# Search text box background text is "Search tasks"
self.assertTrue(self.driver.find_element_by_xpath("//*[@id='searchform']/*[@placeholder='Search tasks']"),\
msg=("background text doesn't exist"))
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys("busybox")
self.driver.find_element_by_id("search-button").click()
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step5')
self.driver.find_element_by_css_selector("i.icon-remove").click()
# Save screen here
self.save_screenshot(screenshot_type='selenium', append_name='step5_2')
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("cpu_used").click()
self.driver.find_element_by_id("disk_io").click()
self.driver.find_element_by_id("recipe_version").click()
self.driver.find_element_by_id("time_taken").click()
self.driver.find_element_by_id("edit-columns-button").click()
# The operation is the same as case901
        # dict: {link text name : actual class name}
table_head_dict = {'Order':'order', 'Recipe':'recipe_name', 'Task':'task_name', 'Executed':'executed', \
'Outcome':'outcome', 'Cache attempt':'cache_attempt', 'Time (secs)':'time_taken', 'CPU usage':'cpu_used', \
'Disk I/O (ms)':'disk_io'}
for key in table_head_dict:
# This is tricky here: we are doing so because there may be more than 1
# same-name link_text in one page. So we only find element inside the table
self.find_element_by_link_text_in_table(self.table_name, key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
# after 1st click, the list should be either sequenced or inverted, but we don't have a "default order" here
# the point is, after another click, it should be another order
# the first case is special:this means every item in column_list is the same, so
# after one click, either sequenced or inverted will be fine
if (is_list_inverted(column_list) and is_list_sequenced(column_list)) \
or (not column_list) :
self.find_element_by_link_text_in_table(self.table_name, key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_sequenced(column_list) or is_list_inverted(column_list), \
msg=("%s column not in any order" % key))
elif is_list_inverted(column_list):
self.find_element_by_link_text_in_table(self.table_name, key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_sequenced(column_list), msg=("%s column not in order" % key))
else:
self.assertTrue(is_list_sequenced(column_list), msg=("%s column not in order" % key))
self.find_element_by_link_text_in_table(self.table_name, key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_inverted(column_list), msg=("%s column not inverted" % key))
# step 8-10
# filter dict: {link text name : filter table name in xpath}
filter_dict = {'Executed':'filter_executed', 'Outcome':'filter_outcome', 'Cache attempt':'filter_cache_attempt'}
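        # for every filterable column: open its filter popup, select each enabled option in
        # turn, apply it, take a screenshot, and re-open the popup for the next option
        # (falling back to "Show all tasks" if the filtered table came back empty)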
for key in filter_dict:
temp_element = self.find_element_by_link_text_in_table(self.table_name, key)
            # find the filter icon beside it.
            # we must pause here (~1 sec) for the filter popup to appear
temp_element.find_element_by_xpath("..//*[@class='icon-filter filtered']").click()
self.browser_delay()
avail_options = self.driver.find_elements_by_xpath("//*[@id='" + filter_dict[key] + "']//*[@name='filter'][not(@disabled)]")
for number in range(0, len(avail_options)):
avail_options[number].click()
self.browser_delay()
# click "Apply"
self.driver.find_element_by_xpath("//*[@id='" + filter_dict[key] + "']//*[@type='submit']").click()
# insert screen capture here
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step8')
# after the last option was clicked, we don't need operation below anymore
if number < len(avail_options)-1:
try:
temp_element = self.find_element_by_link_text_in_table(self.table_name, key)
temp_element.find_element_by_xpath("..//*[@class='icon-filter filtered']").click()
avail_options = self.driver.find_elements_by_xpath("//*[@id='" + filter_dict[key] + "']//*[@name='filter'][not(@disabled)]")
except:
print("in exception")
self.find_element_by_text("Show all tasks").click()
# self.driver.find_element_by_xpath("//*[@id='searchform']/button[2]").click()
temp_element = self.find_element_by_link_text_in_table(self.table_name, key)
temp_element.find_element_by_xpath("..//*[@class='icon-filter filtered']").click()
avail_options = self.driver.find_elements_by_xpath("//*[@id='" + filter_dict[key] + "']//*[@name='filter'][not(@disabled)]")
self.browser_delay()
# step 11
for item in ['order', 'task_name', 'executed', 'outcome', 'recipe_name', 'recipe_version']:
try:
self.find_element_by_xpath_in_table(self.table_name, "./tbody/tr[1]/*[@class='" + item + "']/a").click()
except NoSuchElementException as e:
# let it go...
                print('no item in the column ' + item)
# insert screen shot here
self.save_screenshot(screenshot_type='selenium', append_name='step11')
self.driver.back()
# step 12-14
# about test_dict: please refer to testcase 904 requirement step 12-14
test_dict = {
'Time':{
'class':'time_taken',
'check_head_list':['Recipe', 'Task', 'Executed', 'Outcome', 'Time (secs)'],
'check_column_list':['cpu_used', 'cache_attempt', 'disk_io', 'order', 'recipe_version']
},
'CPU usage':{
'class':'cpu_used',
'check_head_list':['Recipe', 'Task', 'Executed', 'Outcome', 'CPU usage'],
'check_column_list':['cache_attempt', 'disk_io', 'order', 'recipe_version', 'time_taken']
},
'Disk I/O':{
'class':'disk_io',
'check_head_list':['Recipe', 'Task', 'Executed', 'Outcome', 'Disk I/O (ms)'],
'check_column_list':['cpu_used', 'cache_attempt', 'order', 'recipe_version', 'time_taken']
}
}
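        # for each of the three sortable columns above: navigate back to the build, open the
        # column page, check the expected table head, confirm the default sort is inverted,
        # then re-enable the hidden columns via "Edit columns"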
for key in test_dict:
self.find_element_by_partial_link_text_in_table('nav', 'core-image').click()
self.find_element_by_link_text_in_table('nav', key).click()
head_list = self.get_table_head_text('otable')
for item in test_dict[key]['check_head_list']:
self.assertTrue(item in head_list, msg=("%s not in head row" % item))
column_list = self.get_table_column_text('class', test_dict[key]['class'])
self.assertTrue(is_list_inverted(column_list), msg=("%s column not inverted" % key))
self.driver.find_element_by_id("edit-columns-button").click()
for item2 in test_dict[key]['check_column_list']:
self.driver.find_element_by_id(item2).click()
self.driver.find_element_by_id("edit-columns-button").click()
# TBD: save screen here
##############
# CASE 906 #
##############
def test_906(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.find_element_by_link_text_in_table('nav', 'Packages').click()
# find "bash" in first column (Packages)
self.driver.find_element_by_xpath("//*[@id='otable']//td[1]//*[text()='bash']").click()
        # save screen here to observe...
# step 6
self.driver.find_element_by_partial_link_text("Generated files").click()
head_list = self.get_table_head_text('otable')
for item in ['File', 'Size']:
self.assertTrue(item in head_list, msg=("%s not in head row" % item))
c_list = self.get_table_column_text('class', 'path')
self.assertTrue(is_list_sequenced(c_list), msg=("column not in order"))
# step 7
self.driver.find_element_by_partial_link_text("Runtime dependencies").click()
        # save screen here to observe...
# note that here table name is not 'otable'
head_list = self.get_table_head_text('dependencies')
for item in ['Package', 'Version', 'Size']:
self.assertTrue(item in head_list, msg=("%s not in head row" % item))
c_list = self.get_table_column_text_by_column_number('dependencies', 1)
self.assertTrue(is_list_sequenced(c_list), msg=("list not in order"))
texts = ['Size', 'License', 'Recipe', 'Recipe version', 'Layer', \
'Layer commit']
self.failUnless(self.is_text_present(texts))
##############
# CASE 910 #
##############
def test_910(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
image_type="core-image-minimal"
test_package1="busybox"
test_package2="lib"
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text(image_type).click()
self.driver.find_element_by_link_text("Recipes").click()
self.save_screenshot(screenshot_type='selenium', append_name='step3')
self.table_name = 'otable'
# This is how we find the "default" rows-number!
rows_displayed = int(Select(self.driver.find_element_by_css_selector("select.pagesize")).first_selected_option.text)
print(rows_displayed)
self.assertTrue(self.get_table_element(self.table_name, rows_displayed))
self.assertFalse(self.get_table_element(self.table_name, rows_displayed + 1))
# Check the default table is sorted by Recipe
tasks_column_count = len(self.driver.find_elements_by_xpath("/html/body/div[2]/div/div[2]/div[2]/table/tbody/tr/td[1]"))
print(tasks_column_count)
default_column_list = self.get_table_column_text_by_column_number(self.table_name, 1)
#print default_column_list
self.assertTrue(is_list_sequenced(default_column_list))
# Search text box background text is "Search recipes"
self.assertTrue(self.driver.find_element_by_xpath("//*[@id='searchform']/*[@placeholder='Search recipes']"))
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys(test_package1)
self.driver.find_element_by_id("search-button").click()
# Save screen here
self.save_screenshot(screenshot_type='selenium', append_name='step4')
self.driver.find_element_by_css_selector("i.icon-remove").click()
self.save_screenshot(screenshot_type='selenium', append_name='step4_2')
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("depends_on").click()
self.driver.find_element_by_id("layer_version__branch").click()
self.driver.find_element_by_id("layer_version__layer__commit").click()
self.driver.find_element_by_id("depends_by").click()
self.driver.find_element_by_id("edit-columns-button").click()
self.find_element_by_link_text_in_table(self.table_name, 'Recipe').click()
# Check the inverted table by Recipe
# Recipe doesn't have class name
#inverted_tasks_column_count = len(self.driver.find_elements_by_xpath("/html/body/div[2]/div/div[2]/div[2]/table/tbody/tr/td[1]"))
#print inverted_tasks_column_count
#inverted_column_list = self.get_table_column_text_by_column_number(self.table_name, 1)
#print inverted_column_list
#self.driver.find_element_by_partial_link_text("zlib").click()
#self.driver.back()
#self.assertTrue(is_list_inverted(inverted_column_list))
#self.find_element_by_link_text_in_table(self.table_name, 'Recipe').click()
table_head_dict = {'Recipe':'recipe__name', 'Recipe file':'recipe_file', 'Section':'recipe_section', \
'License':'recipe_license', 'Layer':'layer_version__layer__name', \
'Layer branch':'layer_version__branch'}
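        # exercise each sortable column: depending on its current order, re-sort it, follow a
        # recipe link and come back, then search, checking that the ordering is preserved
        # throughout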
for key in table_head_dict:
self.find_element_by_link_text_in_table(self.table_name, key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
if (is_list_inverted(column_list) and is_list_sequenced(column_list)) \
or (not column_list) :
self.find_element_by_link_text_in_table(self.table_name, key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_sequenced(column_list) or is_list_inverted(column_list))
self.driver.find_element_by_partial_link_text("acl").click()
self.driver.back()
self.assertTrue(is_list_sequenced(column_list) or is_list_inverted(column_list))
# Search text box background text is "Search recipes"
self.assertTrue(self.driver.find_element_by_xpath("//*[@id='searchform']/*[@placeholder='Search recipes']"))
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys(test_package2)
self.driver.find_element_by_id("search-button").click()
column_search_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_sequenced(column_search_list) or is_list_inverted(column_search_list))
self.driver.find_element_by_css_selector("i.icon-remove").click()
elif is_list_inverted(column_list):
self.find_element_by_link_text_in_table(self.table_name, key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_sequenced(column_list))
self.driver.find_element_by_partial_link_text("acl").click()
self.driver.back()
self.assertTrue(is_list_sequenced(column_list))
# Search text box background text is "Search recipes"
self.assertTrue(self.driver.find_element_by_xpath("//*[@id='searchform']/*[@placeholder='Search recipes']"))
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys(test_package2)
self.driver.find_element_by_id("search-button").click()
column_search_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_sequenced(column_search_list))
self.driver.find_element_by_css_selector("i.icon-remove").click()
else:
self.assertTrue(is_list_sequenced(column_list), msg=("list %s not sequenced" % key))
self.find_element_by_link_text_in_table(self.table_name, key).click()
column_list = self.get_table_column_text("class", table_head_dict[key])
self.assertTrue(is_list_inverted(column_list))
try:
self.driver.find_element_by_partial_link_text("acl").click()
except:
self.driver.find_element_by_partial_link_text("zlib").click()
self.driver.back()
self.assertTrue(is_list_inverted(column_list))
# Search text box background text is "Search recipes"
self.assertTrue(self.driver.find_element_by_xpath("//*[@id='searchform']/*[@placeholder='Search recipes']"))
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys(test_package2)
self.driver.find_element_by_id("search-button").click()
column_search_list = self.get_table_column_text("class", table_head_dict[key])
#print column_search_list
self.assertTrue(is_list_inverted(column_search_list))
self.driver.find_element_by_css_selector("i.icon-remove").click()
# Bug 5919
for key in table_head_dict:
print(key)
self.find_element_by_link_text_in_table(self.table_name, key).click()
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id(table_head_dict[key]).click()
self.driver.find_element_by_id("edit-columns-button").click()
self.browser_delay()
# After hide the column, the default table should be sorted by Recipe
tasks_column_count = len(self.driver.find_elements_by_partial_link_text("acl"))
#print tasks_column_count
default_column_list = self.get_table_column_text_by_column_number(self.table_name, 1)
#print default_column_list
self.assertTrue(is_list_sequenced(default_column_list))
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("recipe_file").click()
self.driver.find_element_by_id("recipe_section").click()
self.driver.find_element_by_id("recipe_license").click()
self.driver.find_element_by_id("layer_version__layer__name").click()
self.driver.find_element_by_id("edit-columns-button").click()
##############
# CASE 911 #
##############
def test_911(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.find_element_by_link_text_in_table('nav', 'Recipes').click()
# step 3-5
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys("lib")
self.driver.find_element_by_id("search-button").click()
# save screen here for observation
self.save_screenshot(screenshot_type='selenium', append_name='step5')
# step 6
self.driver.find_element_by_css_selector("i.icon-remove").click()
self.driver.find_element_by_id("search").clear()
# we deliberately want "no result" here
self.driver.find_element_by_id("search").send_keys("no such input")
self.driver.find_element_by_id("search-button").click()
try:
self.find_element_by_text("Show all recipes").click()
except:
self.fail(msg='Could not identify blank page elements')
##############
# CASE 912 #
##############
def test_912(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.find_element_by_link_text_in_table('nav', 'Recipes').click()
# step 3
head_list = self.get_table_head_text('otable')
for item in ['Recipe', 'Recipe version', 'Recipe file', 'Section', 'License', 'Layer']:
self.assertTrue(item in head_list, msg=("item %s not in head row" % item))
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("depends_on").click()
self.driver.find_element_by_id("layer_version__branch").click()
self.driver.find_element_by_id("layer_version__layer__commit").click()
self.driver.find_element_by_id("depends_by").click()
self.driver.find_element_by_id("edit-columns-button").click()
# check if columns selected above is shown
check_list = ['Dependencies', 'Layer branch', 'Layer commit', 'Reverse dependencies']
head_list = self.get_table_head_text('otable')
time.sleep(2)
print(head_list)
for item in check_list:
self.assertTrue(item in head_list, msg=("item %s not in head row" % item))
# un-check 'em all
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("depends_on").click()
self.driver.find_element_by_id("layer_version__branch").click()
self.driver.find_element_by_id("layer_version__layer__commit").click()
self.driver.find_element_by_id("depends_by").click()
self.driver.find_element_by_id("edit-columns-button").click()
# don't exist any more
head_list = self.get_table_head_text('otable')
for item in check_list:
self.assertFalse(item in head_list, msg=("item %s should not be in head row" % item))
##############
# CASE 913 #
##############
def test_913(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.find_element_by_link_text_in_table('nav', 'Recipes').click()
# step 3
head_list = self.get_table_head_text('otable')
for item in ['Recipe', 'Recipe version', 'Recipe file', 'Section', 'License', 'Layer']:
self.assertTrue(item in head_list, msg=("item %s not in head row" % item))
# step 4
self.driver.find_element_by_id("edit-columns-button").click()
# save screen
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step4')
self.driver.find_element_by_id("edit-columns-button").click()
##############
# CASE 914 #
##############
def test_914(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
image_type="core-image-minimal"
test_package1="busybox"
test_package2="gdbm"
test_package3="gettext-native"
driver = self.driver
driver.maximize_window()
driver.get(self.base_url)
driver.find_element_by_link_text(image_type).click()
driver.find_element_by_link_text("Recipes").click()
driver.find_element_by_link_text(test_package1).click()
self.table_name = 'information'
tasks_row_count = len(driver.find_elements_by_xpath("//*[@id='"+self.table_name+"']/table/tbody/tr/td[1]"))
tasks_column_count = len(driver.find_elements_by_xpath("//*[@id='"+self.table_name+"']/table/tbody/tr[1]/td"))
print('rows: '+str(tasks_row_count))
print('columns: '+str(tasks_column_count))
Tasks_column = self.get_table_column_text_by_column_number(self.table_name, 2)
print ("Tasks_column=", Tasks_column)
key_tasks=["do_fetch", "do_unpack", "do_patch", "do_configure", "do_compile", "do_install", "do_package", "do_build"]
        for key_task in key_tasks:
            if key_task not in Tasks_column:
                print("Error! Missing key task: %s" % key_task)
            else:
                print("%s is in tasks" % key_task)
if Tasks_column.index(key_tasks[0]) != 0:
print ("Error! %s is not in the right position" % key_tasks[0])
else:
print ("%s is in right position" % key_tasks[0])
if Tasks_column[-1] != key_tasks[-1]:
print ("Error! %s is not in the right position" % key_tasks[-1])
else:
print ("%s is in right position" % key_tasks[-1])
driver.find_element_by_partial_link_text("Packages (").click()
packages_name = driver.find_element_by_partial_link_text("Packages (").text
print(packages_name)
        # filter() returns an iterator in Python 3, so join the digits before converting
        packages_num = int(''.join(filter(str.isdigit, packages_name)))
print(packages_num)
#switch the table to show more than 10 rows at a time
self.driver.find_element_by_xpath("//*[@id='packages-built']/div[1]/div/select").click()
Select(driver.find_element_by_xpath("//*[@id='packages-built']/div[1]/div/select")).select_by_value('150')
self.driver.find_element_by_xpath("//*[@id='packages-built']/div[1]/div/select").send_keys(Keys.ENTER)
packages_row_count = len(driver.find_elements_by_xpath("//*[@id='otable']/tbody/tr/td[1]"))
print(packages_row_count)
if packages_num != packages_row_count:
print ("Error! The packages number is not correct")
else:
print ("The packages number is correct")
driver.find_element_by_partial_link_text("Build dependencies (").click()
depends_name = driver.find_element_by_partial_link_text("Build dependencies (").text
print(depends_name)
        depends_num = int(''.join(filter(str.isdigit, depends_name)))
print(depends_num)
if depends_num == 0:
depends_message = repr(driver.find_element_by_css_selector("div.alert.alert-info").text)
print(depends_message)
if depends_message.find("has no build dependencies.") < 0:
print ("Error! The message isn't expected.")
else:
print ("The message is expected")
else:
depends_row_count = len(driver.find_elements_by_xpath("//*[@id='dependencies']/table/tbody/tr/td[1]"))
print(depends_row_count)
if depends_num != depends_row_count:
print ("Error! The dependent packages number is not correct")
else:
print ("The dependent packages number is correct")
driver.find_element_by_partial_link_text("Reverse build dependencies (").click()
rdepends_name = driver.find_element_by_partial_link_text("Reverse build dependencies (").text
print(rdepends_name)
        rdepends_num = int(''.join(filter(str.isdigit, rdepends_name)))
print(rdepends_num)
if rdepends_num == 0:
rdepends_message = repr(driver.find_element_by_css_selector("#brought-in-by > div.alert.alert-info").text)
print(rdepends_message)
if rdepends_message.find("has no reverse build dependencies.") < 0:
print ("Error! The message isn't expected.")
else:
print ("The message is expected")
else:
print ("The reverse dependent packages number is correct")
driver.find_element_by_link_text("Recipes").click()
driver.find_element_by_link_text(test_package2).click()
driver.find_element_by_partial_link_text("Packages (").click()
driver.find_element_by_partial_link_text("Build dependencies (").click()
driver.find_element_by_partial_link_text("Reverse build dependencies (").click()
driver.find_element_by_link_text("Recipes").click()
driver.find_element_by_link_text(test_package3).click()
native_tasks_row_count = len(driver.find_elements_by_xpath("//*[@id='information']/table/tbody/tr/td[1]"))
native_tasks_column_count = len(driver.find_elements_by_xpath("//*[@id='information']/table/tbody/tr[1]/td"))
print(native_tasks_row_count)
print(native_tasks_column_count)
Native_Tasks_column = self.get_table_column_text_by_column_number(self.table_name, 2)
print ("Native_Tasks_column=", Native_Tasks_column)
native_key_tasks=["do_fetch", "do_unpack", "do_patch", "do_configure", "do_compile", "do_install", "do_build"]
        for native_key_task in native_key_tasks:
            if native_key_task not in Native_Tasks_column:
                print("Error! Missing key task: %s" % native_key_task)
            else:
                print("%s is in tasks" % native_key_task)
if Native_Tasks_column.index(native_key_tasks[0]) != 0:
print ("Error! %s is not in the right position" % native_key_tasks[0])
else:
print ("%s is in right position" % native_key_tasks[0])
if Native_Tasks_column[-1] != native_key_tasks[-1]:
print ("Error! %s is not in the right position" % native_key_tasks[-1])
else:
print ("%s is in right position" % native_key_tasks[-1])
driver.find_element_by_partial_link_text("Packages (").click()
native_packages_name = driver.find_element_by_partial_link_text("Packages (").text
print(native_packages_name)
        native_packages_num = int(''.join(filter(str.isdigit, native_packages_name)))
print(native_packages_num)
if native_packages_num != 0:
print ("Error! Native task shouldn't have any packages.")
else:
native_package_message = repr(driver.find_element_by_css_selector("#packages-built > div.alert.alert-info").text)
print(native_package_message)
if native_package_message.find("does not build any packages.") < 0:
print ("Error! The message for native task isn't expected.")
else:
print ("The message for native task is expected.")
driver.find_element_by_partial_link_text("Build dependencies (").click()
native_depends_name = driver.find_element_by_partial_link_text("Build dependencies (").text
print(native_depends_name)
        native_depends_num = int(''.join(filter(str.isdigit, native_depends_name)))
print(native_depends_num)
native_depends_row_count = len(driver.find_elements_by_xpath("//*[@id='dependencies']/table/tbody/tr/td[1]"))
print(native_depends_row_count)
if native_depends_num != native_depends_row_count:
print ("Error! The dependent packages number is not correct")
else:
print ("The dependent packages number is correct")
driver.find_element_by_partial_link_text("Reverse build dependencies (").click()
native_rdepends_name = driver.find_element_by_partial_link_text("Reverse build dependencies (").text
print(native_rdepends_name)
        native_rdepends_num = int(''.join(filter(str.isdigit, native_rdepends_name)))
print(native_rdepends_num)
native_rdepends_row_count = len(driver.find_elements_by_xpath("//*[@id='brought-in-by']/table/tbody/tr/td[1]"))
print(native_rdepends_row_count)
if native_rdepends_num != native_rdepends_row_count:
print ("Error! The reverse dependent packages number is not correct")
else:
print ("The reverse dependent packages number is correct")
driver.find_element_by_link_text("Recipes").click()
##############
# CASE 915 #
##############
def test_915(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
# step 3
self.find_element_by_link_text_in_table('nav', 'Configuration').click()
self.driver.find_element_by_link_text("BitBake variables").click()
# step 4
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys("lib")
self.driver.find_element_by_id("search-button").click()
# save screen to see result
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step4')
# step 5
self.driver.find_element_by_css_selector("i.icon-remove").click()
head_list = self.get_table_head_text('otable')
print(head_list)
print(len(head_list))
self.assertTrue(head_list == ['Variable', 'Value', 'Set in file', 'Description'], \
msg=("head row contents wrong"))
# step 8
# search other string. and click "Variable" to re-sort, check if table
# head is still the same
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys("poky")
self.driver.find_element_by_id("search-button").click()
self.find_element_by_link_text_in_table('otable', 'Variable').click()
head_list = self.get_table_head_text('otable')
self.assertTrue(head_list == ['Variable', 'Value', 'Set in file', 'Description'], \
msg=("head row contents wrong"))
self.find_element_by_link_text_in_table('otable', 'Variable').click()
head_list = self.get_table_head_text('otable')
self.assertTrue(head_list == ['Variable', 'Value', 'Set in file', 'Description'], \
msg=("head row contents wrong"))
##############
# CASE 916 #
##############
def test_916(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
# step 2-3
self.find_element_by_link_text_in_table('nav', 'Configuration').click()
self.driver.find_element_by_link_text("BitBake variables").click()
variable_list = self.get_table_column_text('class', 'variable_name')
self.assertTrue(is_list_sequenced(variable_list), msg=("list not in order"))
# step 4
self.find_element_by_link_text_in_table('otable', 'Variable').click()
variable_list = self.get_table_column_text('class', 'variable_name')
self.assertTrue(is_list_inverted(variable_list), msg=("list not inverted"))
self.find_element_by_link_text_in_table('otable', 'Variable').click()
# step 5
# searching won't change the sequentiality
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys("lib")
self.driver.find_element_by_id("search-button").click()
variable_list = self.get_table_column_text('class', 'variable_name')
self.assertTrue(is_list_sequenced(variable_list), msg=("list not in order"))
##############
# CASE 923 #
##############
def test_923(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
# Step 2
# default sequence in "Completed on" column is inverted
c_list = self.get_table_column_text('class', 'completed_on')
self.assertTrue(is_list_inverted(c_list), msg=("list not inverted"))
# step 3
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("started_on").click()
self.driver.find_element_by_id("time").click()
self.driver.find_element_by_id("edit-columns-button").click()
head_list = self.get_table_head_text('otable')
for item in ['Outcome', 'Recipe', 'Machine', 'Started on', 'Completed on', 'Failed tasks', 'Errors', 'Warnings', 'Time', "Image files", "Project"]:
self.failUnless(item in head_list, msg=item+' is missing from table head.')
##############
# CASE 924 #
##############
def test_924(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
# Please refer to case 924 requirement
# default sequence in "Completed on" column is inverted
c_list = self.get_table_column_text('class', 'completed_on')
self.assertTrue(is_list_inverted(c_list), msg=("list not inverted"))
# Step 4
# click Errors , order in "Completed on" should be disturbed. Then hide
# error column to check if order in "Completed on" can be restored
#THIS TEST IS NO LONGER VALID DUE TO DESIGN CHANGES. LEAVING IN PENDING UPDATES TO DESIGN
#self.find_element_by_link_text_in_table('otable', 'Errors').click()
#self.driver.find_element_by_id("edit-columns-button").click()
#self.driver.find_element_by_id("errors_no").click()
#self.driver.find_element_by_id("edit-columns-button").click()
# Note: without time.sleep here, there'll be unpredictable error..TBD
time.sleep(1)
c_list = self.get_table_column_text('class', 'completed_on')
self.assertTrue(is_list_inverted(c_list), msg=("list not inverted"))
##############
# CASE 940 #
##############
def test_940(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
# Step 2-3
self.find_element_by_link_text_in_table('nav', 'Packages').click()
check_head_list = ['Package', 'Package version', 'Size', 'Recipe']
head_list = self.get_table_head_text('otable')
self.assertTrue(head_list == check_head_list, msg=("head row not as expected"))
# Step 4
# pulldown menu
option_ids = ['recipe__layer_version__layer__name', 'recipe__layer_version__branch', \
'recipe__layer_version__layer__commit', 'license', 'recipe__version']
self.driver.find_element_by_id("edit-columns-button").click()
for item in option_ids:
if not self.driver.find_element_by_id(item).is_selected():
self.driver.find_element_by_id(item).click()
self.driver.find_element_by_id("edit-columns-button").click()
# save screen here to observe that 'Package' and 'Package version' is
# not selectable
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step4')
##############
# CASE 941 #
##############
def test_941(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
# Step 2-3
self.find_element_by_link_text_in_table('nav', 'Packages').click()
# column -- Package
column_list = self.get_table_column_text_by_column_number('otable', 1)
self.assertTrue(is_list_sequenced(column_list), msg=("list not in order"))
self.find_element_by_link_text_in_table('otable', 'Size').click()
##############
# CASE 942 #
##############
def test_942(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.driver.find_element_by_link_text("Packages").click()
#get initial table header
head_list = self.get_table_head_text('otable')
#remove the Recipe column from table header
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("recipe__name").click()
self.driver.find_element_by_id("edit-columns-button").click()
#get modified table header
new_head = self.get_table_head_text('otable')
        # hiding the Recipe column should leave fewer header cells than before
        self.assertTrue(len(head_list) > len(new_head), msg=("Recipe column was not removed from table header"))
##############
# CASE 943 #
##############
def test_943(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.driver.find_element_by_link_text("Packages").click()
#search for the "bash" package -> this should definitely be present
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys("bash")
self.driver.find_element_by_id("search-button").click()
#check for the search result message "XX packages found"
self.assertTrue(self.is_text_present("packages found"), msg=("no packages found text"))
##############
# CASE 944 #
##############
def test_944(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
# step 1: test Recipes page stuff
self.driver.find_element_by_link_text("Recipes").click()
# for these 3 items, default status is not-checked
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("layer_version__branch").click()
self.driver.find_element_by_id("layer_version__layer__commit").click()
self.driver.find_element_by_id("edit-columns-button").click()
# otable is the recipes table here
otable_head_text = self.get_table_head_text('otable')
for item in ["Layer", "Layer branch", "Layer commit"]:
self.failIf(item not in otable_head_text, msg=item+' not in table head.')
# click the fist recipe, whatever it is
self.get_table_element("otable", 1, 1).click()
self.assertTrue(self.is_text_present(["Layer", "Layer branch", "Layer commit", "Recipe file"]), \
msg=("text not in web page"))
# step 2: test Packages page stuff. almost same as above
self.driver.back()
self.browser_delay()
self.driver.find_element_by_link_text("Packages").click()
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("recipe__layer_version__layer__name").click()
self.driver.find_element_by_id("recipe__layer_version__branch").click()
self.driver.find_element_by_id("recipe__layer_version__layer__commit").click()
self.driver.find_element_by_id("edit-columns-button").click()
otable_head_text = self.get_table_head_text("otable")
for item in ["Layer", "Layer branch", "Layer commit"]:
self.assertFalse(item not in otable_head_text, msg=("item %s should be in head row" % item))
# click the fist recipe, whatever it is
self.get_table_element("otable", 1, 1).click()
self.assertTrue(self.is_text_present(["Layer", "Layer branch", "Layer commit"]), \
msg=("text not in web page"))
# step 3: test Packages core-image-minimal(images) stuff. almost same as above. Note when future element-id changes...
self.driver.back()
self.driver.find_element_by_link_text("core-image-minimal").click()
self.driver.find_element_by_id("edit-columns-button").click()
self.driver.find_element_by_id("layer_name").click()
self.driver.find_element_by_id("layer_branch").click()
self.driver.find_element_by_id("layer_commit").click()
self.driver.find_element_by_id("edit-columns-button").click()
otable_head_text = self.get_table_head_text("otable")
for item in ["Layer", "Layer branch", "Layer commit"]:
self.assertFalse(item not in otable_head_text, msg=("item %s should be in head row" % item))
# click the fist recipe, whatever it is
self.get_table_element("otable", 1, 1).click()
self.assertTrue(self.is_text_present(["Layer", "Layer branch", "Layer commit"]), \
msg=("text not in web page"))
# step 4: check Configuration page
self.driver.back()
self.driver.find_element_by_link_text("Configuration").click()
otable_head_text = self.get_table_head_text()
self.assertTrue(self.is_text_present(["Layer", "Layer branch", "Layer commit"]), \
msg=("text not in web page"))
##############
# CASE 945 #
##############
def test_945(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
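        # for each table page: pick the second "show rows" option, apply it, and verify that
        # exactly that many rows are rendered, both before and after opening the first row
        # and navigating back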
for item in ["Packages", "Recipes", "Tasks"]:
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
            self.driver.find_element_by_link_text(item).click()
# this may be page specific. If future page content changes, try to replace it with new xpath
xpath_showrows = "/html/body/div[4]/div/div/div[2]/div[2]/div[2]/div/div/div[2]/select"
xpath_table = "html/body/div[4]/div/div/div[2]/div[2]/table/tbody"#"id=('otable')/tbody"
self.driver.find_element_by_xpath(xpath_showrows).click()
rows_displayed = int(self.driver.find_element_by_xpath(xpath_showrows + "/option[2]").text)
# not sure if this is a Selenium Select bug: If page is not refreshed here, "select(by visible text)" operation will go back to 100-row page
# Sure we can use driver.get(url) to refresh page, but since page will vary, we use click link text here
            self.driver.find_element_by_link_text(item).click()
Select(self.driver.find_element_by_css_selector("select.pagesize")).select_by_visible_text(str(rows_displayed))
self.failUnless(self.is_element_present(By.XPATH, xpath_table + "/tr[" + str(rows_displayed) +"]"))
self.failIf(self.is_element_present(By.XPATH, xpath_table + "/tr[" + str(rows_displayed+1) +"]"))
# click 1st package, then go back to check if it's still those rows shown.
            self.driver.find_element_by_xpath(xpath_table + "/tr[1]/td[1]/a").click()
time.sleep(3)
self.driver.find_element_by_link_text(item).click()
            self.assertTrue(self.is_element_present(By.XPATH, xpath_table + "/tr[" + str(rows_displayed) +"]"),\
                            msg=("Row %d should exist" % rows_displayed))
            self.assertFalse(self.is_element_present(By.XPATH, xpath_table + "/tr[" + str(rows_displayed+1) +"]"),\
                            msg=("Row %d should not exist" % (rows_displayed+1)))
##############
# CASE 946 #
##############
def test_946(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.driver.find_element_by_link_text("Configuration").click()
# step 3-4
check_list = ["Summary", "BitBake variables"]
for item in check_list:
if not self.is_element_present(how=By.LINK_TEXT, what=item):
self.log.error("%s not found" %item)
if not self.is_text_present(['Layers', 'Layer', 'Layer branch', 'Layer commit']):
self.log.error("text not found")
# step 5
self.driver.find_element_by_link_text("BitBake variables").click()
if not self.is_text_present(['Variable', 'Value', 'Set in file', 'Description']):
self.log.error("text not found")
# This may be unstable because it's page-specific
# step 6: this is how we find filter beside "Set in file"
temp_element = self.find_element_by_text_in_table('otable', "Set in file")
temp_element.find_element_by_xpath("..//*/a/i[@class='icon-filter filtered']").click()
self.browser_delay()
self.driver.find_element_by_xpath("(//input[@name='filter'])[3]").click()
btns = self.driver.find_elements_by_css_selector("button.btn.btn-primary")
for btn in btns:
try:
btn.click()
break
except:
pass
# save screen here
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step6')
self.driver.find_element_by_id("edit-columns-button").click()
# save screen here
# step 7
# we should manually check the step 6-8 result using screenshot
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step7')
self.driver.find_element_by_id("edit-columns-button").click()
# step 9
# click the 1st item, no matter what it is
self.driver.find_element_by_xpath("//*[@id='otable']/tbody/tr[1]/td[1]/a").click()
# give it 1 sec so the pop-up becomes the "active_element"
time.sleep(1)
element = self.driver.switch_to.active_element
check_list = ['Order', 'Configuration file', 'Operation', 'Line number']
for item in check_list:
if item not in element.text:
self.log.error("%s not found" %item)
# any better way to close this pop-up? ... TBD
element.find_element_by_class_name("close").click()
# step 10 : need to manually check "Yocto Manual" in saved screen
self.driver.find_element_by_css_selector("i.icon-share.get-info").click()
# save screen here
time.sleep(5)
self.save_screenshot(screenshot_type='native', append_name='step10')
##############
# CASE 947 #
##############
def test_947(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.find_element_by_link_text_in_table('nav', 'Configuration').click()
# step 2
self.driver.find_element_by_link_text("BitBake variables").click()
# step 3
def xpath_option(column_name):
# return xpath of options under "Edit columns" button
return self.shortest_xpath('id', 'navTab') + self.shortest_xpath('id', 'editcol') \
+ self.shortest_xpath('id', column_name)
self.driver.find_element_by_id('edit-columns-button').click()
# by default, option "Description" and "Set in file" were checked
self.driver.find_element_by_xpath(xpath_option('description')).click()
self.driver.find_element_by_xpath(xpath_option('file')).click()
self.driver.find_element_by_id('edit-columns-button').click()
check_list = ['Description', 'Set in file']
head_list = self.get_table_head_text('otable')
for item in check_list:
self.assertFalse(item in head_list, msg=("item %s should not be in head row" % item))
# check these 2 options and verify again
self.driver.find_element_by_id('edit-columns-button').click()
self.driver.find_element_by_xpath(xpath_option('description')).click()
self.driver.find_element_by_xpath(xpath_option('file')).click()
self.driver.find_element_by_id('edit-columns-button').click()
head_list = self.get_table_head_text('otable')
for item in check_list:
self.assertTrue(item in head_list, msg=("item %s not in head row" % item))
##############
# CASE 948 #
##############
def test_948(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.find_element_by_link_text_in_table('nav', 'Configuration').click()
self.driver.find_element_by_link_text("BitBake variables").click()
#get number of variables visible by default
number_before_search = self.driver.find_element_by_class_name('page-header').text
# search for a while...
self.driver.find_element_by_id("search").clear()
self.driver.find_element_by_id("search").send_keys("BB")
self.driver.find_element_by_id("search-button").click()
#get number of variables visible after search
number_after_search = self.driver.find_element_by_class_name('page-header').text
self.assertTrue(number_before_search > number_after_search, msg=("items should be less after search"))
##############
# CASE 949 #
##############
def test_949(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_link_text("core-image-minimal").click()
self.find_element_by_link_text_in_table('nav', 'core-image-minimal').click()
# step 3
try:
self.driver.find_element_by_partial_link_text("Packages included")
self.driver.find_element_by_partial_link_text("Directory structure")
except Exception as e:
self.log.error(e)
self.assertFalse(True)
# step 4
head_list = self.get_table_head_text('otable')
for item in ['Package', 'Package version', 'Size', 'Dependencies', 'Reverse dependencies', 'Recipe']:
self.assertTrue(item in head_list, msg=("item %s not in head row" % item))
# step 5-6
self.driver.find_element_by_id("edit-columns-button").click()
selectable_class = 'checkbox'
        # the 'muted' checkboxes mark the columns that cannot be de-selected
unselectable_class = 'checkbox muted'
selectable_check_list = ['Dependencies', 'Layer', 'Layer branch', 'Layer commit', \
'License', 'Recipe', 'Recipe version', 'Reverse dependencies', \
'Size', 'Size over total (%)']
unselectable_check_list = ['Package', 'Package version']
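        # collect the label text of every selectable and unselectable ("muted") checkbox in
        # the "Edit columns" dropdown so we can compare against the expected lists above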
selectable_list = list()
unselectable_list = list()
selectable_elements = self.driver.find_elements_by_xpath("//*[@id='editcol']//*[@class='" + selectable_class + "']")
unselectable_elements = self.driver.find_elements_by_xpath("//*[@id='editcol']//*[@class='" + unselectable_class + "']")
for element in selectable_elements:
selectable_list.append(element.text)
for element in unselectable_elements:
unselectable_list.append(element.text)
# check them
for item in selectable_check_list:
self.assertTrue(item in selectable_list, msg=("%s not found in dropdown menu" % item))
for item in unselectable_check_list:
self.assertTrue(item in unselectable_list, msg=("%s not found in dropdown menu" % item))
self.driver.find_element_by_id("edit-columns-button").click()
# step 7
self.driver.find_element_by_partial_link_text("Directory structure").click()
head_list = self.get_table_head_text('dirtable')
for item in ['Directory / File', 'Symbolic link to', 'Source package', 'Size', 'Permissions', 'Owner', 'Group']:
self.assertTrue(item in head_list, msg=("%s not found in Directory structure table head" % item))
##############
# CASE 950 #
##############
def test_950(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
# step3&4: so far we're not sure if there's "successful build" or "failed
# build".If either of them doesn't exist, we can still go on other steps
check_list = ['Configuration', 'Tasks', 'Recipes', 'Packages', 'Time', 'CPU usage', 'Disk I/O']
has_successful_build = 1
has_failed_build = 1
try:
pass_icon = self.driver.find_element_by_xpath("//*[@class='icon-ok-sign success']")
except Exception:
self.log.info("no successful build exists")
has_successful_build = 0
pass
if has_successful_build:
pass_icon.click()
# save screen here to check if it matches requirement.
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step3_1')
for item in check_list:
try:
self.find_element_by_link_text_in_table('nav', item)
except Exception:
self.assertFalse(True, msg=("link %s cannot be found in the page" % item))
# step 6
check_list_2 = ['Packages included', 'Total package size', \
'License manifest', 'Image files']
self.assertTrue(self.is_text_present(check_list_2), msg=("text not in web page"))
self.driver.back()
try:
fail_icon = self.driver.find_element_by_xpath("//*[@class='icon-minus-sign error']")
except Exception:
has_failed_build = 0
self.log.info("no failed build exists")
pass
if has_failed_build:
fail_icon.click()
# save screen here to check if it matches requirement.
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step3_2')
for item in check_list:
try:
self.find_element_by_link_text_in_table('nav', item)
except Exception:
self.assertFalse(True, msg=("link %s cannot be found in the page" % item))
# step 7 involved
check_list_3 = ['Machine', 'Distro', 'Layers', 'Total number of tasks', 'Tasks executed', \
'Tasks not executed', 'Reuse', 'Recipes built', 'Packages built']
self.assertTrue(self.is_text_present(check_list_3), msg=("text not in web page"))
self.driver.back()
##############
# CASE 951 #
##############
def test_951(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
# currently test case itself isn't responsible for creating "1 successful and
# 1 failed build"
has_successful_build = 1
has_failed_build = 1
try:
fail_icon = self.driver.find_element_by_xpath("//*[@class='icon-minus-sign error']")
except Exception:
has_failed_build = 0
self.log.info("no failed build exists")
pass
# if there's failed build, we can proceed
if has_failed_build:
self.driver.find_element_by_partial_link_text("error").click()
self.driver.back()
# not sure if there "must be" some warnings, so here save a screen
self.browser_delay()
self.save_screenshot(screenshot_type='selenium', append_name='step4')
##############
# CASE 955 #
##############
def test_955(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.log.info(" You should manually create all images before test starts!")
        # So far the case itself is not responsible for creating all sorts of images.
# So assuming they are already there
# step 2
self.driver.find_element_by_link_text("core-image-minimal").click()
# save screen here to see the page component
##############
# CASE 956 #
##############
def test_956(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
# step 2-3 need to run manually
self.log.info("step 2-3: checking the help message when you hover on help icon of target,\
tasks, recipes, packages need to run manually")
self.driver.find_element_by_partial_link_text("Manual").click()
if not self.is_text_present("Manual"):
self.log.error("please check [Toaster manual] link on page")
self.failIf(True)
####################################################################################################
# Starting backend tests ###########################################################################
####################################################################################################
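    # The backend cases below bypass the UI entirely: they open the toaster.sqlite database
    # in the working directory and compare the ORM tables against the values shipped in
    # toasterconf.json.
    #
    # A minimal sketch (hypothetical helper, not part of the original suite) of the
    # query-vs-JSON comparison pattern they all repeat:
    #
    #   def _db_column(self, query):
    #       con = sqlite.connect('toaster.sqlite')
    #       try:
    #           return [row[0] for row in con.cursor().execute(query).fetchall()]
    #       finally:
    #           con.close()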
##############
# CASE 1066 #
##############
def test_1066(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select count(name) from orm_project a, auth_user b where a.user_id = b.id and b.username='_anonuser';"
cursor.execute(query)
data = cursor.fetchone()
        # fetchone() returns a 1-tuple, so compare its first element
        self.failUnless(data[0] >= 1)
##############
# CASE 1071 #
##############
def test_1071(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select name from orm_release;"
cursor.execute(query)
data = cursor.fetchall()
for i in range(0,4):
data[i] = data[i][0]
data.sort()
print(data)
json_parse = json.loads(open('toasterconf.json').read())
json_data = []
for i in range (0,4):
json_data.append(json_parse['releases'][i]['name'])
json_data.sort()
print(json_data)
self.failUnless(data == json_data)
##############
# CASE 1072 #
##############
def test_1072(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select value from orm_toastersetting where name like 'DEFCONF%';"
cursor.execute(query)
data = cursor.fetchall()
for i in range(0,6):
data[i] = data[i][0]
print(data)
json_parse = json.loads(open('toasterconf.json').read())
json_data=json_parse['config']
json_data = json_data.values()
print(json_data)
        self.failUnless(sorted(data) == sorted(json_data))
##############
# CASE 1074 #
##############
def test_1074(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select name from orm_layersource;"
cursor.execute(query)
data = cursor.fetchall()
for i in range(0,3):
data[i] = data[i][0]
print(data)
json_parse = json.loads(open('toasterconf.json').read())
json_data = []
for i in range(0,3):
json_data.append(json_parse['layersources'][i]['name'])
print(json_data)
self.failUnless(set(data) == set(json_data))
##############
# CASE 1075 #
##############
def test_1075(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select value from orm_toastersetting where name like 'DEFAULT_RELEASE';"
cursor.execute(query)
data = cursor.fetchall()
data = data[0][0]
print(data)
json_parse = json.loads(open('toasterconf.json').read())
json_data = json_parse['defaultrelease']
print(json_data)
self.failUnless(set(data) == set(json_data))
##############
# CASE 1076 #
##############
def test_1076(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
print('Checking branches for "Local Yocto Project"')
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select name from orm_branch where layer_source_id=1;"
cursor.execute(query)
data = cursor.fetchall()
        length = len(data)
        try:
            for i in range(0, length):
                data[i] = data[i][0]
        except Exception:
            pass
print(data)
json_parse = json.loads(open('toasterconf.json').read())
json_location = json_parse['layersources'][0]['name']
print(json_location)
json_data = json_parse['layersources'][0]['branches']
print(json_data)
self.failUnless(set(data) == set(json_data))
print('Checking branches for "OpenEmbedded"')
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select name from orm_branch where layer_source_id=3;"
cursor.execute(query)
data = cursor.fetchall()
        length = len(data)
        for i in range(0, length):
data[i] = data[i][0]
print(data)
json_parse = json.loads(open('toasterconf.json').read())
json_location = json_parse['layersources'][1]['name']
print(json_location)
json_data = json_parse['layersources'][1]['branches']
print(json_data)
self.failUnless(set(data) == set(json_data))
print('Checking branches for "Imported layers"')
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select name from orm_branch where layer_source_id=2;"
cursor.execute(query)
data = cursor.fetchall()
        length = len(data)
        for i in range(0, length):
data[i] = data[i][0]
print(data)
json_parse = json.loads(open('toasterconf.json').read())
json_location = json_parse['layersources'][2]['name']
print(json_location)
json_data = json_parse['layersources'][2]['branches']
print(json_data)
self.failUnless(set(data) == set(json_data))
##############
# CASE 1077 #
##############
def test_1077(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select name from orm_bitbakeversion;"
cursor.execute(query)
data = cursor.fetchall()
for i in range(0,4):
data[i] = data[i][0]
print(data)
json_parse = json.loads(open('toasterconf.json').read())
json_data = []
for i in range(0,4):
json_data.append(json_parse['bitbake'][i]['name'])
print(json_data)
self.failUnless(set(data) == set(json_data))
##############
# CASE 1083 #
##############
def test_1083(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_id("new-project-button").click()
self.driver.find_element_by_id("new-project-name").send_keys("new-test-project")
self.driver.find_element_by_id("create-project-button").click()
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select count(name) from orm_project where name = 'new-test-project';"
cursor.execute(query)
data = cursor.fetchone()
print('data: %s' % data)
        self.failUnless(data[0] >= 1)
##############
# CASE 1084 #
##############
def test_1084(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_id("new-project-button").click()
self.driver.find_element_by_id("new-project-name").send_keys("new-default-project")
self.driver.find_element_by_id("create-project-button").click()
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select a.name from orm_release a, orm_project b where a.id = b.release_id and b.name = 'new-default-project' limit 1;"
cursor.execute(query)
db_data = str(cursor.fetchone()[0])
json_parse = json.loads(open('toasterconf.json').read())
json_data = str(json_parse['defaultrelease'])
self.failUnless(db_data == json_data)
##############
# CASE 1088 #
##############
def test_1088(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_css_selector("a[href='/toastergui/projects/']").click()
self.driver.find_element_by_link_text('new-default-project').click()
self.driver.find_element_by_id('project-change-form-toggle').click()
self.driver.find_element_by_id('project-name-change-input').clear()
self.driver.find_element_by_id('project-name-change-input').send_keys('new-name')
self.driver.find_element_by_id('project-name-change-btn').click()
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select count(name) from orm_project where name = 'new-name';"
cursor.execute(query)
data = cursor.fetchone()[0]
self.failUnless(data == 1)
#reseting project name
self.driver.find_element_by_id('project-change-form-toggle').click()
self.driver.find_element_by_id('project-name-change-input').clear()
self.driver.find_element_by_id('project-name-change-input').send_keys('new-default-project')
self.driver.find_element_by_id('project-name-change-btn').click()
##############
# CASE 1089 #
##############
def test_1089(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_css_selector("a[href='/toastergui/projects/']").click()
self.driver.find_element_by_link_text('new-default-project').click()
self.driver.find_element_by_id('change-machine-toggle').click()
self.driver.find_element_by_id('machine-change-input').clear()
self.driver.find_element_by_id('machine-change-input').send_keys('qemuarm64')
# self.driver.find_element_by_id('machine-change-input').send_keys(Keys.RETURN)
self.driver.find_element_by_id('machine-change-btn').click()
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select count(id) from orm_projectvariable where name like 'machine' and value like 'qemuarm64';"
cursor.execute(query)
data = cursor.fetchone()[0]
self.failUnless(data == 1)
#resetting machine to default value
self.driver.find_element_by_id('change-machine-toggle').click()
self.driver.find_element_by_id('machine-change-input').clear()
self.driver.find_element_by_id('machine-change-input').send_keys('qemux86')
self.driver.find_element_by_id('machine-change-input').send_keys(Keys.RETURN)
self.driver.find_element_by_id('machine-change-btn').click()
##############
# CASE 1090 #
##############
def test_1090(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select username from auth_user where is_superuser = 1;"
cursor.execute(query)
data = cursor.fetchall()
try:
data = data[0][0]
        except Exception:
pass
print(data)
self.failUnless(data == 'toaster_admin')
##############
# CASE 1091 #
##############
def test_1091(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
self.driver.get(self.base_url)
self.driver.find_element_by_css_selector("a[href='/toastergui/projects/']").click()
self.driver.find_element_by_link_text('new-default-project').click()
self.driver.find_element_by_id('release-change-toggle').click()
dropdown = self.driver.find_element_by_css_selector('select')
for option in dropdown.find_elements_by_tag_name('option'):
if option.text == 'Local Yocto Project':
option.click()
self.driver.find_element_by_id('change-release-btn').click()
#wait for the changes to register in the DB
time.sleep(1)
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select count(*) from orm_layer_version a, orm_projectlayer b, orm_project c where a.\"commit\"=\"HEAD\" and a.id = b.layercommit_id and b.project_id=c.id and c.name='new-default-project';"
cursor.execute(query)
data = cursor.fetchone()[0]
#resetting release to default
self.driver.find_element_by_id('release-change-toggle').click()
dropdown = self.driver.find_element_by_css_selector('select')
for option in dropdown.find_elements_by_tag_name('option'):
if option.text == 'Yocto Project master':
option.click()
self.driver.find_element_by_id('change-release-btn').click()
#wait for the changes to register in the DB
time.sleep(1)
self.failUnless(data == 3)
##############
# CASE 1092 #
##############
def test_1092(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
self.driver.maximize_window()
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select a.name, a.value from orm_projectvariable a, orm_project b where a.project_id = b.id and b.name = 'new-default-project';"
cursor.execute(query)
data = dict(cursor.fetchall())
print(data)
default_values = {u'IMAGE_INSTALL_append': u'', u'PACKAGE_CLASSES': u'package_rpm', u'MACHINE': u'qemux86', u'SDKMACHINE': u'x86_64', u'DISTRO': u'poky', u'IMAGE_FSTYPES': u'ext3 jffs2 tar.bz2'}
self.failUnless(data == default_values)
##############
# CASE 1093 #
##############
def test_1093(self):
self.case_no = self.get_case_number()
self.log.info(' CASE %s log: ' % str(self.case_no))
#get initial values
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select layercommit_id from orm_projectlayer a, orm_project b where a.project_id=b.id and b.name='new-default-project';"
cursor.execute(query)
data_initial = cursor.fetchall()
print(data_initial)
self.driver.maximize_window()
        self.driver.get('localhost:8000')  # self.base_url
self.driver.find_element_by_css_selector("a[href='/toastergui/projects/']").click()
self.driver.find_element_by_link_text('new-default-project').click()
self.driver.find_element_by_id('release-change-toggle').click()
dropdown = self.driver.find_element_by_css_selector('select')
for option in dropdown.find_elements_by_tag_name('option'):
if option.text == 'Local Yocto Project':
option.click()
self.driver.find_element_by_id('change-release-btn').click()
#wait for the changes to register in the DB
time.sleep(1)
#get changed values
con=sqlite.connect('toaster.sqlite')
cursor = con.cursor()
query = "select layercommit_id from orm_projectlayer a, orm_project b where a.project_id=b.id and b.name='new-default-project';"
cursor.execute(query)
data_changed = cursor.fetchall()
print(data_changed)
#resetting release to default
self.driver.find_element_by_id('release-change-toggle').click()
dropdown = self.driver.find_element_by_css_selector('select')
for option in dropdown.find_elements_by_tag_name('option'):
if option.text == 'Yocto Project master':
option.click()
self.driver.find_element_by_id('change-release-btn').click()
#wait for the changes to register in the DB
time.sleep(1)
self.failUnless(data_initial != data_changed)
|
the-stack_0_27262
|
"""
Forsteri Client
Copyright (c) 2014, 2015 Andrew Hawkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import sys
import wx
from forsteri import gui
class ForsteriClient(object):
"""
"""
def __init__(self, username, path):
self.USERNAME = username
self.PATH = path
def run(self):
# Create the application.
app = wx.App()
# Check if the splash screen flag was given.
if "-s" in sys.argv:
# Create the bitmap used for the splash screen.
here = os.path.abspath(os.path.dirname(__file__))
bitmap = wx.Bitmap(os.path.join(here, "..", "data", "img",
"logo.png"), wx.BITMAP_TYPE_PNG)
# Create the splash screen.
splash = wx.SplashScreen(bitmap,
wx.SPLASH_CENTRE_ON_SCREEN|wx.SPLASH_TIMEOUT, 1000, None)
# Create the main frame.
gui.Main(None, style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER)
# Start the application main loop.
app.MainLoop()
if __name__ == "__main__":
    # ForsteriClient requires a username and a data path; the values below are
    # illustrative placeholders, not defaults taken from the original project.
    CLIENT = ForsteriClient(username=os.environ.get("USER", "user"), path=os.getcwd())
CLIENT.run()
|
the-stack_0_27263
|
#!/usr/bin/env python
# Set True to force compile native C-coded extension providing direct access
# to inotify's syscalls. If set to False this extension will only be compiled
# if no inotify interface from ctypes is found.
compile_ext_mod = False
# import statements
import os
import sys
import distutils.extension
from distutils.util import get_platform
try:
# First try to load most advanced setuptools setup.
from setuptools import setup
except:
# Fall back if setuptools is not installed.
from distutils.core import setup
platform = get_platform()
# check Python's version
if sys.version_info < (2, 4):
sys.stderr.write('This module requires at least Python 2.4\n')
sys.exit(1)
# check linux platform
if not platform.startswith('linux') and not platform.startswith('freebsd'):
sys.stderr.write("inotify is not available on %s\n" % platform)
sys.exit(1)
classif = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Filesystems',
'Topic :: System :: Monitoring',
]
# Select branch
if sys.version_info >= (3, 0):
package_dir = {'': 'python3'}
else:
package_dir = {'': 'python2'}
def should_compile_ext_mod():
try:
import ctypes
import ctypes.util
except:
return True
try_libc_name = 'c'
if platform.startswith('freebsd'):
try_libc_name = 'inotify'
libc_name = None
try:
libc_name = ctypes.util.find_library(try_libc_name)
except:
        pass  # Will attempt to load it with None anyway.
libc = ctypes.CDLL(libc_name)
# Eventually check that libc has needed inotify bindings.
if (not hasattr(libc, 'inotify_init') or
not hasattr(libc, 'inotify_add_watch') or
not hasattr(libc, 'inotify_rm_watch')):
return True
return False
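# A rough, illustrative sketch of what the ctypes probe above amounts to on a
# typical glibc system (not part of the original script):
#   import ctypes, ctypes.util
#   libc = ctypes.CDLL(ctypes.util.find_library('c'))
#   hasattr(libc, 'inotify_init')  # True when the binding is reachable via ctypes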
ext_mod = []
if compile_ext_mod or should_compile_ext_mod():
# add -fpic if x86_64 arch
if platform in ["linux-x86_64"]:
os.environ["CFLAGS"] = "-fpic"
# sources for ext module
ext_mod_src = ['common/inotify_syscalls.c']
# dst for ext module
ext_mod.append(distutils.extension.Extension('inotify_syscalls',
ext_mod_src))
setup(
name='pyinotify',
version='0.9.6',
description='Linux filesystem events monitoring',
author='Sebastien Martini',
author_email='[email protected]',
platforms='Linux',
classifiers=classif,
url='http://github.com/seb-m/pyinotify',
download_url='http://pypi.python.org/pypi/pyinotify',
ext_modules=ext_mod,
py_modules=['pyinotify'],
package_dir=package_dir,
)
|
the-stack_0_27268
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import torch
import torch.utils.data
from opts import opts
from MOC_utils.model import create_model, load_model, save_model, load_coco_pretrained_model, load_imagenet_pretrained_model
from trainer.logger import Logger
from datasets.init_dataset import get_dataset
from trainer.moc_trainer import MOCTrainer
from inference.stream_inference import stream_inference
from ACT import frameAP
import numpy as np
import random
import tensorboardX
GLOBAL_SEED = 317
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
def worker_init_fn(dump):
set_seed(GLOBAL_SEED)
def main(opt):
set_seed(opt.seed)
torch.backends.cudnn.benchmark = True
print()
print('dataset: ' + opt.dataset + ' task: ' + opt.task)
Dataset = get_dataset(opt.dataset)
opt = opts().update_dataset(opt, Dataset)
train_writer = tensorboardX.SummaryWriter(log_dir=os.path.join(opt.log_dir, 'train'))
epoch_train_writer = tensorboardX.SummaryWriter(log_dir=os.path.join(opt.log_dir, 'train_epoch'))
val_writer = tensorboardX.SummaryWriter(log_dir=os.path.join(opt.log_dir, 'val'))
epoch_val_writer = tensorboardX.SummaryWriter(log_dir=os.path.join(opt.log_dir, 'val_epoch'))
logger = Logger(opt, epoch_train_writer, epoch_val_writer)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
model = create_model(opt.arch, opt.branch_info, opt.head_conv, opt.K)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
start_epoch = opt.start_epoch
if opt.pretrain_model == 'coco':
model = load_coco_pretrained_model(opt, model)
else:
model = load_imagenet_pretrained_model(opt, model)
if opt.load_model != '':
model, optimizer, _, _ = load_model(model, opt.load_model, optimizer, opt.lr, opt.ucf_pretrain)
trainer = MOCTrainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
train_loader = torch.utils.data.DataLoader(
Dataset(opt, 'train'),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=opt.pin_memory,
drop_last=True,
worker_init_fn=worker_init_fn
)
val_loader = torch.utils.data.DataLoader(
Dataset(opt, 'val'),
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.num_workers,
pin_memory=opt.pin_memory,
drop_last=True,
worker_init_fn=worker_init_fn
)
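    # Note: worker_init_fn re-seeds every DataLoader worker process with the same
    # GLOBAL_SEED (see set_seed above), so all workers share one random stream.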
print('training...')
print('GPU allocate:', opt.chunk_sizes)
best_ap = 0
best_epoch = 0
stop_step = 0
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        print('epoch is ', epoch)
log_dict_train = trainer.train(epoch, train_loader, train_writer)
logger.write('epoch: {} |'.format(epoch))
for k, v in log_dict_train.items():
            logger.scalar_summary('epoch/{}'.format(k), v, epoch, 'train')
logger.write('train: {} {:8f} | '.format(k, v))
logger.write('\n')
if opt.save_all and not opt.auto_stop:
time_str = time.strftime('%Y-%m-%d-%H-%M')
model_name = 'model_[{}]_{}.pth'.format(epoch, time_str)
save_model(os.path.join(opt.save_dir, model_name),
model, optimizer, epoch, log_dict_train['loss'])
else:
model_name = 'model_last.pth'
save_model(os.path.join(opt.save_dir, model_name),
model, optimizer, epoch, log_dict_train['loss'])
# this step evaluate the model
if opt.val_epoch:
with torch.no_grad():
log_dict_val = trainer.val(epoch, val_loader, val_writer)
for k, v in log_dict_val.items():
                logger.scalar_summary('epoch/{}'.format(k), v, epoch, 'val')
logger.write('val: {} {:8f} | '.format(k, v))
logger.write('\n')
if opt.auto_stop:
tmp_rgb_model = opt.rgb_model
tmp_flow_model = opt.flow_model
if opt.rgb_model != '':
opt.rgb_model = os.path.join(opt.rgb_model, model_name)
if opt.flow_model != '':
opt.flow_model = os.path.join(opt.flow_model, model_name)
stream_inference(opt)
ap = frameAP(opt, print_info=opt.print_log)
os.system("rm -rf tmp")
if ap > best_ap:
best_ap = ap
best_epoch = epoch
saved1 = os.path.join(opt.save_dir, model_name)
saved2 = os.path.join(opt.save_dir, 'model_best.pth')
os.system("cp " + str(saved1) + " " + str(saved2))
if stop_step < len(opt.lr_step) and epoch >= opt.lr_step[stop_step]:
model, optimizer, _, _ = load_model(
model, os.path.join(opt.save_dir, 'model_best.pth'), optimizer, opt.lr)
opt.lr = opt.lr * 0.1
logger.write('Drop LR to ' + str(opt.lr) + '\n')
print('Drop LR to ' + str(opt.lr))
print('load epoch is ', best_epoch)
for param_group in optimizer.param_groups:
param_group['lr'] = opt.lr
torch.cuda.empty_cache()
trainer = MOCTrainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
stop_step = stop_step + 1
opt.rgb_model = tmp_rgb_model
opt.flow_model = tmp_flow_model
else:
# this step drop lr
if epoch in opt.lr_step:
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
logger.write('Drop LR to ' + str(lr) + '\n')
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if opt.auto_stop:
print('best epoch is ', best_epoch)
logger.close()
if __name__ == '__main__':
os.system("rm -rf tmp")
opt = opts().parse()
main(opt)
|
the-stack_0_27269
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cifar dataset module."""
from tensorflow_datasets import testing
from tensorflow_datasets.image_classification import cifar
# testing/cifar.py generates fake input data
class Cifar10Test(testing.DatasetBuilderTestCase):
DATASET_CLASS = cifar.Cifar10
SPLITS = {
"train": 10,
"test": 2,
}
class Cifar100Test(testing.DatasetBuilderTestCase):
DATASET_CLASS = cifar.Cifar100
SPLITS = {
"train": 10,
"test": 2,
}
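# The SPLITS mappings above give the number of fake examples that testing/cifar.py
# generates per split; DatasetBuilderTestCase checks that the builder yields
# exactly those counts when run against the fake data.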
if __name__ == "__main__":
testing.test_main()
|
the-stack_0_27270
|
""" Herein we test if all the different wiki interactions work.
"""
import pytest
@pytest.fixture
def wiki_client(app, session, client):
"""Fixture for wiki tests.
"""
from pygameweb.wiki.views import add_wiki_blueprint
from pygameweb.user.views import add_user_blueprint
add_user_blueprint(app)
add_wiki_blueprint(app)
return client
@pytest.fixture
def user(app, session, wiki_client):
""" gives us a user who is a member.
"""
from pygameweb.user.models import User
from flask_security.utils import encrypt_password
user = User(name='joe',
email='[email protected]',
password=encrypt_password('password'))
session.add(user)
session.commit()
# https://flask-login.readthedocs.org/en/latest/#fresh-logins
with wiki_client.session_transaction() as sess:
sess['user_id'] = user.id
sess['_fresh'] = True
return user
@pytest.fixture
def member(session, user):
"""
"""
from pygameweb.user.models import Group
group = Group(name='members', title='Member')
user.roles.append(group)
session.add(group)
session.commit()
return group
@pytest.fixture
def admin(session, user):
"""
"""
from pygameweb.user.models import Group
group = Group(name='admin', title='Admin')
user.roles.append(group)
session.add(group)
group = Group(name='members', title='Member')
user.roles.append(group)
session.add(group)
session.commit()
return group
@pytest.fixture
def wiki_page_info(session):
import datetime
from pygameweb.wiki.models import Wiki
first_content = 'some content<br/> yo.'
second_content = 'We all love content.'
first_changes = 'first wiki page version is done'
second_changes = 'new changes to the wiki page'
wiki_page = Wiki(link='blablabla',
title='Yo title',
datetimeon=datetime.datetime(2017, 1, 12),
content=first_content,
changes=first_changes,
latest=1)
session.add(wiki_page)
session.commit()
first_id = wiki_page.id
# change the title.
wiki_page.new_version(session)
wiki_page.title = 'A new title for a new day'
wiki_page.content = second_content
wiki_page.changes = second_changes
session.add(wiki_page)
session.commit()
return [wiki_page, first_content, second_content,
first_id, first_changes, second_changes]
def test_wiki_link(wiki_client, session, wiki_page_info):
""" works when we pass the correct wiki link.
"""
wiki_page, first_content, second_content, first_id, _, _ = wiki_page_info
second_id = wiki_page.id
assert second_id != first_id
for link in ['/wiki/blablabla', '/wiki/blablabla/']:
resp = wiki_client.get(link)
assert resp.status_code == 200
assert b'A new title for a new day' in resp.data
resp = wiki_client.get('/wiki/blablabla?action=source')
assert resp.status_code == 200
expected = b'A new title for a new day'
assert expected not in resp.data, 'because only the content is shown'
assert second_content in resp.data.decode('utf-8')
resp = wiki_client.get(f'/wiki/blablabla?action=source&id={first_id}')
assert (first_content in
resp.data.decode('utf-8')), 'because old page version still there'
url = ('/wiki/blablabla?action=diff&oldid={oldid}&newid={newid}'
.format(oldid=first_id, newid=second_id))
resp = wiki_client.get(url)
assert b'<div class="delete">-' in resp.data, 'some lines are deleted'
assert b'<div class="insert">+' in resp.data, 'some lines are inserted'
resp = wiki_client.get('/wiki/blablabla?action=history')
assert resp.status_code == 302, 'login member required'
def test_wiki_recent(wiki_client, session, wiki_page_info):
""" works when we pass the correct wiki link.
"""
(_, _, _, _, first_changes, second_changes) = wiki_page_info
resp = wiki_client.get('/wiki/recent')
assert resp.status_code == 200
resp = wiki_client.get('/wiki/recent.php')
assert resp.status_code == 200
assert first_changes in resp.data.decode('utf-8')
assert second_changes in resp.data.decode('utf-8')
def test_wiki_link_login(wiki_client, session, wiki_page_info, member):
resp = wiki_client.get('/wiki/blablabla?action=history')
assert resp.status_code == 200
assert b'new changes to the wiki page' in resp.data
assert b'first wiki page version is done' in resp.data
def test_wiki_locked(wiki_client, session, wiki_page_info, user):
""" stops a page from being edited or reverted.
"""
wiki_page, first_content, second_content, first_id, _, _ = wiki_page_info
wiki_page.locked = True
session.add(wiki_page)
session.commit()
resp = wiki_client.get('/wiki/blablabla/edit')
assert resp.status_code == 302
assert resp.location == 'http://localhost/'
data = dict(changes='I have changed.', content='some content')
resp = wiki_client.post('/wiki/blabla/edit',
data=data)
assert resp.status_code == 302
assert resp.location == 'http://localhost/'
def test_wiki_locked_admin(wiki_client, session, wiki_page_info, user, admin):
""" admin should be able to edit it, and revert it.
"""
wiki_page, first_content, second_content, first_id, _, _ = wiki_page_info
wiki_page.locked = True
session.add(wiki_page)
session.commit()
resp = wiki_client.get('/wiki/blablabla/edit')
assert resp.status_code == 200
data = dict(changes='I have changed.', content='some content')
resp = wiki_client.post('/wiki/blabla/edit',
data=data)
assert resp.status_code == 302
assert resp.location == 'http://localhost/wiki/blabla'
def test_wiki_new_page(wiki_client, session, member, user):
""" is editable when we go there.
"""
from pygameweb.wiki.models import Wiki
resp = wiki_client.get('/wiki/blabla')
assert resp.status_code == 404, 'now there is no blabla page.'
resp = wiki_client.get('/wiki/blabla/edit')
assert resp.status_code == 200
assert b'blabla' in resp.data
data = dict(changes='I have changed.', content='some content')
resp = wiki_client.post('/wiki/blabla/edit',
data=data,
follow_redirects=True)
assert resp.status_code == 200
assert b'blabla' in resp.data
assert b'some content' in resp.data
wik = session.query(Wiki).filter(Wiki.users_id == user.id).first()
assert wik.content == data['content'], 'user id added to this version'
resp = wiki_client.get('/wiki/blabla')
assert resp.status_code == 200
assert b'blabla' in resp.data
assert b'some content' in resp.data, 'now the blabla page exists'
resp = wiki_client.get('/wiki/blabla?action=history')
assert resp.status_code == 200
assert b'I have changed.' in resp.data
def test_wiki_index(wiki_client, session):
""" is shown as the default.
"""
from pygameweb.wiki.models import Wiki
session.add(Wiki(link='index', title='Yo title', latest=1))
session.commit()
resp = wiki_client.get('/wiki/')
assert resp.status_code == 200
assert b'Yo title' in resp.data
|
the-stack_0_27271
|
import numpy as np
import torch
from torch.autograd import Variable
from helpers.utils import progress_bar
from helpers.loaders import batch_gen
# Train function
def CrossEnt(x, y):
return (- x * torch.log(y.clamp(min=1e-7))).sum()
def IsInside(x, Y):
for y in Y:
if x is y:
return True
return False
def RandomTransform(x, device):
    # Add small Gaussian pixel noise, then warp with a per-sample affine grid:
    # the horizontal scale is jittered around 1 and may flip sign (random mirror),
    # the vertical scale is jittered around 1, and translations stay zero.
    x = x + torch.cuda.FloatTensor(x.size()).normal_(0, 0.05)
    theta = torch.zeros((x.size(0), 2, 3)).to(device)
    sign = (torch.randint(0, 2, size=(x.size(0), 1, 1), dtype=torch.float) * 2 - 1).to(device)
    theta[:, 0:1, 0:1] = torch.cuda.FloatTensor(x.size(0), 1, 1).normal_(1, 0.1) * sign
    theta[:, 1:2, 1:2] = torch.cuda.FloatTensor(x.size(0), 1, 1).normal_(1, 0.1)
    return torch.nn.functional.grid_sample(x, grid = torch.nn.functional.affine_grid(theta, x.size()))
def train(epoch, net, criterion, optimizer, logfile, loader, device, wmloader=False, tune_all=True, ex_datas = [], ex_net = None, wm2_loader = None, n_classes=None, EWC_coef = 0., Fisher = None, init_params = None, EWC_immune = [], afs_bsize=0, extra_only = False):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
train_loss_wm = 0
correct = 0
total = 0
iteration = -1
wm_correct = 0
print_every = 5
l_lambda = 1.2
# update only the last layer
if not tune_all:
if type(net) is torch.nn.DataParallel:
net.module.freeze_hidden_layers()
else:
net.freeze_hidden_layers()
# get the watermark images
wminputs, wmtargets = [], []
if wmloader:
for wm_idx, (wminput, wmtarget) in enumerate(wmloader):
wminput, wmtarget = wminput.to(device), wmtarget.to(device)
wminputs.append(wminput)
wmtargets.append(wmtarget)
# the wm_idx to start from
wm_idx = np.random.randint(len(wminputs))
if afs_bsize > 0:
afs_idx = 0
for batch_idx, (inputs, targets) in enumerate(loader):
iteration += 1
inputs, targets = inputs.to(device), targets.to(device)
# add wmimages and targets
if wmloader:
inputs = torch.cat([inputs, wminputs[(wm_idx + batch_idx) % len(wminputs)]], dim=0)
targets = torch.cat([targets, wmtargets[(wm_idx + batch_idx) % len(wminputs)]], dim=0)
if afs_bsize > 0:
inputs = torch.cat([inputs, net.afs_inputs[afs_idx:afs_idx + afs_bsize]], dim = 0)
targets = torch.cat([targets, net.afs_targets[afs_idx:afs_idx + afs_bsize]], dim=0)
afs_idx = (afs_idx + afs_bsize) % net.afs_inputs.size(0)
# add data from extra sources
original_batch_size = targets.size(0)
extra_only_tag = True
for _loader in ex_datas:
_input, _target = next(_loader)
_input, _target = _input.to(device), _target.to(device)
if _target[0].item() < -1:
with torch.no_grad():
_, __target = torch.max(ex_net(_input).data, 1)
_target = (__target + _target + 20000)%n_classes
elif _target[0].item() == -1 or ex_net!=None:
with torch.no_grad():
_output = ex_net(_input)
_, _target = torch.max(_output.data, 1)
_target = _target.to(device)
if extra_only and extra_only_tag:
inputs = _input
targets = _target
extra_only_tag = False
else:
inputs = torch.cat([inputs, _input], dim=0)
targets = torch.cat([targets, _target], dim=0)
outputs = net(inputs)
loss = criterion(outputs, targets)
if EWC_coef > 0:
for param, fisher, init_param in zip(net.parameters(), Fisher, init_params):
if IsInside(param, EWC_immune):
continue
loss = loss + (0.5 * EWC_coef * fisher.clamp(max = 1. / optimizer.param_groups[0]['lr'] / EWC_coef) * ((param - init_param)**2)).sum()
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * float(correct) / total, correct, total))
with open(logfile, 'a') as f:
f.write('Epoch: %d\n' % epoch)
f.write('Loss: %.3f | Acc: %.3f%% (%d/%d)\n'
% (train_loss / (batch_idx + 1), 100. * float(correct) / total, correct, total))
# train function in a teacher-student fashion
def train_teacher(epoch, net, criterion, optimizer, use_cuda, logfile, loader, wmloader):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
iteration = -1
# get the watermark images
wminputs, wmtargets = [], []
if wmloader:
for wm_idx, (wminput, wmtarget) in enumerate(wmloader):
if use_cuda:
wminput, wmtarget = wminput.cuda(), wmtarget.cuda()
wminputs.append(wminput)
wmtargets.append(wmtarget)
# the wm_idx to start from
wm_idx = np.random.randint(len(wminputs))
for batch_idx, (inputs, targets) in enumerate(loader):
iteration += 1
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
if wmloader:
# add wmimages and targets
inputs = torch.cat([inputs, wminputs[(wm_idx + batch_idx) % len(wminputs)]], dim=0)
targets = torch.cat([targets, wmtargets[(wm_idx + batch_idx) % len(wminputs)]], dim=0)
inputs, targets = Variable(inputs), Variable(targets)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * float(correct) / total, correct, total))
with open(logfile, 'a') as f:
f.write('Epoch: %d\n' % epoch)
f.write('Loss: %.3f | Acc: %.3f%% (%d/%d)\n'
% (train_loss / (batch_idx + 1), 100. * float(correct) / total, correct, total))
def test_afs(net, logfile):
net.eval()
inputs, targets = net.afs_inputs, net.afs_targets
criterion = torch.nn.CrossEntropyLoss()
with torch.no_grad():
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
loss = criterion(outputs, targets)
correct = predicted.eq(targets.data).cpu().sum()
total = inputs.size(0)
with open(logfile, 'a') as f:
        f.write('Test(afs) results:\n')
        print('Test(afs) results:')
f.write('Loss: %.3f | Acc: %.3f%% (%d/%d)\n'
% (loss, 100. * float(correct) / total, correct, total))
print('Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (loss, 100. * float(correct) / total, correct, total))
# Test function
def test(net, criterion, logfile, loader, device):
net.eval()
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(loader):
inputs, targets = inputs.to(device), targets.to(device)
with torch.no_grad():
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
loss = criterion(outputs, targets)
correct += predicted.eq(targets.data).cpu().sum()
test_loss += loss.item()
total += targets.size(0)
progress_bar(batch_idx, len(loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss / (batch_idx + 1), 100. * float(correct) / total, correct, total))
with open(logfile, 'a') as f:
f.write('Test results:\n')
f.write('Loss: %.3f | Acc: %.3f%% (%d/%d)\n'
% (test_loss / (batch_idx + 1), 100. * float(correct) / total, correct, total))
# return the acc.
return 100. * correct / total
|
the-stack_0_27272
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2019 Chaintope Inc.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_equal,
)
from test_framework.blocktools import create_colored_transaction
def assert_approx(v, vexp, vspan=0.00001):
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
class WalletGroupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
self.rpc_timewait = 120
def run_test (self):
# Mine some coins
self.nodes[0].generate(5, self.signblockprivkey_wif)
colorid1 = create_colored_transaction(2, 100, self.nodes[0])['color']
self.nodes[0].generate(1, self.signblockprivkey_wif)
self.sync_all()
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
caddr1 = [self.nodes[1].getnewaddress("", colorid1) for i in range(3)]
caddr2 = [self.nodes[2].getnewaddress("", colorid1) for i in range(3)]
caddrs = caddr1 + caddr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
[self.nodes[0].sendtoaddress(caddr, 1) for caddr in caddrs]
[self.nodes[0].sendtoaddress(caddr, 2) for caddr in caddrs]
self.nodes[0].generate(1, self.signblockprivkey_wif)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.0001)
ctxid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress("", colorid1), 2)
ctx1 = self.nodes[1].getrawtransaction(ctxid1, True)
# txid1 should have 2 input and 2 outputs
assert_equal(2, len(ctx1["vin"]))
assert_equal(2, len(ctx1["vout"]))
# one output should be 2, the other should be fee
v = [vout["value"] for vout in ctx1["vout"]]
v.sort()
assert_approx(v[0], 0.5, 0.0001)
assert_approx(v[1], 2)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.0001)
ctxid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress("", colorid1), 2)
ctx2 = self.nodes[2].getrawtransaction(ctxid2, True)
# txid2 should have 4 inputs and 3 outputs
assert_equal(4, len(ctx2["vin"]))
assert_equal(3, len(ctx2["vout"]))
# one output should be 2, the other should be fee
v = [vout["value"] for vout in ctx2["vout"]]
v.sort()
assert_approx(v[0], 1)
assert_approx(v[1], 1.5, 0.0002)
assert_approx(v[2], 2)
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
self.sync_all()
self.nodes[2].generate(1, self.signblockprivkey_wif)
amt = self.nodes[2].getwalletinfo()['balance'][colorid1]
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress("", colorid1), amount=amt)
self.sync_all()
self.nodes[0].generate(1, self.signblockprivkey_wif)
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
for i in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
tx.vout = [tx.vout[0]] * 2000
funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'], [], "ALL", self.options.scheme)
self.nodes[0].sendrawtransaction(signed_tx['hex'])
self.nodes[0].generate(1, self.signblockprivkey_wif)
self.sync_all()
# Check that we can create a transaction that only requires ~100 of our
# utxos, without pulling in all outputs and creating a transaction that
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
WalletGroupTest().main ()
|
the-stack_0_27273
|
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.annlowlevel import cachedtype
from pypy.annotation import model as annmodel
from pypy.jit.timeshifter import rvalue, rcontainer
from pypy.objspace.std.boolobject import W_BoolObject
class NewBoolDesc:
__metaclass__ = cachedtype
def __init__(self, hrtyper):
self.hrtyper = hrtyper
RGenOp = hrtyper.RGenOp
rtyper = hrtyper.rtyper
bk = rtyper.annotator.bookkeeper
s_w_bool = annmodel.unionof(bk.immutablevalue(W_BoolObject.w_False),
bk.immutablevalue(W_BoolObject.w_True))
r_w_bool = rtyper.getrepr(s_w_bool)
self.ll_False = r_w_bool.convert_const(W_BoolObject.w_False)
self.ll_True = r_w_bool.convert_const(W_BoolObject.w_True)
A = lltype.Array(lltype.typeOf(self.ll_False))
self.ll_bools = lltype.malloc(A, 2, immortal=True)
self.ll_bools[0] = self.ll_False
self.ll_bools[1] = self.ll_True
self.gv_bools = RGenOp.constPrebuiltGlobal(self.ll_bools)
self.boolsToken = RGenOp.arrayToken(A)
self.bools_gv = [RGenOp.constPrebuiltGlobal(self.ll_False),
RGenOp.constPrebuiltGlobal(self.ll_True)]
self.ptrkind = RGenOp.kindToken(r_w_bool.lowleveltype)
self.boolkind = RGenOp.kindToken(lltype.Bool)
ll_BoolObject = r_w_bool.rclass.getvtable()
self.BoolObjectBox = rvalue.redbox_from_prebuilt_value(RGenOp,
ll_BoolObject)
self.Falsebox = rvalue.redbox_from_prebuilt_value(RGenOp, False)
self.Truebox = rvalue.redbox_from_prebuilt_value(RGenOp, True)
self.boolboxes = [self.Falsebox, self.Truebox]
def _freeze_(self):
return True
def vboolfactory(self):
vbool = VirtualBool(self)
box = rvalue.PtrRedBox(self.ptrkind, known_nonzero=True)
box.content = vbool
vbool.ownbox = box
return vbool
def metafunc(self, jitstate, spacevoid, valuebox):
vbool = self.vboolfactory()
vbool.valuebox = valuebox
return vbool.ownbox
def getboolbox(self, jitstate, gv_bool, reverse=False):
if gv_bool.is_const:
flag = gv_bool.revealconst(lltype.Bool)
return self.boolboxes[flag ^ reverse]
else:
if reverse:
gv_bool = jitstate.curbuilder.genop1("bool_not", gv_bool)
return rvalue.IntRedBox(self.boolkind, gv_bool)
def genbooleq(self, jitstate, gv_bool1, gv_bool2, reverse=False):
if gv_bool1.is_const:
reverse ^= not gv_bool1.revealconst(lltype.Bool)
return self.getboolbox(jitstate, gv_bool2, reverse)
elif gv_bool2.is_const:
reverse ^= not gv_bool2.revealconst(lltype.Bool)
return self.getboolbox(jitstate, gv_bool1, reverse)
else:
# XXX maybe gv_bool1 == gv_bool2 :-)
curbuilder = jitstate.curbuilder
gv_int1 = curbuilder.genop1("cast_bool_to_int", gv_bool1)
gv_int2 = curbuilder.genop1("cast_bool_to_int", gv_bool2)
if reverse:
gv_res = curbuilder.genop2("int_ne", gv_int1, gv_int2)
else:
gv_res = curbuilder.genop2("int_eq", gv_int1, gv_int2)
return rvalue.IntRedBox(self.boolkind, gv_res)
class VirtualBool(rcontainer.VirtualContainer):
def __init__(self, newbooldesc):
self.newbooldesc = newbooldesc
#self.valuebox = ... set independently
def enter_block(self, incoming, memo):
contmemo = memo.containers
if self not in contmemo:
contmemo[self] = None
self.valuebox.enter_block(incoming, memo)
def force_runtime_container(self, jitstate):
desc = self.newbooldesc
valuebox = self.valuebox
if valuebox.is_constant():
value = valuebox.genvar.revealconst(lltype.Bool)
genvar = desc.bools_gv[value]
else:
gv_index = valuebox.getgenvar(jitstate)
gv_index = jitstate.curbuilder.genop1("cast_bool_to_int", gv_index)
genvar = jitstate.curbuilder.genop_getarrayitem(
desc.boolsToken,
desc.gv_bools,
gv_index)
self.ownbox.setgenvar_hint(genvar, known_nonzero=True)
self.ownbox.content = None
def freeze(self, memo):
contmemo = memo.containers
assert self not in contmemo # contmemo no longer used
result = contmemo[self] = FrozenBool(self.newbooldesc)
frozenbox = self.valuebox.freeze(memo)
result.fz_valuebox = frozenbox
return result
def copy(self, memo):
contmemo = memo.containers
assert self not in contmemo # contmemo no longer used
result = contmemo[self] = VirtualBool(self.newbooldesc)
result.valuebox = self.valuebox.copy(memo)
result.ownbox = self.ownbox.copy(memo)
return result
def replace(self, memo):
contmemo = memo.containers
assert self not in contmemo # contmemo no longer used
contmemo[self] = None
self.valuebox = self.valuebox.replace(memo)
self.ownbox = self.ownbox.replace(memo)
def op_getfield(self, jitstate, fielddesc):
if fielddesc.fieldindex == 0: # the __class__ field
return self.newbooldesc.BoolObjectBox
else:
# assume it is the 'boolval' field
return self.valuebox
def op_ptreq(self, jitstate, otherbox, reverse):
desc = self.newbooldesc
if otherbox.is_constant():
addr = otherbox.genvar.revealconst(llmemory.Address)
if addr == llmemory.cast_ptr_to_adr(desc.ll_False):
return desc.getboolbox(jitstate, self.valuebox.genvar,
not reverse)
elif addr == llmemory.cast_ptr_to_adr(desc.ll_True):
return desc.getboolbox(jitstate, self.valuebox.genvar,
reverse)
else:
return desc.boolboxes[False ^ reverse]
othercontent = otherbox.content
if not isinstance(othercontent, VirtualBool):
return None # no clue
return desc.genbooleq(jitstate,
self.valuebox.genvar,
othercontent.valuebox.genvar,
reverse)
class FrozenBool(rcontainer.FrozenContainer):
def __init__(self, newbooldesc):
self.newbooldesc = newbooldesc
#self.fz_valuebox initialized later
def exactmatch(self, vstruct, outgoingvarboxes, memo):
# XXX code duplication with rcontainer...
assert isinstance(vstruct, rcontainer.VirtualContainer)
contmemo = memo.containers
if self in contmemo:
ok = vstruct is contmemo[self]
if not ok:
outgoingvarboxes.append(vstruct.ownbox)
return ok
if vstruct in contmemo:
assert contmemo[vstruct] is not self
outgoingvarboxes.append(vstruct.ownbox)
return False
if not isinstance(vstruct, VirtualBool):
if not memo.force_merge:
raise rvalue.DontMerge
outgoingvarboxes.append(vstruct.ownbox)
return False
contmemo[self] = vstruct
contmemo[vstruct] = self
return self.fz_valuebox.exactmatch(vstruct.valuebox,
outgoingvarboxes,
memo)
def unfreeze(self, incomingvarboxes, memo):
contmemo = memo.containers
if self in contmemo:
return contmemo[self]
vbool = self.newbooldesc.vboolfactory()
ownbox = vbool.ownbox
contmemo[self] = ownbox
vbool.valuebox = self.fz_valuebox.unfreeze(incomingvarboxes, memo)
return ownbox
|
the-stack_0_27274
|
#! /usr/bin/python
#-*-coding: utf8-*-
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4 import uic
from qtui.model import Answer
import os, os.path
class TabPage(QWidget):
def __init__(self, content=None):
super(TabPage, self).__init__()
self.ui = uic.loadUi(os.path.join(os.environ['AUTOEXAM_FOLDER'], "qtui/ui/tabpage.ui"), self)
if content:
self.addContent(content)
def addContent(self, content):
self.ui.rightBox.setChecked(content.valid)
self.ui.fixedBox.setChecked(content.fixed_position)
self.ui.questionEdit.setPlainText(content.text)
def dump(self):
right = self.ui.rightBox.isChecked()
fixed = self.ui.fixedBox.isChecked()
text = str(self.ui.questionEdit.toPlainText().toUtf8()).decode('utf-8')
return Answer(right, fixed, text)
|
the-stack_0_27275
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import quandl
import scipy.optimize as sco
plt.style.use('fivethirtyeight')
np.random.seed(777)
quandl.ApiConfig.api_key = 'ZuxVUkHq16s-oe-bimo1'
stocks = ['AAPL','AMZN','GOOGL','FB']
data = quandl.get_table('WIKI/PRICES', ticker = stocks,
qopts = { 'columns': ['date', 'ticker', 'adj_close'] },
date = { 'gte': '2016-1-1', 'lte': '2017-12-31' }, paginate=True)
print(data.head())
df = data.set_index('date')
table = df.pivot(columns='ticker')
# By specifying col[1] in below list comprehension
# You can select the stock names under multi-level column
table.columns = [col[1] for col in table.columns]
table.head()
def plotStock(table):
plt.figure(figsize=(14, 7))
for c in table.columns.values:
plt.plot(table.index, table[c], lw=3, alpha=0.8,label=c)
    plt.legend(loc='upper left', fontsize=12)
    plt.ylabel('price in $')
    plt.show()
def plotVolatility(table):
returns = table.pct_change()
plt.figure(figsize=(14, 7))
for c in returns.columns.values:
plt.plot(returns.index, returns[c], lw=3, alpha=0.8,label=c)
plt.legend(loc='upper right', fontsize=12)
plt.ylabel('daily returns')
plt.show()
def portfolio_annualised_performance(weights, mean_returns, cov_matrix):
returns = np.sum(mean_returns*weights ) *252
std = np.sqrt(np.dot(weights.T, np.dot(cov_matrix, weights))) * np.sqrt(252)
return std, returns
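# Usage sketch (hypothetical equal weights over the four tickers pulled above):
#   w = np.array([0.25, 0.25, 0.25, 0.25])
#   std, ret = portfolio_annualised_performance(w, returns.mean(), returns.cov())
# where returns = table.pct_change() (computed further below); 252 is the assumed
# number of trading days per year used for annualisation.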
def random_portfolios(num_portfolios, mean_returns, cov_matrix, risk_free_rate):
results = np.zeros((3,num_portfolios))
weights_record = []
    for i in range(num_portfolios):
weights = np.random.random(4)
weights /= np.sum(weights)
weights_record.append(weights)
portfolio_std_dev, portfolio_return = portfolio_annualised_performance(weights, mean_returns, cov_matrix)
results[0,i] = portfolio_std_dev
results[1,i] = portfolio_return
results[2,i] = (portfolio_return - risk_free_rate) / portfolio_std_dev
return results, weights_record
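# Layout reminder for the helper above: results is 3 x num_portfolios, with row 0
# holding annualised volatility, row 1 annualised return and row 2 the Sharpe
# ratio; weights_record[i] is the weight vector of portfolio i.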
def display_simulated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate):
results, weights = random_portfolios(num_portfolios,mean_returns, cov_matrix, risk_free_rate)
max_sharpe_idx = np.argmax(results[2])
sdp, rp = results[0,max_sharpe_idx], results[1,max_sharpe_idx]
max_sharpe_allocation = pd.DataFrame(weights[max_sharpe_idx],index=table.columns,columns=['allocation'])
max_sharpe_allocation.allocation = [round(i*100,2)for i in max_sharpe_allocation.allocation]
max_sharpe_allocation = max_sharpe_allocation.T
min_vol_idx = np.argmin(results[0])
sdp_min, rp_min = results[0,min_vol_idx], results[1,min_vol_idx]
min_vol_allocation = pd.DataFrame(weights[min_vol_idx],index=table.columns,columns=['allocation'])
min_vol_allocation.allocation = [round(i*100,2)for i in min_vol_allocation.allocation]
min_vol_allocation = min_vol_allocation.T
print ("-"*80)
print ("Maximum Sharpe Ratio Portfolio Allocation\n")
print ("Annualised Return:", round(rp,2))
print ("Annualised Volatility:", round(sdp,2))
print ("\n")
print (max_sharpe_allocation)
print ("-"*80)
print ("Minimum Volatility Portfolio Allocation\n")
print ("Annualised Return:", round(rp_min,2))
print ("Annualised Volatility:", round(sdp_min,2))
print ("\n")
print (min_vol_allocation)
plt.figure(figsize=(10, 7))
plt.scatter(results[0,:],results[1,:],c=results[2,:],cmap='YlGnBu', marker='o', s=10, alpha=0.3)
plt.colorbar()
plt.scatter(sdp,rp,marker='*',color='r',s=500, label='Maximum Sharpe ratio')
plt.scatter(sdp_min,rp_min,marker='*',color='g',s=500, label='Minimum volatility')
plt.title('Simulated Portfolio Optimization based on Efficient Frontier')
plt.xlabel('annualised volatility')
plt.ylabel('annualised returns')
plt.legend(labelspacing=0.8)
returns = table.pct_change()
mean_returns = returns.mean()
cov_matrix = returns.cov()
num_portfolios = 25000
risk_free_rate = 0.0178
print (display_simulated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate))
#plotVolatility(table)
|
the-stack_0_27276
|
load("@com_google_protobuf//:protobuf.bzl", _py_proto_library = "py_proto_library")
load("@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_grpc_library", "go_proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
_PY_SUFFIX = "_py"
_CC_SUFFIX = "_cc"
_GO_SUFFIX = "_go"
_GO_IMPORTPATH_PREFIX = "github.com/cncf/udpa/"
_COMMON_PROTO_DEPS = [
"@com_google_protobuf//:any_proto",
"@com_google_protobuf//:descriptor_proto",
"@com_google_protobuf//:duration_proto",
"@com_google_protobuf//:empty_proto",
"@com_google_protobuf//:struct_proto",
"@com_google_protobuf//:timestamp_proto",
"@com_google_protobuf//:wrappers_proto",
"@com_google_googleapis//google/api:http_proto",
"@com_google_googleapis//google/api:annotations_proto",
"@com_google_googleapis//google/rpc:status_proto",
"@com_envoyproxy_protoc_gen_validate//validate:validate_proto",
]
def _Suffix(d, suffix):
return d + suffix
def _LibrarySuffix(library_name, suffix):
# Transform //a/b/c to //a/b/c:c in preparation for suffix operation below.
if library_name.startswith("//") and ":" not in library_name:
library_name += ":" + Label(library_name).name
return _Suffix(library_name, suffix)
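# For example (purely illustrative): _LibrarySuffix("//a/b/c", "_cc") yields
# "//a/b/c:c_cc", while a label that already names a target, such as
# _LibrarySuffix("//a/b:c", "_cc"), simply becomes "//a/b:c_cc".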
# TODO(htuch): has_services is currently ignored but will in future support
# gRPC stub generation.
# TODO(htuch): Add support for Go based on envoy_api as needed.
def udpa_proto_library(
name,
visibility = ["//visibility:private"],
srcs = [],
deps = [],
has_services = None):
native.proto_library(
name = name,
srcs = srcs,
deps = deps + _COMMON_PROTO_DEPS,
visibility = visibility,
)
pgv_cc_proto_library(
name = _Suffix(name, _CC_SUFFIX),
cc_deps = [_LibrarySuffix(d, _CC_SUFFIX) for d in deps] + [
"@com_google_googleapis//google/api:http_cc_proto",
"@com_google_googleapis//google/api:annotations_cc_proto",
"@com_google_googleapis//google/rpc:status_cc_proto",
],
deps = [":" + name],
visibility = visibility,
)
py_export_suffixes = []
_py_proto_library(
name = _Suffix(name, _PY_SUFFIX),
srcs = srcs,
default_runtime = "@com_google_protobuf//:protobuf_python",
protoc = "@com_google_protobuf//:protoc",
deps = [_LibrarySuffix(d, _PY_SUFFIX) for d in deps] + [
"@com_envoyproxy_protoc_gen_validate//validate:validate_py",
"@com_google_googleapis//google/rpc:status_py_proto",
"@com_google_googleapis//google/api:annotations_py_proto",
"@com_google_googleapis//google/api:http_py_proto",
"@com_google_googleapis//google/api:httpbody_py_proto",
],
visibility = visibility,
)
py_export_suffixes = ["_py", "_py_genproto"]
# Allow unlimited visibility for consumers
export_suffixes = ["", "_cc", "_cc_validate"] + py_export_suffixes
for s in export_suffixes:
native.alias(
name = name + "_export" + s,
actual = name + s,
visibility = ["//visibility:public"],
)
def udpa_cc_test(name, srcs, proto_deps):
native.cc_test(
name = name,
srcs = srcs,
deps = [_LibrarySuffix(d + "_export", _CC_SUFFIX) for d in proto_deps],
)
# This defines googleapis py_proto_library. The repository does not provide its definition and requires
# overriding it in the consuming project (see https://github.com/grpc/grpc/issues/19255 for more details).
def py_proto_library(name, deps = []):
srcs = [dep[:-6] + ".proto" if dep.endswith("_proto") else dep for dep in deps]
proto_deps = []
# py_proto_library in googleapis specifies *_proto rules in dependencies.
# By rewriting *_proto to *.proto above, the dependencies in *_proto rules are not preserved.
# As a workaround, manually specify the proto dependencies for the imported python rules.
if name == "annotations_py_proto":
proto_deps = proto_deps + [":http_py_proto"]
_py_proto_library(
name = name,
srcs = srcs,
default_runtime = "@com_google_protobuf//:protobuf_python",
protoc = "@com_google_protobuf//:protoc",
deps = proto_deps + ["@com_google_protobuf//:protobuf_python"],
visibility = ["//visibility:public"],
)
def udpa_proto_package(name = "pkg", srcs = [], deps = [], has_services = False, visibility = ["//visibility:public"]):
"""
Generate a single go_proto for all protos in the package.
Some packages may have multiple proto files, and therefore multiple blaze proto targets.
For golang, this is not ideal. Non-bazel users will have to manually generate the protos in a different structure.
Instead of generating a go_proto target per proto, this rule generates a single go_proto target
for the entire package. This better adheres to golang's import structure in the OSS world.
More information: https://github.com/envoyproxy/envoy/pull/8003
"""
if srcs == []:
srcs = native.glob(["*.proto"])
native.proto_library(
name = name,
srcs = srcs,
deps = deps + _COMMON_PROTO_DEPS,
visibility = visibility,
)
compilers = ["@io_bazel_rules_go//proto:go_proto", "//bazel:pgv_plugin_go"]
if has_services:
compilers = ["@io_bazel_rules_go//proto:go_grpc", "//bazel:pgv_plugin_go"]
go_proto_library(
name = _Suffix(name, _GO_SUFFIX),
compilers = compilers,
importpath = _Suffix(_GO_IMPORTPATH_PREFIX, native.package_name()),
proto = name,
visibility = ["//visibility:public"],
deps = [_LibrarySuffix(d, _GO_SUFFIX) for d in deps] + [
"@com_github_golang_protobuf//ptypes:go_default_library",
"@com_github_golang_protobuf//ptypes/any:go_default_library",
"@com_github_golang_protobuf//ptypes/duration:go_default_library",
"@com_github_golang_protobuf//ptypes/struct:go_default_library",
"@com_github_golang_protobuf//ptypes/timestamp:go_default_library",
"@com_github_golang_protobuf//ptypes/wrappers:go_default_library",
"@com_envoyproxy_protoc_gen_validate//validate:go_default_library",
"@com_google_googleapis//google/api:annotations_go_proto",
"@com_google_googleapis//google/rpc:status_go_proto",
],
)
def udpa_go_test(name, srcs, importpath, proto_deps):
go_test(
name = name,
srcs = srcs,
importpath = _GO_IMPORTPATH_PREFIX + importpath,
deps = [_LibrarySuffix(d, _GO_SUFFIX) for d in proto_deps],
)
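
# A hypothetical BUILD file consuming the macros above might look like the
# sketch below; the load path, target names and proto files are illustrative
# assumptions, not taken from this repository:
#
#   load("//bazel:api_build_system.bzl", "udpa_proto_library", "udpa_proto_package")
#
#   udpa_proto_library(
#       name = "example",
#       srcs = ["example.proto"],
#       deps = ["//udpa/annotations:pkg"],
#   )
#
#   udpa_proto_package(
#       has_services = True,
#   )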
|
the-stack_0_27277
|
from kivy.uix.boxlayout import BoxLayout
from kivy.app import App
from kivy.lang.builder import Builder
from kivy.core.window import Window
from kivy.logger import Logger
kv = """
#:import rgba kivy.utils.rgba
<TitleBar>:
id:title_bar
size_hint: 1,0.1
pos_hint : {'top':0.5}
BoxLayout:
orientation:"vertical"
BoxLayout:
Button:
text: "Click-able"
draggable:False
Button:
text: "non Click-able"
Button:
text: "non Click-able"
BoxLayout:
draggable:False
Button:
text: "Click-able"
Button:
text: "click-able"
Button:
text: "Click-able"
FloatLayout:
"""
class TitleBar(BoxLayout):
pass
class CustomTitleBar(App):
def build(self):
root = Builder.load_string(kv)
Window.custom_titlebar = True
title_bar = TitleBar()
root.add_widget(title_bar)
if Window.set_custom_titlebar(title_bar):
Logger.info("Window: setting custom titlebar successful")
else:
Logger.info("Window: setting custom titlebar "
"Not allowed on this system ")
self.title = "MyApp"
return root
if __name__ == "__main__":
CustomTitleBar().run()
|
the-stack_0_27278
|
import numpy as np
import sklearn
def discrete_entropy(ys):
"""Compute discrete mutual information."""
num_factors = ys.shape[0]
h = np.zeros(num_factors)
for j in range(num_factors):
h[j] = sklearn.metrics.mutual_info_score(ys[j, :], ys[j, :])
return h
def discrete_mutual_info(mus, ys):
"""Compute discrete mutual information."""
num_codes = mus.shape[0]
num_factors = ys.shape[0]
m = np.zeros([num_codes, num_factors])
for i in range(num_codes):
for j in range(num_factors):
m[i, j] = sklearn.metrics.mutual_info_score(ys[j, :], mus[i, :])
return m
def _histogram_discretize(target, num_bins=30):
"""Discretization based on histograms."""
discretized = np.zeros_like(target)
for i in range(target.shape[0]):
discretized[i, :] = np.digitize(target[i, :], np.histogram(
target[i, :], num_bins)[1][:-1])
return discretized
def make_discretizer(target, num_bins=30,
discretizer_fn=_histogram_discretize):
"""Wrapper that creates discretizers."""
return discretizer_fn(target, num_bins)
def compute_mig(representation, gt_factors, k=1):
"""Computes score based on both training and testing codes and factors.
k is the size of subspace that should be disentangled from other parts.
"""
representation = representation.cpu().detach().numpy().T
gt_factors = gt_factors.cpu().detach().numpy().T
discretized_representation = make_discretizer(representation)
discretized_gt_factors = make_discretizer(gt_factors)
m = discrete_mutual_info(discretized_representation, discretized_gt_factors)
z_dim = m.shape[0]
if k > 1:
        m_comp = np.row_stack([m[i:i + k, :].mean(axis=0) for i in range(0, z_dim, k)])
else:
m_comp = m
entropy = discrete_entropy(discretized_gt_factors)
sorted_m = np.sort(m_comp, axis=0)[::-1]
score = np.mean(np.divide(sorted_m[0, :] - sorted_m[1, :], entropy[:]))
return score, m_comp
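

def _example_discrete_mi():
    """A minimal sketch (not part of the metric code) exercising the numpy
    helpers above with synthetic data; ``compute_mig`` itself expects torch
    tensors, so it is not called here."""
    rng = np.random.RandomState(0)
    codes = rng.rand(4, 1000)                      # 4 latent codes, 1000 samples
    factors = rng.randint(0, 5, size=(3, 1000))    # 3 discrete ground-truth factors
    discretized_codes = make_discretizer(codes)
    mi = discrete_mutual_info(discretized_codes, factors)
    h = discrete_entropy(factors)
    return mi / h[None, :]                         # normalised MI, shape (4, 3)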
|
the-stack_0_27279
|
import os
import tkinter
from tkinter import Menu
from tkbuilder.panel_templates.widget_panel.widget_panel import AbstractWidgetPanel
from tkbuilder.panel_templates.image_canvas_panel.image_canvas_panel import ImageCanvasPanel
from tkbuilder.image_readers.geotiff_reader import GeotiffImageReader
from tkinter import filedialog
from tkbuilder.example_apps.geotiff_viewer.panels.band_selection import BandSelection
class GeotiffViewer(AbstractWidgetPanel):
geotiff_image_panel = ImageCanvasPanel # type: ImageCanvasPanel
band_selection_panel = BandSelection # type: BandSelection
image_reader = None # type: GeotiffImageReader
def __init__(self, master):
self.master = master
master_frame = tkinter.Frame(master)
AbstractWidgetPanel.__init__(self, master_frame)
widgets_list = ["geotiff_image_panel", "band_selection_panel"]
self.init_w_vertical_layout(widgets_list)
self.geotiff_image_panel.set_canvas_size(800, 1080)
self.geotiff_image_panel.canvas.set_current_tool_to_pan()
menubar = Menu()
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open", command=self.select_file)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.exit)
# create more pulldown menus
popups_menu = Menu(menubar, tearoff=0)
popups_menu.add_command(label="Main Controls", command=self.exit)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_cascade(label="Popups", menu=popups_menu)
master.config(menu=menubar)
master_frame.pack()
self.pack()
self.band_selection_panel.red_selection.on_selection(self.callback_update_red_band)
self.band_selection_panel.green_selection.on_selection(self.callback_update_green_band)
self.band_selection_panel.blue_selection.on_selection(self.callback_update_blue_band)
self.band_selection_panel.alpha_selection.on_selection(self.callback_update_alpha_band)
def exit(self):
self.quit()
def select_file(self, fname=None):
if fname is None:
fname = filedialog.askopenfilename(initialdir=os.path.expanduser("~"),
title="Select file",
filetypes=(("tiff files", ("*.tif", "*.tiff", "*.TIF", "*.TIFF")),
("all files", "*.*"))
)
self.image_reader = GeotiffImageReader(fname)
self.geotiff_image_panel.canvas.set_image_reader(self.image_reader)
self.populate_band_selections()
def populate_band_selections(self):
bands = self.image_reader.n_bands
band_selections = [str(band) for band in range(bands)]
band_selections.append("None")
self.band_selection_panel.red_selection.update_combobox_values(band_selections)
self.band_selection_panel.green_selection.update_combobox_values(band_selections)
self.band_selection_panel.blue_selection.update_combobox_values(band_selections)
self.band_selection_panel.alpha_selection.update_combobox_values(band_selections)
self.band_selection_panel.red_selection.set(str(self.image_reader.display_bands[0]))
self.band_selection_panel.green_selection.set(str(self.image_reader.display_bands[1]))
self.band_selection_panel.blue_selection.set(str(self.image_reader.display_bands[2]))
if len(self.image_reader.display_bands) > 3:
self.band_selection_panel.alpha_selection.set(str(self.image_reader.display_bands[3]))
else:
self.band_selection_panel.alpha_selection.set("None")
def callback_update_red_band(self, event):
red_band = self.band_selection_panel.red_selection.get()
band_num = 0
if red_band == "None":
if band_num not in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.append(band_num)
else:
if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(band_num)
self.image_reader.display_bands[band_num] = int(red_band)
self.geotiff_image_panel.canvas.update_current_image()
def callback_update_green_band(self, event):
green_band = self.band_selection_panel.green_selection.get()
band_num = 1
if green_band == "None":
if band_num not in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.append(1)
else:
if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(1)
self.image_reader.display_bands[1] = int(green_band)
self.geotiff_image_panel.canvas.update_current_image()
def callback_update_blue_band(self, event):
band_num = 2
blue_band = self.band_selection_panel.blue_selection.get()
if blue_band == "None":
if band_num not in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.append(band_num)
else:
if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(band_num)
self.image_reader.display_bands[band_num] = int(blue_band)
self.geotiff_image_panel.canvas.update_current_image()
def callback_update_alpha_band(self, event):
alpha_band = self.band_selection_panel.alpha_selection.get()
band_num = 3
if len(self.image_reader.display_bands) == 3:
self.image_reader.display_bands.append(band_num)
if alpha_band == "None":
self.image_reader.display_bands = self.image_reader.display_bands[0:3]
else:
if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:
self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(band_num)
self.image_reader.display_bands[band_num] = int(alpha_band)
self.geotiff_image_panel.canvas.update_current_image()
if __name__ == '__main__':
root = tkinter.Tk()
app = GeotiffViewer(root)
root.mainloop()
|
the-stack_0_27281
|
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import absolute_import
import phue
import traceback
import sys
from constants import HUE_ROOM, HUE_BRIDGE_IP
if sys.version_info < (3, 0):
input = raw_input # pylint: disable=E0602
def on_dash(bridge, data):
try:
bridge.toggle()
except OSError as e:
print(traceback.format_exc(), file=sys.stderr)
def listen_on_stdin(bridge):
print("Waiting for input", file=sys.stderr)
try:
while True:
data = input()
if data.startswith('Hello Dash button'):
on_dash(bridge, data)
except KeyboardInterrupt:
print('Ctrl-C', file=sys.stderr)
except:
print('Unexpected error!', file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
raise
class BridgeWrapper(object):
def __init__(self, bridge):
self.bridge = bridge
def lights_are_on(self):
if HUE_ROOM is not None:
room_lights = self.bridge.get_group(HUE_ROOM, 'lights')
for light in self.bridge.lights:
if HUE_ROOM is None or str(light.light_id) in room_lights:
if light.on:
return True
return False
def set_lights(self, on):
print("Turning lights {}".format("on" if on else "off"))
if HUE_ROOM is not None:
room_lights = self.bridge.get_group(HUE_ROOM, 'lights')
for light in self.bridge.lights:
if HUE_ROOM is None or str(light.light_id) in room_lights:
light.on = on
def toggle(self):
self.set_lights(not self.lights_are_on())
if __name__ == '__main__':
bridge = phue.Bridge(HUE_BRIDGE_IP)
bridge.connect()
bridge = BridgeWrapper(bridge)
listen_on_stdin(bridge)
|
the-stack_0_27283
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from mock import patch
from oslo_config import cfg
from oslotest.base import BaseTestCase
from stevedore.driver import DriverManager
from stevedore.extension import Extension
from watcher_metering.load.loader import DriverLoader
from watcher_metering.tests.loader._fixtures import ConfFixture
from watcher_metering.tests.loader._fixtures import FakeDriverNoGroup
from watcher_metering.tests.loader._fixtures import FakeDriverNoOpt
from watcher_metering.tests.loader._fixtures import FakeDriverWithExternalOpts
from watcher_metering.tests.loader._fixtures import FakeDriverWithOpts
class TestDriverLoader(BaseTestCase):
# patches to be applied for each test in this test suite
patches = []
def setUp(self):
super(TestDriverLoader, self).setUp()
# To load the drivers without using the config file
self.useFixture(ConfFixture())
def _fake_parse(self, *args, **kw):
return cfg.ConfigOpts._parse_cli_opts(cfg.CONF, [])
cfg.CONF._parse_cli_opts = _fake_parse
# First dependency to be returned
self.no_group_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=FakeDriverNoGroup.get_name(),
entry_point="%s:%s" % (FakeDriverNoGroup.__module__,
FakeDriverNoGroup.__name__),
plugin=FakeDriverNoGroup,
obj=None,
),
namespace=FakeDriverNoGroup.namespace(),
)
# 2nd dependency to be returned
self.with_ext_opts_driver_manager = DriverManager.make_test_instance(
extension=Extension(
name=FakeDriverWithExternalOpts.get_name(),
entry_point="%s:%s" % (FakeDriverWithExternalOpts.__module__,
FakeDriverWithExternalOpts.__name__),
plugin=FakeDriverWithExternalOpts,
obj=None,
),
namespace=FakeDriverWithExternalOpts.namespace(),
)
self.patches.extend([
# patch.object(cfg, "ConfigOpts", ),
])
# Applies all of our patches before each test
for _patch in self.patches:
_patch.start()
def tearDown(self):
super(TestDriverLoader, self).tearDown()
for _patch in self.patches:
_patch.stop()
@patch("watcher_metering.load.loader.DriverManager")
def test_load_driver_no_opt(self, m_driver_manager):
m_driver_manager.return_value = DriverManager.make_test_instance(
extension=Extension(
name=FakeDriverNoOpt.get_name(),
entry_point="%s:%s" % (FakeDriverNoOpt.__module__,
FakeDriverNoOpt.__name__),
plugin=FakeDriverNoOpt,
obj=None,
),
namespace=FakeDriverNoOpt.namespace(),
)
loader_manager = DriverLoader(
conf=cfg.CONF,
namespace='TESTING',
name='fake_no_opt'
)
loaded_driver = loader_manager.load()
self.assertEqual(
isinstance(loaded_driver, FakeDriverNoOpt),
True
)
@patch("watcher_metering.load.loader.DriverManager")
def test_load_driver_no_group(self, m_driver_manager):
m_driver_manager.return_value = DriverManager.make_test_instance(
extension=Extension(
name=FakeDriverNoGroup.get_name(),
entry_point="%s:%s" % (FakeDriverNoGroup.__module__,
FakeDriverNoGroup.__name__),
plugin=FakeDriverNoGroup,
obj=None,
),
namespace=FakeDriverNoGroup.namespace(),
)
loader_manager = DriverLoader(
conf=cfg.CONF,
namespace='',
name='fake_no_group'
)
loaded_driver = loader_manager.load()
self.assertEqual(hasattr(loaded_driver, "test_opt"), True)
self.assertEqual(loaded_driver.test_opt, "fake_no_group")
@patch("watcher_metering.load.loader.DriverManager")
def test_load_driver_with_opts(self, m_driver_manager):
m_driver_manager.return_value = DriverManager.make_test_instance(
extension=Extension(
name=FakeDriverWithOpts.get_name(),
entry_point="%s:%s" % (FakeDriverWithOpts.__module__,
FakeDriverWithOpts.__name__),
plugin=FakeDriverWithOpts,
obj=None,
),
namespace=FakeDriverWithOpts.namespace(),
)
loader_manager = DriverLoader(
conf=cfg.CONF,
namespace='TESTING',
name='fake_with_opts'
)
loaded_driver = loader_manager.load()
self.assertEqual(hasattr(loaded_driver, "test_opt"), True)
self.assertEqual(loaded_driver.test_opt, "fake_with_opts")
@patch("watcher_metering.load.loader.DriverManager")
def test_load_driver_with_external_opts(self, m_driver_manager):
m_driver_manager.return_value = DriverManager.make_test_instance(
extension=Extension(
name=FakeDriverWithExternalOpts.get_name(),
entry_point="%s:%s" % (FakeDriverWithExternalOpts.__module__,
FakeDriverWithExternalOpts.__name__),
plugin=FakeDriverWithExternalOpts,
obj=None,
),
namespace=FakeDriverWithExternalOpts.namespace(),
)
loader_manager = DriverLoader(
conf=cfg.CONF,
namespace='TESTING',
name='fake_with_ext_opts'
)
loaded_driver = loader_manager.load()
self.assertEqual(
hasattr(loaded_driver, "fake__test_external_opt"),
True
)
self.assertEqual(
loaded_driver.fake__test_external_opt,
"fake_with_ext_opts"
)
|
the-stack_0_27284
|
__author__ = "Giacomo Trifilo <[email protected]>"
import unittest
from unicon import Connection
############################################
# Please change follow parameters as needed
name = "E5-37-C3850"
username = "lab"
password = "lab"
start = "telnet 10.51.66.5 2013"
tftp_image = "tftp://172.18.200.210/BB_IMAGES/polaris_dev/rp_super_universalk9.edison.bin"
############################################
os = "iosxe"
chassis_type = "single_rp"
platform = "cat3k"
class TestIosXE(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.con = Connection(hostname=name,
username=username,
tacacs_password=password,
start=[start],
os=os,
platform=platform)
cls.con.connect()
@classmethod
def tearDownClass(cls):
cls.con.disconnect()
def test_linux_exec(self):
output = self.con.shellexec("whoami")
assert output == "root"
assert self.con.state_machine.current_state == "enable"
def test_reload_1(self):
self.con.config("boot manual")
self.con.reload(image_to_boot=tftp_image, timeout=1200)
assert self.con.state_machine.current_state == "enable"
def test_reload_2(self):
self.con.config("no boot manual")
self.con.config("boot system {}".format(tftp_image))
self.con.reload(timeout=1200)
assert self.con.state_machine.current_state == "enable"
def test_delete_file(self):
filename = "i_should_not_be_here"
self.con.shellexec("touch /flash/{}".format(filename))
self.con.execute("delete flash:{}".format(filename))
assert "No such file or directory" in \
self.con.shellexec("ls /flash/{}".format(filename))
def test_write_erase(self):
self.con.execute("write erase")
self.con.copy(source="running-conf", dest="startup-config")
def test_rommon(self):
self.con.execute("show ip interface brief")
self.con.rommon("MANUAL_BOOT=yes", end_state="rommon")
self.con.rommon("set", end_state="enable" )
self.con.execute("show ip interface brief")
|
the-stack_0_27287
|
# Part of the awpa package: https://github.com/pyga/awpa
# See LICENSE for copyright.
import io
from .pgen2 import tokenize
def detect_future_features(grammar, source):
token = grammar.token
have_docstring = False
gen = tokenize.generate_tokens(token, io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset((token.NEWLINE, token.NL, token.COMMENT))
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == 'from':
tp, value = advance()
if tp != token.NAME or value != '__future__':
break
tp, value = advance()
if tp != token.NAME or value != 'import':
break
tp, value = advance()
if tp == token.OP and value == '(':
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ',':
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
def decode_bytes_using_source_encoding(b):
encoding = tokenize.detect_encoding(io.BytesIO(b).readline)[0]
return b.decode(encoding)
def read_file_using_source_encoding(filename):
with open(filename, 'rb') as infile:
encoding = tokenize.detect_encoding(infile.readline)[0]
with io.open(filename, 'r', encoding=encoding) as infile_with_encoding:
return infile_with_encoding.read()
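

def _example_decode():
    """A minimal sketch (not part of awpa) of the encoding helpers above: a
    byte string carrying a latin-1 coding cookie is decoded accordingly."""
    raw = b"# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n"
    return decode_bytes_using_source_encoding(raw)  # '\xe9' decoded as latin-1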
|
the-stack_0_27290
|
""" Module extracts the feature importance scores of each feature for every classifier """
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
# from sklearn.svm import SVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
class FeatureImportance:
""" Class to extract the feature importance scores. Uses optimal hyperparameters from GridSearch results """
def __init__(self, features, labels, feature_names):
self.skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
self.features = features
self.labels = labels
self.feature_names = feature_names
        self.feature_importances = np.zeros((10, 4, len(feature_names)))
def get_importance_scores(self):
""" Prints feature importance scores """
index = 0
for train_index, _ in self.skf.split(self.features, self.labels):
classifier_SVC = SVC(kernel="linear")
classifier_logistic_regression = LogisticRegression(
max_iter=1500, C=1.2, solver="lbfgs"
)
classifier_random_forest = RandomForestClassifier(
criterion="entropy", max_depth=10, max_features="log2"
)
classifier_decision_tree = DecisionTreeClassifier(
max_leaf_nodes=15, min_samples_leaf=10
)
classifier_SVC.fit(
self.features.iloc[train_index], self.labels.iloc[train_index]
)
classifier_logistic_regression.fit(
self.features.iloc[train_index], self.labels.iloc[train_index]
)
classifier_random_forest.fit(
self.features.iloc[train_index], self.labels.iloc[train_index]
)
classifier_decision_tree.fit(
self.features.iloc[train_index], self.labels.iloc[train_index]
)
self.feature_importances[index] = [
classifier_SVC.coef_[0],
classifier_logistic_regression.coef_[0],
classifier_random_forest.feature_importances_,
classifier_decision_tree.feature_importances_,
]
index += 1
df_mean = pd.DataFrame(
data=self.feature_importances.mean(axis=0),
index=["SVM", "Logistic Regression", "Random Forest", "Decision Tree"],
columns=self.feature_names,
)
df_max = pd.DataFrame(
data=self.feature_importances.max(axis=0),
index=["SVM", "Logistic Regression", "Random Forest", "Decision Tree"],
columns=self.feature_names,
)
df_min = pd.DataFrame(
data=self.feature_importances.min(axis=0),
index=["SVM", "Logistic Regression", "Random Forest", "Decision Tree"],
columns=self.feature_names,
)
df_mean.to_csv(
"src/analysis/feature_importance/out_mean.zip",
index=False,
compression=dict(method="zip", archive_name="out_mean.csv"),
)
df_max.to_csv(
"src/analysis/feature_importance/out_max.zip",
index=False,
compression=dict(method="zip", archive_name="out_max.csv"),
)
df_min.to_csv(
"src/analysis/feature_importance/out_min.zip",
index=False,
compression=dict(method="zip", archive_name="out_min.csv"),
)
|
the-stack_0_27291
|
from googletrans import Translator
from textblob import TextBlob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
def get_similarity(tweet_prep, column):
    """Compute the TextBlob polarity of the first few tweets as a quick check."""
    senti = tweet_prep.loc[:10, [column]]
    senti['polarity'] = pd.DataFrame(senti[column]).apply(lambda x: TextBlob(x[column]).sentiment.polarity, axis=1)
    return senti
def analize_sentiment(tweet):
analysis = TextBlob(tweet)
if analysis.sentiment.polarity > 0:
return 1
elif analysis.sentiment.polarity == 0:
return 0
else:
return -1
def showPieChart(positive, neutral, negative):
    plt.figure(figsize=(7, 7))
plt.pie([positive, neutral, negative],
autopct = '%1.1f%%',
colors = ['green','white','red'],
explode = (0.1, 0.1, 0.1),
startangle = 140)
plt.show()
def get_sentiment(tweet_prep, column):
get_similarity(tweet_prep, column)
senti = tweet_prep.loc[:, [column]]
senti['sentiment'] = np.array([analize_sentiment(tweet) for tweet in senti[column]])
pos_tweets = [tweet for index, tweet in enumerate(senti[column]) if senti['sentiment'][index] > 0]
neu_tweets = [tweet for index, tweet in enumerate(senti[column]) if senti['sentiment'][index] == 0]
neg_tweets = [tweet for index, tweet in enumerate(senti[column]) if senti['sentiment'][index] < 0]
showPieChart(positive=len(pos_tweets), neutral=len(neu_tweets), negative=len(neg_tweets))
return pos_tweets, neu_tweets, neg_tweets
def get_tweets_sentimen(pos, neu, neg):
translator = Translator()
    print(f'\n🟢 Positive \n{translator.translate(random.choice(pos), dest="id").text} \n')
    print(f'⚪ Neutral \n{translator.translate(random.choice(neu), dest="id").text} \n')
    print(f'🔴 Negative \n{translator.translate(random.choice(neg), dest="id").text}')
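

def _example_polarity():
    """A minimal sketch (not part of the original script) of the offline
    pieces above; no translation request or plot is triggered here."""
    tweets = pd.DataFrame({'text': ['I love this product',
                                    'The meeting starts at noon',
                                    'Terrible experience']})
    return [analize_sentiment(tweet) for tweet in tweets['text']]  # typically [1, 0, -1]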
|
the-stack_0_27293
|
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
Based on code from fnmatch.py file distributed with Python 2.6.
Licensed under PSF License (see LICENSE.PSF file).
Changes to original fnmatch module:
- translate function supports ``*`` and ``**`` similarly to fnmatch C library
"""
import os
import re
__all__ = ["fnmatch", "fnmatchcase", "translate"]
_cache = {}
LEFT_BRACE = re.compile(
r"""
(?: ^ | [^\\] ) # Beginning of string or a character besides "\"
\{ # "{"
""", re.VERBOSE
)
RIGHT_BRACE = re.compile(
r"""
(?: ^ | [^\\] ) # Beginning of string or a character besides "\"
\} # "}"
""", re.VERBOSE
)
NUMERIC_RANGE = re.compile(
r"""
( # Capture a number
[+-] ? # Zero or one "+" or "-" characters
\d + # One or more digits
)
\.\. # ".."
( # Capture a number
[+-] ? # Zero or one "+" or "-" characters
\d + # One or more digits
)
""", re.VERBOSE
)
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
- ``*`` matches everything except path separator
- ``**`` matches everything
- ``?`` matches any single character
- ``[seq]`` matches any character in seq
- ``[!seq]`` matches any char not in seq
- ``{s1,s2,s3}`` matches any of the strings given (separated by commas)
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normpath(name).replace(os.sep, "/")
return fnmatchcase(name, pat)
def cached_translate(pat):
    if pat not in _cache:
res, num_groups = translate(pat)
regex = re.compile(res)
_cache[pat] = regex, num_groups
return _cache[pat]
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
regex, num_groups = cached_translate(pat)
match = regex.match(name)
if not match:
return False
pattern_matched = True
for (num, (min_num, max_num)) in zip(match.groups(), num_groups):
if num[0] == '0' or not (min_num <= int(num) <= max_num):
pattern_matched = False
break
return pattern_matched
def translate(pat, nested=False):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
index, length = 0, len(pat) # Current index and length of pattern
brace_level = 0
in_brackets = False
result = ''
is_escaped = False
matching_braces = (len(LEFT_BRACE.findall(pat)) ==
len(RIGHT_BRACE.findall(pat)))
numeric_groups = []
while index < length:
current_char = pat[index]
index += 1
if current_char == '*':
pos = index
if pos < length and pat[pos] == '*':
result += '.*'
else:
result += '[^/]*'
elif current_char == '?':
result += '.'
elif current_char == '[':
if in_brackets:
result += '\\['
else:
pos = index
has_slash = False
while pos < length and pat[pos] != ']':
if pat[pos] == '/' and pat[pos-1] != '\\':
has_slash = True
break
pos += 1
if has_slash:
result += '\\[' + pat[index:(pos + 1)] + '\\]'
index = pos + 2
else:
if index < length and pat[index] in '!^':
index += 1
result += '[^'
else:
result += '['
in_brackets = True
elif current_char == '-':
if in_brackets:
result += current_char
else:
result += '\\' + current_char
elif current_char == ']':
result += current_char
in_brackets = False
elif current_char == '{':
pos = index
has_comma = False
while pos < length and (pat[pos] != '}' or is_escaped):
if pat[pos] == ',' and not is_escaped:
has_comma = True
break
is_escaped = pat[pos] == '\\' and not is_escaped
pos += 1
if not has_comma and pos < length:
num_range = NUMERIC_RANGE.match(pat[index:pos])
if num_range:
numeric_groups.append(map(int, num_range.groups()))
result += r"([+-]?\d+)"
else:
inner_result, inner_groups = translate(pat[index:pos],
nested=True)
result += '\\{%s\\}' % (inner_result,)
numeric_groups += inner_groups
index = pos + 1
elif matching_braces:
result += '(?:'
brace_level += 1
else:
result += '\\{'
elif current_char == ',':
if brace_level > 0 and not is_escaped:
result += '|'
else:
result += '\\,'
elif current_char == '}':
if brace_level > 0 and not is_escaped:
result += ')'
brace_level -= 1
else:
result += '\\}'
elif current_char == '/':
if pat[index:(index + 3)] == "**/":
result += "(?:/|/.*/)"
index += 3
else:
result += '/'
elif current_char != '\\':
result += re.escape(current_char)
if current_char == '\\':
if is_escaped:
result += re.escape(current_char)
is_escaped = not is_escaped
else:
is_escaped = False
if not nested:
result = r'(?s)%s\Z' % result
return result, numeric_groups
|
the-stack_0_27295
|
from mechanics import colors
from mechanics.game_messages import Message
def heal(*args, **kwargs):
#I NEED HEALING - Genji
entity = args[0]
amount = kwargs.get('amount')
results = []
    if entity.fighter.hp == entity.fighter.max_hp:
        results.append({'consumed': False, 'message': Message('Your health is already full!', colors.yellow)})
    else:
        entity.fighter.heal(amount)
        results.append({'consumed': True, 'message': Message('Your wounds slowly begin to heal', colors.green)})
return results
def cast_lightning(*args, **kwargs):
    # Lightning spell
caster = args[0]
entities = kwargs.get('entities')
game_map = kwargs.get('game_map')
damage = kwargs.get('damage')
maximum_range = kwargs.get('maximum_range')
results = []
target = None
closest_distance = maximum_range + 1
for entity in entities:
if entity.fighter and entity != caster and game_map.fov[entity.x, entity.y]:
distance = caster.distance_to(entity)
if distance < closest_distance:
target = entity
closest_distance = distance
if target:
        results.append({'consumed': True, 'target': target, 'message': Message('A lightning bolt strikes the {0} with a thunderous crack! It takes {1} damage!'.format(target.name, damage))})
results.extend(target.fighter.take_damage(damage))
else:
        results.append({'consumed': False, 'target': None, 'message': Message('No enemy is close enough to strike', colors.red)})
return results
def cast_fireball(*args, **kwargs):
    # Fireball spell
entities = kwargs.get('entities')
game_map = kwargs.get('game_map')
damage = kwargs.get('damage')
radius = kwargs.get('radius')
target_x = kwargs.get('target_x')
target_y = kwargs.get('target_y')
results = []
if not game_map.fov[target_x, target_y]:
        results.append({'consumed': False, 'message': Message('You cannot target a tile outside your field of view', colors.yellow)})
return results
    results.append({'consumed': True, 'message': Message('The fireball explodes, burning everything within {0} tiles!'.format(radius), colors.orange)})
for entity in entities:
if entity.distance(target_x, target_y) <= radius and entity.fighter:
            results.append({'message': Message('The {0} gets burned for {1} damage'.format(entity.name, damage), colors.orange)})
results.extend(entity.fighter.take_damage(damage))
return results
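

def use_item_example(entity, message_log):
    # A minimal sketch of how the result dictionaries returned above are
    # normally consumed. `entity` and `message_log` are assumed to come from
    # the game engine, with `message_log` exposing an `add_message` method.
    for result in heal(entity, amount=4):
        message = result.get('message')
        if message:
            message_log.add_message(message)
        if result.get('consumed'):
            return True
    return False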
|
the-stack_0_27296
|
PI_CONSTANT = 3.141592653589793
a = float(input())
b = float(input())
c = float(input())
n = int(input())
volume_truck = a * b * c
volume_barrel = 0
count_of_barrels = -1
for x in range(0, n):
r = float(input())
h = float(input())
volume_barrel += PI_CONSTANT * r * r * h
volume_left = volume_truck - volume_barrel
count_of_barrels += 1
if volume_left < 0:
break
if volume_truck < volume_barrel:
print(f'Truck is full. {count_of_barrels} on board!')
else:
print(f'All barrels on board. Capacity left - {volume_left:.2f}.')
|
the-stack_0_27297
|
"""
This is a convenient container gathering all the main
search methods for the various database tables.
It is intended to be used e.g. as
> from evennia.utils import search
> match = search.objects(...)
Note that this is not intended to be a complete listing of all search
methods! You need to refer to the respective manager to get all
possible search methods. To get to the managers from your code, import
the database model and call its 'objects' property.
Also remember that all commands in this file return lists (also if
there is only one match) unless noted otherwise.
Example: To reach the search method 'get_object_with_account'
in evennia/objects/managers.py:
> from evennia.objects.models import ObjectDB
> match = ObjectDB.objects.get_object_with_account(...)
"""
# Import the manager methods to be wrapped
from django.contrib.contenttypes.models import ContentType
# limit symbol import from API
__all__ = (
"search_object",
"search_account",
"search_script",
"search_message",
"search_channel",
"search_help_entry",
"search_object_tag",
"search_script_tag",
"search_account_tag",
"search_channel_tag",
)
# import objects this way to avoid circular import problems
ObjectDB = ContentType.objects.get(app_label="objects", model="objectdb").model_class()
AccountDB = ContentType.objects.get(app_label="accounts", model="accountdb").model_class()
ScriptDB = ContentType.objects.get(app_label="scripts", model="scriptdb").model_class()
Msg = ContentType.objects.get(app_label="comms", model="msg").model_class()
Channel = ContentType.objects.get(app_label="comms", model="channeldb").model_class()
HelpEntry = ContentType.objects.get(app_label="help", model="helpentry").model_class()
Tag = ContentType.objects.get(app_label="typeclasses", model="tag").model_class()
# -------------------------------------------------------------------
# Search manager-wrappers
# -------------------------------------------------------------------
#
# Search objects as a character
#
# NOTE: A more powerful wrapper of this method
# is reachable from within each command class
# by using self.caller.search()!
#
# def object_search(self, ostring=None,
# attribute_name=None,
# typeclass=None,
# candidates=None,
# exact=True):
#
# Search globally or in a list of candidates and return results.
# The result is always a list of Objects (or the empty list)
#
# Arguments:
# ostring: (str) The string to compare names against. By default (if
# not attribute_name is set), this will search object.key
# and object.aliases in order. Can also be on the form #dbref,
# which will, if exact=True be matched against primary key.
# attribute_name: (str): Use this named ObjectAttribute to match ostring
# against, instead of the defaults.
# typeclass (str or TypeClass): restrict matches to objects having
# this typeclass. This will help speed up global searches.
# candidates (list obj ObjectDBs): If supplied, search will only be
# performed among the candidates in this list. A common list
# of candidates is the contents of the current location.
# exact (bool): Match names/aliases exactly or partially. Partial
# matching matches the beginning of words in the names/aliases,
# using a matching routine to separate multiple matches in
# names with multiple components (so "bi sw" will match
# "Big sword"). Since this is more expensive than exact
# matching, it is recommended to be used together with
# the objlist keyword to limit the number of possibilities.
# This keyword has no meaning if attribute_name is set.
#
# Returns:
# A list of matching objects (or a list with one unique match)
# def object_search(self, ostring, caller=None,
# candidates=None,
# attribute_name=None):
#
search_object = ObjectDB.objects.object_search
search_objects = search_object
object_search = search_object
objects = search_objects
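
def _example_object_search():
    """A minimal sketch (not shipped with Evennia) of the wrapper above;
    "red button" is a hypothetical object key."""
    matches = search_object("red button")
    return matches[0] if matches else None
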
#
# Search for accounts
#
# account_search(self, ostring)
# Searches for a particular account by name or
# database id.
#
# ostring = a string or database id.
#
search_account = AccountDB.objects.account_search
search_accounts = search_account
account_search = search_account
accounts = search_accounts
#
# Searching for scripts
#
# script_search(self, ostring, obj=None, only_timed=False)
#
# Search for a particular script.
#
# ostring - search criterion - a script ID or key
# obj - limit search to scripts defined on this object
# only_timed - limit search only to scripts that run
# on a timer.
#
search_script = ScriptDB.objects.script_search
search_scripts = search_script
script_search = search_script
scripts = search_scripts
#
# Searching for communication messages
#
#
# message_search(self, sender=None, receiver=None, channel=None, freetext=None)
#
# Search the message database for particular messages. At least one
# of the arguments must be given to do a search.
#
# sender - get messages sent by a particular account
# receiver - get messages received by a certain account
# channel - get messages sent to a particular channel
# freetext - Search for a text string in a message.
# NOTE: This can potentially be slow, so make sure to supply
# one of the other arguments to limit the search.
#
search_message = Msg.objects.message_search
search_messages = search_message
message_search = search_message
messages = search_messages
#
# Search for Communication Channels
#
# channel_search(self, ostring)
#
# Search the channel database for a particular channel.
#
# ostring - the key or database id of the channel.
# exact - requires an exact ostring match (not case sensitive)
#
search_channel = Channel.objects.channel_search
search_channels = search_channel
channel_search = search_channel
channels = search_channels
#
# Find help entry objects.
#
# search_help(self, ostring, help_category=None)
#
# Retrieve a search entry object.
#
# ostring - the help topic to look for
# category - limit the search to a particular help topic
#
search_help = HelpEntry.objects.search_help
search_help_entry = search_help
search_help_entries = search_help
help_entry_search = search_help
help_entries = search_help
# Locate Attributes
# search_object_attribute(key, category, value, strvalue) (also search_attribute works)
# search_account_attribute(key, category, value, strvalue) (also search_attribute works)
# search_script_attribute(key, category, value, strvalue) (also search_attribute works)
# search_channel_attribute(key, category, value, strvalue) (also search_attribute works)
# Note that these return the object attached to the Attribute,
# not the attribute object itself (this is usually what you want)
def search_object_attribute(key=None, category=None, value=None, strvalue=None):
return ObjectDB.objects.get_by_attribute(
key=key, category=category, value=value, strvalue=strvalue
)
def search_account_attribute(key=None, category=None, value=None, strvalue=None):
return AccountDB.objects.get_by_attribute(
key=key, category=category, value=value, strvalue=strvalue
)
def search_script_attribute(key=None, category=None, value=None, strvalue=None):
return ScriptDB.objects.get_by_attribute(
key=key, category=category, value=value, strvalue=strvalue
)
def search_channel_attribute(key=None, category=None, value=None, strvalue=None):
return Channel.objects.get_by_attribute(
key=key, category=category, value=value, strvalue=strvalue
)
# search for attribute objects
search_attribute_object = ObjectDB.objects.get_attribute
# Locate Tags
# search_object_tag(key=None, category=None) (also search_tag works)
# search_account_tag(key=None, category=None)
# search_script_tag(key=None, category=None)
# search_channel_tag(key=None, category=None)
# Note that this returns the object attached to the tag, not the tag
# object itself (this is usually what you want)
def search_object_by_tag(key=None, category=None):
"""
Find object based on tag or category.
Args:
key (str, optional): The tag key to search for.
category (str, optional): The category of tag
to search for. If not set, uncategorized
tags will be searched.
Returns:
matches (list): List of Objects with tags matching
the search criteria, or an empty list if no
matches were found.
"""
return ObjectDB.objects.get_by_tag(key=key, category=category)
search_tag = search_object_by_tag # this is the most common case
def search_account_tag(key=None, category=None):
"""
Find account based on tag or category.
Args:
key (str, optional): The tag key to search for.
category (str, optional): The category of tag
to search for. If not set, uncategorized
tags will be searched.
Returns:
matches (list): List of Accounts with tags matching
the search criteria, or an empty list if no
matches were found.
"""
return AccountDB.objects.get_by_tag(key=key, category=category)
def search_script_tag(key=None, category=None):
"""
Find script based on tag or category.
Args:
key (str, optional): The tag key to search for.
category (str, optional): The category of tag
to search for. If not set, uncategorized
tags will be searched.
Returns:
matches (list): List of Scripts with tags matching
the search criteria, or an empty list if no
matches were found.
"""
return ScriptDB.objects.get_by_tag(key=key, category=category)
def search_channel_tag(key=None, category=None):
"""
Find channel based on tag or category.
Args:
key (str, optional): The tag key to search for.
category (str, optional): The category of tag
to search for. If not set, uncategorized
tags will be searched.
Returns:
matches (list): List of Channels with tags matching
the search criteria, or an empty list if no
matches were found.
"""
return Channel.objects.get_by_tag(key=key, category=category)
# search for tag objects (not the objects they are attached to
search_tag_object = ObjectDB.objects.get_tag
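

def _example_tag_search():
    """A minimal sketch (not shipped with Evennia) of the tag helpers above;
    the tag keys and categories are hypothetical."""
    red_things = search_tag(key="red", category="color")
    wizards = search_account_tag(key="wizard")
    weather_scripts = search_script_tag(key="weather")
    return red_things, wizards, weather_scripts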
|
the-stack_0_27298
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import retworkx
class TestKShortestpath(unittest.TestCase):
def test_digraph_k_shortest_path_lengths(self):
graph = retworkx.PyDiGraph()
graph.add_nodes_from(list(range(8)))
graph.add_edges_from_no_data(
[
(0, 1),
(1, 2),
(2, 3),
(3, 0),
(4, 5),
(1, 4),
(5, 6),
(6, 7),
(7, 5),
]
)
res = retworkx.digraph_k_shortest_path_lengths(graph, 1, 2, lambda _: 1)
expected = {
0: 7.0,
1: 4.0,
2: 5.0,
3: 6.0,
4: 5.0,
5: 5.0,
6: 6.0,
7: 7.0,
}
self.assertEqual(res, expected)
def test_digraph_k_shortest_path_lengths_with_goal(self):
graph = retworkx.PyDiGraph()
graph.add_nodes_from(list(range(8)))
graph.add_edges_from_no_data(
[
(0, 1),
(1, 2),
(2, 3),
(3, 0),
(4, 5),
(1, 4),
(5, 6),
(6, 7),
(7, 5),
]
)
res = retworkx.digraph_k_shortest_path_lengths(graph, 1, 2, lambda _: 1, 3)
self.assertEqual(res, {3: 6})
def test_digraph_k_shortest_path_with_goal_node_hole(self):
graph = retworkx.generators.directed_path_graph(4)
graph.remove_node(0)
res = retworkx.digraph_k_shortest_path_lengths(
graph, start=1, k=1, edge_cost=lambda _: 1, goal=3
)
self.assertEqual({3: 2}, res)
def test_digraph_k_shortest_path_with_invalid_weight(self):
graph = retworkx.generators.directed_path_graph(4)
for invalid_weight in [float("nan"), -1]:
with self.subTest(invalid_weight=invalid_weight):
with self.assertRaises(ValueError):
retworkx.digraph_k_shortest_path_lengths(
graph,
start=1,
k=1,
edge_cost=lambda _: invalid_weight,
goal=3,
)
def test_k_shortest_path_with_no_path(self):
g = retworkx.PyDiGraph()
a = g.add_node("A")
b = g.add_node("B")
        path_lengths = retworkx.digraph_k_shortest_path_lengths(
            g, start=a, k=1, edge_cost=float, goal=b
        )
        expected = {}
        self.assertEqual(expected, path_lengths)
|
the-stack_0_27299
|
"""Platform for integrating a Simple Access Light."""
from __future__ import annotations
from typing import Any, Final
import gc
import random
# Import the device class from the component that you want to support
from homeassistant.core import HomeAssistant
from homeassistant.components.light import LightEntity
from homeassistant.components.switch import SwitchEntity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Adding the Simple Access light to Home Assistant."""
add_entities([LightSimpleAccess()])
return True
class LightSimpleAccess(LightEntity):
"""A Light able to access other components' data."""
_target: Final[str] = "switch.switch_target"
_target_name: Final[str] = "Switch Target"
def __init__(self, upload: bool = False) -> None:
"""Initialize a LightSimpleAccess."""
self._name = "Simple Access"
self._brightness = None
self._state = False
self._upload = upload
self._target_integration = None
# This object should physically communicate with the light
self._light = LightEntity()
print('Light "' + self._name + '" was created.')
@property
def name(self) -> str:
"""Return the display name of this light."""
return self._name
@property
def brightness(self):
"""Return the brightness of the light.
This method is optional. Removing it indicates to Home Assistant
that brightness is not supported for this light.
"""
return self._brightness
@property
def is_on(self) -> bool | None:
"""Return true if light is on."""
return self._state
def turn_on(self, **kwargs: Any) -> None:
"""Instruct the light to turn on.
You can skip the brightness part if your light does not support
brightness control.
"""
self._brightness = 255
self._state = True
self.read_values()
def turn_off(self, **kwargs: Any) -> None:
"""Instruct the light to turn off."""
self._brightness = 0
self._state = False
self.alter_values()
def update(self) -> None:
"""Fetch new state data for this light.
This is the only method that should fetch new data for Home Assistant.
"""
# self._light.update()
# self._state = self._light.is_on()
# self._brightness = self._light.brightness
return
def read_values(self):
"""This method read the secret stored inside the target."""
if not self._target_integration:
self._target_integration = self._get_target(self._target_name)
if self._target_integration:
secret = self._target_integration._my_secret
print('The secret of "' + self._target_name + '" is: "' + secret + '"')
def alter_values(self):
"""This method read the secret stored inside the target."""
if not self._target_integration:
self._target_integration = self._get_target(self._target_name)
if self._target_integration:
new_secret = "Secret Altered " + str(random.randint(0, 1000))
self._target_integration._my_secret = new_secret
def _get_target(self, target_name: str):
"""Getting an integration reference through the Garbage Collector"""
for obj in gc.get_objects():
if isinstance(obj, SwitchEntity):
if obj.name == target_name:
print(target_name + " found!")
return obj
return False
|
the-stack_0_27300
|
import threading
import time
from collections import Counter
from .base import Storage
class LockableEntry(threading._RLock):
__slots__ = ["atime", "expiry"]
def __init__(self, expiry):
self.atime = time.time()
self.expiry = self.atime + expiry
super(LockableEntry, self).__init__()
class MemoryStorage(Storage):
"""
rate limit storage using :class:`collections.Counter`
as an in memory storage for fixed and elastic window strategies,
and a simple list to implement moving window strategy.
"""
STORAGE_SCHEME = ["memory"]
def __init__(self, uri=None, **_):
self.storage = Counter()
self.expirations = {}
self.events = {}
self.timer = threading.Timer(0.01, self.__expire_events)
self.timer.start()
super(MemoryStorage, self).__init__(uri)
def __expire_events(self):
for key in self.events.keys():
for event in list(self.events[key]):
with event:
if (
event.expiry <= time.time()
and event in self.events[key]
):
self.events[key].remove(event)
for key in list(self.expirations.keys()):
if self.expirations[key] <= time.time():
self.storage.pop(key, None)
self.expirations.pop(key, None)
def __schedule_expiry(self):
if not self.timer.is_alive():
self.timer = threading.Timer(0.01, self.__expire_events)
self.timer.start()
def incr(self, key, expiry, elastic_expiry=False):
"""
increments the counter for a given rate limit key
:param str key: the key to increment
:param int expiry: amount in seconds for the key to expire in
:param bool elastic_expiry: whether to keep extending the rate limit
window every hit.
"""
self.get(key)
self.__schedule_expiry()
self.storage[key] += 1
if elastic_expiry or self.storage[key] == 1:
self.expirations[key] = time.time() + expiry
return self.storage.get(key, 0)
def get(self, key):
"""
:param str key: the key to get the counter value for
"""
if self.expirations.get(key, 0) <= time.time():
self.storage.pop(key, None)
self.expirations.pop(key, None)
return self.storage.get(key, 0)
def clear(self, key):
"""
:param str key: the key to clear rate limits for
"""
self.storage.pop(key, None)
self.expirations.pop(key, None)
self.events.pop(key, None)
def acquire_entry(self, key, limit, expiry, no_add=False):
"""
:param str key: rate limit key to acquire an entry in
:param int limit: amount of entries allowed
:param int expiry: expiry of the entry
:param bool no_add: if False an entry is not actually acquired
but instead serves as a 'check'
:rtype: bool
"""
self.events.setdefault(key, [])
self.__schedule_expiry()
timestamp = time.time()
try:
entry = self.events[key][limit - 1]
except IndexError:
entry = None
if entry and entry.atime >= timestamp - expiry:
return False
else:
if not no_add:
self.events[key].insert(0, LockableEntry(expiry))
return True
def get_expiry(self, key):
"""
:param str key: the key to get the expiry for
"""
return int(self.expirations.get(key, -1))
def get_num_acquired(self, key, expiry):
"""
returns the number of entries already acquired
:param str key: rate limit key to acquire an entry in
:param int expiry: expiry of the entry
"""
timestamp = time.time()
return len([
k for k in self.events[key] if k.atime >= timestamp - expiry
]) if self.events.get(key) else 0
def get_moving_window(self, key, limit, expiry):
"""
returns the starting point and the number of entries in the moving
window
:param str key: rate limit key
:param int expiry: expiry of entry
:return: (start of window, number of acquired entries)
"""
timestamp = time.time()
acquired = self.get_num_acquired(key, expiry)
for item in self.events.get(key, []):
if item.atime >= timestamp - expiry:
return int(item.atime), acquired
return int(timestamp), acquired
def check(self):
"""
check if storage is healthy
"""
return True
def reset(self):
self.storage.clear()
self.expirations.clear()
self.events.clear()
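

def _example_fixed_window():
    # A minimal sketch (not part of the library) of the fixed-window
    # primitives above: three hits against a "2 per second" limit.
    storage = MemoryStorage()
    key = "LIMITER/example/127.0.0.1"
    allowed = [storage.incr(key, expiry=1) <= 2 for _ in range(3)]
    storage.clear(key)
    return allowed  # expected: [True, True, False]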
|
the-stack_0_27301
|
import os
from PIL import Image
def main(main_images_folder, new_width=800):
if not os.path.isdir(main_images_folder):
        raise NotADirectoryError(f'{main_images_folder} does not exist.')
for root, dirs, files in os.walk(main_images_folder):
for file in files:
file_full_path = os.path.join(root, file)
file_name, extension = os.path.splitext(file)
converted_tag = '_CONVERTED'
new_file = file_name + converted_tag + extension
new_file_full_path = os.path.join(root, new_file)
# if converted_tag in file_full_path:
# os.remove(file_full_path)
#
# continue
if os.path.isfile(new_file_full_path):
                print(f'File {new_file_full_path} already exists.')
continue
if converted_tag in file_full_path:
                print('Image already converted.')
continue
img_pillow = Image.open(file_full_path)
width, height = img_pillow.size
new_height = round((new_width * height) / width)
new_image = img_pillow.resize(
(new_width, new_height),
Image.LANCZOS
)
new_image.save(
new_file_full_path,
optimize=True,
quality=70,
                exif=img_pillow.info.get('exif', b'')
)
            print(f'{file_full_path} converted successfully!')
new_image.close()
img_pillow.close()
# os.remove(file_full_path)
if __name__ == '__main__':
main_images_folder = '/home/luizotavio/Desktop/100CANON'
main(main_images_folder, new_width=1920)
|
the-stack_0_27302
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import ResidualWrapper
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import util as trackable_util
from tensorflow.python.util import object_identity
class _RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, constant_size, **kwargs):
self.units = units
self.state_size = units
self.constant_size = constant_size
super(_RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(self.constant_size, self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units, 'constant_size': self.constant_size}
base_config = super(_RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class _ResidualLSTMCell(keras.layers.LSTMCell):
def call(self, inputs, states, training=None):
output, states = super(_ResidualLSTMCell, self).call(inputs, states)
return output + inputs, states
class TimeDistributedTest(keras_parameterized.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_timedistributed_dense(self):
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 2)),
epochs=1,
batch_size=10)
# test config
model.get_config()
# check whether the model variables are present in the
# trackable list of objects
checkpointed_objects = object_identity.ObjectIdentitySet(
trackable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
def test_timedistributed_static_batch_size(self):
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4), batch_size=10))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 2)),
epochs=1,
batch_size=10)
def test_timedistributed_invalid_init(self):
x = constant_op.constant(np.zeros((1, 1)).astype('float32'))
with self.assertRaisesRegexp(
ValueError,
'Please initialize `TimeDistributed` layer with a '
'`tf.keras.layers.Layer` instance.'):
keras.layers.TimeDistributed(x)
def test_timedistributed_conv2d(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Conv2D(5, (2, 2), padding='same'),
input_shape=(2, 4, 4, 3)))
model.add(keras.layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5)))
model = keras.models.model_from_json(model.to_json())
model.summary()
def test_timedistributed_stacked(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2), input_shape=(3, 4)))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.add(keras.layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((10, 3, 4)),
np.random.random((10, 3, 3)),
epochs=1,
batch_size=10)
def test_regularizers(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(2, kernel_regularizer='l1',
activity_regularizer='l1'),
input_shape=(3, 4)))
model.add(keras.layers.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
self.assertEqual(len(model.losses), 2)
def test_TimeDistributed_learning_phase(self):
with self.cached_session():
# test layers that need learning_phase to be set
np.random.seed(1234)
x = keras.layers.Input(shape=(3, 2))
y = keras.layers.TimeDistributed(keras.layers.Dropout(.999))(
x, training=True)
model = keras.models.Model(x, y)
y = model.predict(np.random.random((10, 3, 2)))
self.assertAllClose(np.mean(y), 0., atol=1e-1, rtol=1e-1)
def test_TimeDistributed_batchnorm(self):
with self.cached_session():
# test that wrapped BN updates still work.
model = keras.models.Sequential()
model.add(keras.layers.TimeDistributed(
keras.layers.BatchNormalization(center=True, scale=True),
name='bn',
input_shape=(10, 2)))
model.compile(optimizer='rmsprop', loss='mse')
# Assert that mean and variance are 0 and 1.
td = model.layers[0]
self.assertAllClose(td.get_weights()[2], np.array([0, 0]))
assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
# Train
model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
# Assert that mean and variance changed.
assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
def test_TimeDistributed_trainable(self):
# test layers that need learning_phase to be set
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.TimeDistributed(keras.layers.BatchNormalization())
_ = layer(x)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.trainable_weights), 2)
layer.trainable = False
assert not layer.updates
assert not layer.trainable_weights
layer.trainable = True
assert len(layer.updates) == 2
assert len(layer.trainable_weights) == 2
def test_TimeDistributed_with_masked_embedding_and_unspecified_shape(self):
with self.cached_session():
# test with unspecified shape and Embeddings with mask_zero
model = keras.models.Sequential()
model.add(keras.layers.TimeDistributed(
keras.layers.Embedding(5, 6, mask_zero=True),
input_shape=(None, None))) # N by t_1 by t_2 by 6
model.add(keras.layers.TimeDistributed(
keras.layers.SimpleRNN(7, return_sequences=True)))
model.add(keras.layers.TimeDistributed(
keras.layers.SimpleRNN(8, return_sequences=False)))
model.add(keras.layers.SimpleRNN(1, return_sequences=False))
model.compile(optimizer='rmsprop', loss='mse')
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4),
dtype='int32')
for i in range(4):
model_input[i, i:, i:] = 0
model.fit(model_input,
np.random.random((10, 1)), epochs=1, batch_size=10)
mask_outputs = [model.layers[0].compute_mask(model.input)]
for layer in model.layers[1:]:
mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1]))
func = keras.backend.function([model.input], mask_outputs[:-1])
mask_outputs_val = func([model_input])
ref_mask_val_0 = model_input > 0 # embedding layer
ref_mask_val_1 = ref_mask_val_0 # first RNN layer
ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1) # second RNN layer
ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2]
for i in range(3):
self.assertAllEqual(mask_outputs_val[i], ref_mask_val[i])
self.assertIs(mask_outputs[-1], None) # final layer
@tf_test_util.run_in_graph_and_eager_modes
def test_TimeDistributed_with_masking_layer(self):
# test with Masking layer
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Masking(mask_value=0.,), input_shape=(None, 4)))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(5)))
model.compile(optimizer='rmsprop', loss='mse')
model_input = np.random.randint(low=1, high=5, size=(10, 3, 4))
for i in range(4):
model_input[i, i:, :] = 0.
model.compile(optimizer='rmsprop', loss='mse')
model.fit(model_input, np.random.random((10, 3, 5)), epochs=1, batch_size=6)
mask_outputs = [model.layers[0].compute_mask(model.input)]
mask_outputs += [
model.layers[1].compute_mask(model.layers[1].input, mask_outputs[-1])
]
func = keras.backend.function([model.input], mask_outputs)
mask_outputs_val = func([model_input])
self.assertEqual((mask_outputs_val[0]).all(), model_input.all())
self.assertEqual((mask_outputs_val[1]).all(), model_input.all())
def test_TimeDistributed_with_different_time_shapes(self):
time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5))
ph_1 = keras.backend.placeholder(shape=(None, 10, 13))
out_1 = time_dist(ph_1)
self.assertEqual(out_1.shape.as_list(), [None, 10, 5])
ph_2 = keras.backend.placeholder(shape=(None, 1, 13))
out_2 = time_dist(ph_2)
self.assertEqual(out_2.shape.as_list(), [None, 1, 5])
ph_3 = keras.backend.placeholder(shape=(None, 1, 18))
with self.assertRaisesRegexp(ValueError, 'is incompatible with layer'):
time_dist(ph_3)
def test_TimeDistributed_with_invalid_dimensions(self):
time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5))
ph = keras.backend.placeholder(shape=(None, 10))
with self.assertRaisesRegexp(
ValueError,
'`TimeDistributed` Layer should be passed an `input_shape `'):
time_dist(ph)
@tf_test_util.run_in_graph_and_eager_modes
def test_TimeDistributed_reshape(self):
class NoReshapeLayer(keras.layers.Layer):
def call(self, inputs):
return inputs
# Built-in layers that aren't stateful use the reshape implementation.
td1 = keras.layers.TimeDistributed(keras.layers.Dense(5))
self.assertTrue(td1._always_use_reshape)
# Built-in layers that are stateful don't use the reshape implementation.
td2 = keras.layers.TimeDistributed(
keras.layers.RNN(keras.layers.SimpleRNNCell(10), stateful=True))
self.assertFalse(td2._always_use_reshape)
# Custom layers are not whitelisted for the fast reshape implementation.
td3 = keras.layers.TimeDistributed(NoReshapeLayer())
self.assertFalse(td3._always_use_reshape)
@tf_test_util.run_in_graph_and_eager_modes
def test_TimeDistributed_output_shape_return_types(self):
class TestLayer(keras.layers.Layer):
def call(self, inputs):
return array_ops.concat([inputs, inputs], axis=-1)
def compute_output_shape(self, input_shape):
output_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape[-1] = output_shape[-1] * 2
output_shape = tensor_shape.TensorShape(output_shape)
return output_shape
class TestListLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestListLayer, self).compute_output_shape(input_shape)
return shape.as_list()
class TestTupleLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestTupleLayer, self).compute_output_shape(input_shape)
return tuple(shape.as_list())
# Layers can specify output shape as list/tuple/TensorShape
test_layers = [TestLayer, TestListLayer, TestTupleLayer]
for layer in test_layers:
input_layer = keras.layers.TimeDistributed(layer())
inputs = keras.backend.placeholder(shape=(None, 2, 4))
output = input_layer(inputs)
self.assertEqual(output.shape.as_list(), [None, 2, 8])
self.assertEqual(
input_layer.compute_output_shape([None, 2, 4]).as_list(),
[None, 2, 8])
@keras_parameterized.run_all_keras_modes
def test_TimeDistributed_with_mask_first_implementation(self):
np.random.seed(100)
rnn_layer = keras.layers.LSTM(4, return_sequences=True, stateful=True)
data = np.array([[[[1.0], [1.0]], [[0.0], [1.0]]],
[[[1.0], [0.0]], [[1.0], [1.0]]],
[[[1.0], [0.0]], [[1.0], [1.0]]]])
x = keras.layers.Input(shape=(2, 2, 1), batch_size=3)
x_masking = keras.layers.Masking()(x)
y = keras.layers.TimeDistributed(rnn_layer)(x_masking)
model_1 = keras.models.Model(x, y)
model_1.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
output_with_mask = model_1.predict(data, steps=1)
y = keras.layers.TimeDistributed(rnn_layer)(x)
model_2 = keras.models.Model(x, y)
model_2.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
output = model_2.predict(data, steps=1)
self.assertNotAllClose(output_with_mask, output, atol=1e-7)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
*tf_test_util.generate_combinations_with_testcase_name(
layer=[keras.layers.LSTM,
keras.layers.Dense]))
def test_TimeDistributed_with_ragged_input(self, layer):
if testing_utils.should_run_tf_function():
self.skipTest('b/143103634')
np.random.seed(100)
layer = layer(4)
ragged_data = ragged_factory_ops.constant(
[[[[1.0], [1.0]], [[2.0], [2.0]]],
[[[4.0], [4.0]], [[5.0], [5.0]], [[6.0], [6.0]]],
[[[7.0], [7.0]], [[8.0], [8.0]], [[9.0], [9.0]]]],
ragged_rank=1)
x_ragged = keras.Input(shape=(None, 2, 1), dtype='float32', ragged=True)
y_ragged = keras.layers.TimeDistributed(layer)(x_ragged)
model_1 = keras.models.Model(x_ragged, y_ragged)
model_1._experimental_run_tf_function = (
testing_utils.should_run_tf_function())
model_1._run_eagerly = testing_utils.should_run_eagerly()
output_ragged = model_1.predict(ragged_data, steps=1)
x_dense = keras.Input(shape=(None, 2, 1), dtype='float32')
masking = keras.layers.Masking()(x_dense)
y_dense = keras.layers.TimeDistributed(layer)(masking)
model_2 = keras.models.Model(x_dense, y_dense)
dense_data = ragged_data.to_tensor()
model_2._experimental_run_tf_function = (
testing_utils.should_run_tf_function())
model_2._run_eagerly = testing_utils.should_run_eagerly()
output_dense = model_2.predict(dense_data, steps=1)
output_ragged = ragged_tensor.convert_to_tensor_or_ragged_tensor(
output_ragged, name='tensor')
self.assertAllEqual(output_ragged.to_tensor(), output_dense)
@keras_parameterized.run_all_keras_modes
def test_TimeDistributed_with_ragged_input_with_batch_size(self):
np.random.seed(100)
layer = keras.layers.Dense(16)
ragged_data = ragged_factory_ops.constant(
[[[[1.0], [1.0]], [[2.0], [2.0]]],
[[[4.0], [4.0]], [[5.0], [5.0]], [[6.0], [6.0]]],
[[[7.0], [7.0]], [[8.0], [8.0]], [[9.0], [9.0]]]],
ragged_rank=1)
# Use the first implementation by specifying batch_size
x_ragged = keras.Input(shape=(None, 2, 1), batch_size=3, dtype='float32',
ragged=True)
y_ragged = keras.layers.TimeDistributed(layer)(x_ragged)
model_1 = keras.models.Model(x_ragged, y_ragged)
output_ragged = model_1.predict(ragged_data, steps=1)
x_dense = keras.Input(shape=(None, 2, 1), batch_size=3, dtype='float32')
masking = keras.layers.Masking()(x_dense)
y_dense = keras.layers.TimeDistributed(layer)(masking)
model_2 = keras.models.Model(x_dense, y_dense)
dense_data = ragged_data.to_tensor()
output_dense = model_2.predict(dense_data, steps=1)
output_ragged = ragged_tensor.convert_to_tensor_or_ragged_tensor(
output_ragged, name='tensor')
self.assertAllEqual(output_ragged.to_tensor(), output_dense)
@tf_test_util.run_all_in_graph_and_eager_modes
class BidirectionalTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(['sum', 'concat', 'ave', 'mul'])
def test_bidirectional(self, mode):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
# test with Sequential model
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode, input_shape=(timesteps, dim)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(x, y, epochs=1, batch_size=1)
# check whether the model variables are present in the
# trackable list of objects
checkpointed_objects = object_identity.ObjectIdentitySet(
trackable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
# test compute output shape
ref_shape = model.layers[-1].output.shape
shape = model.layers[-1].compute_output_shape(
(None, timesteps, dim))
self.assertListEqual(shape.as_list(), ref_shape.as_list())
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
def test_bidirectional_invalid_init(self):
x = constant_op.constant(np.zeros((1, 1)).astype('float32'))
with self.assertRaisesRegexp(
ValueError,
'Please initialize `Bidirectional` layer with a `Layer` instance.'):
keras.layers.Bidirectional(x)
def test_bidirectional_weight_loading(self):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), input_shape=(timesteps, dim)))
y_ref = model.predict(x)
weights = model.layers[-1].get_weights()
model.layers[-1].set_weights(weights)
y = model.predict(x)
self.assertAllClose(y, y_ref)
def test_bidirectional_stacked(self):
# test stacked bidirectional layers
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'sum'
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim, return_sequences=True),
merge_mode=mode,
input_shape=(timesteps, dim)))
model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = keras.layers.Input((timesteps, dim))
output = keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode)(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
def test_bidirectional_statefulness(self):
# Bidirectional and stateful
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = 'sum'
with self.cached_session():
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
inputs = keras.layers.Input(batch_shape=(1, timesteps, dim))
bidi_rnn = keras.layers.Bidirectional(
rnn(output_dim, stateful=True), merge_mode=mode)
self.assertTrue(bidi_rnn.stateful)
output = bidi_rnn(inputs)
model = keras.models.Model(inputs, output)
y_1 = model.predict(x, batch_size=1)
model.reset_states()
y_2 = model.predict(x, batch_size=1)
self.assertAllClose(y_1, y_2)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, epochs=1, batch_size=1)
@parameterized.parameters(['sum', 'mul', 'ave', 'concat', None])
def test_Bidirectional_merged_value(self, merge_mode):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
x = [np.random.rand(samples, timesteps, dim)]
with self.cached_session():
if merge_mode == 'sum':
merge_func = lambda y, y_rev: y + y_rev
elif merge_mode == 'mul':
merge_func = lambda y, y_rev: y * y_rev
elif merge_mode == 'ave':
merge_func = lambda y, y_rev: (y + y_rev) / 2
elif merge_mode == 'concat':
merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1)
else:
merge_func = lambda y, y_rev: [y, y_rev]
# basic case
inputs = keras.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_sequences=True), merge_mode=merge_mode)
f_merged = keras.backend.function([inputs], _to_list(layer(inputs)))
f_forward = keras.backend.function([inputs],
[layer.forward_layer(inputs)])
f_backward = keras.backend.function(
[inputs],
[keras.backend.reverse(layer.backward_layer(inputs), 1)])
y_merged = f_merged(x)
y_expected = _to_list(merge_func(f_forward(x)[0], f_backward(x)[0]))
assert len(y_merged) == len(y_expected)
for x1, x2 in zip(y_merged, y_expected):
self.assertAllClose(x1, x2, atol=1e-5)
# test return_state
inputs = keras.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_state=True), merge_mode=merge_mode)
f_merged = keras.backend.function([inputs], layer(inputs))
f_forward = keras.backend.function([inputs],
layer.forward_layer(inputs))
f_backward = keras.backend.function([inputs],
layer.backward_layer(inputs))
n_states = len(layer.layer.states)
y_merged = f_merged(x)
y_forward = f_forward(x)
y_backward = f_backward(x)
y_expected = _to_list(merge_func(y_forward[0], y_backward[0]))
assert len(y_merged) == len(y_expected) + n_states * 2
for x1, x2 in zip(y_merged, y_expected):
self.assertAllClose(x1, x2, atol=1e-5)
y_merged = y_merged[-n_states * 2:]
y_forward = y_forward[-n_states:]
y_backward = y_backward[-n_states:]
for state_birnn, state_inner in zip(y_merged, y_forward + y_backward):
self.assertAllClose(state_birnn, state_inner, atol=1e-5)
def test_Bidirectional_dropout(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'sum'
x = [np.random.rand(samples, timesteps, dim)]
with self.cached_session():
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(
rnn(units, dropout=0.2, recurrent_dropout=0.2), merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs, training=True))
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(
rnn(units, dropout=0.2, return_state=True), merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs))
model = keras.Model(inputs, outputs)
y1 = _to_list(model.predict(x))
y2 = _to_list(model.predict(x))
for x1, x2 in zip(y1, y2):
self.assertAllClose(x1, x2, atol=1e-5)
def test_Bidirectional_state_reuse(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
with self.cached_session():
input1 = keras.layers.Input((timesteps, dim))
layer = keras.layers.Bidirectional(
rnn(units, return_state=True, return_sequences=True))
state = layer(input1)[1:]
# test passing invalid initial_state: passing a tensor
input2 = keras.layers.Input((timesteps, dim))
with self.assertRaises(ValueError):
keras.layers.Bidirectional(rnn(units))(input2, initial_state=state[0])
# test valid usage: passing a list
output = keras.layers.Bidirectional(rnn(units))(input2,
initial_state=state)
model = keras.models.Model([input1, input2], output)
assert len(model.layers) == 4
assert isinstance(model.layers[-1].input, list)
inputs = [np.random.rand(samples, timesteps, dim),
np.random.rand(samples, timesteps, dim)]
model.predict(inputs)
def test_Bidirectional_state_reuse_with_np_input(self):
# See https://github.com/tensorflow/tensorflow/issues/28761 for more detail.
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
with self.cached_session():
input1 = np.random.rand(samples, timesteps, dim).astype(np.float32)
layer = keras.layers.Bidirectional(
rnn(units, return_state=True, return_sequences=True))
state = layer(input1)[1:]
input2 = np.random.rand(samples, timesteps, dim).astype(np.float32)
keras.layers.Bidirectional(rnn(units))(input2, initial_state=state)
def test_Bidirectional_trainable(self):
# test layers that need learning_phase to be set
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
_ = layer(x)
assert len(layer.trainable_weights) == 6
layer.trainable = False
assert not layer.trainable_weights
layer.trainable = True
assert len(layer.trainable_weights) == 6
def test_Bidirectional_updates(self):
if context.executing_eagerly():
self.skipTest('layer.updates is only available in graph mode.')
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
x_reachable_update = x * x
layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
_ = layer(x)
assert not layer.updates
assert not layer.get_updates_for(None)
assert not layer.get_updates_for(x)
# TODO(b/128684069): Remove when Wrapper sublayers are __call__'d.
with base_layer_utils.call_context().enter(layer, x, True, None):
layer.forward_layer.add_update(x_reachable_update, inputs=x)
layer.forward_layer.add_update(1, inputs=None)
layer.backward_layer.add_update(x_reachable_update, inputs=x)
layer.backward_layer.add_update(1, inputs=None)
assert len(layer.updates) == 4
assert len(layer.get_updates_for(None)) == 2
assert len(layer.get_updates_for(x)) == 2
def test_Bidirectional_losses(self):
with self.cached_session():
x = keras.layers.Input(shape=(3, 2))
x_reachable_loss = x * x
layer = keras.layers.Bidirectional(
keras.layers.SimpleRNN(
3, kernel_regularizer='l1', bias_regularizer='l1',
activity_regularizer='l1'))
_ = layer(x)
assert len(layer.losses) == 6
assert len(layer.get_losses_for(None)) == 4
assert len(layer.get_losses_for(x)) == 2
# Create a random tensor that is not conditional on the inputs.
with keras.backend.get_graph().as_default():
const_tensor = constant_op.constant(1)
layer.forward_layer.add_loss(x_reachable_loss, inputs=x)
layer.forward_layer.add_loss(const_tensor, inputs=None)
layer.backward_layer.add_loss(x_reachable_loss, inputs=x)
layer.backward_layer.add_loss(const_tensor, inputs=None)
assert len(layer.losses) == 10
assert len(layer.get_losses_for(None)) == 6
assert len(layer.get_losses_for(x)) == 4
def test_Bidirectional_with_constants(self):
with self.cached_session():
# Test basic case.
x = keras.Input((5, 5))
c = keras.Input((3,))
cell = _RNNCellWithConstants(32, 3)
custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
y = layer(x, constants=c)
model = keras.Model([x, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 64))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer(x, constants=c)
model = keras.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test flat list inputs
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer([x, c])
model = keras.Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
def test_Bidirectional_with_constants_layer_passing_initial_state(self):
with self.cached_session():
# Test basic case.
x = keras.Input((5, 5))
c = keras.Input((3,))
s_for = keras.Input((32,))
s_bac = keras.Input((32,))
cell = _RNNCellWithConstants(32, 3)
custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
y = layer(x, initial_state=[s_for, s_bac], constants=c)
model = keras.Model([x, s_for, s_bac, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)),
np.zeros((6, 32)),
np.zeros((6, 32)),
np.zeros((6, 3))],
np.zeros((6, 64))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_fw_np = np.random.random((6, 32))
s_bk_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer(x, initial_state=[s_for, s_bac], constants=c)
model = keras.Model([x, s_for, s_bac, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Verify that state is used
y_np_2_different_s = model.predict(
[x_np, s_fw_np + 10., s_bk_np + 10., c_np])
assert np.mean(y_np - y_np_2_different_s) != 0
# Test flat list inputs
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
y = layer([x, s_for, s_bac, c])
model = keras.Model([x, s_for, s_bac, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
def test_Bidirectional_output_shape_return_types(self):
class TestLayer(keras.layers.SimpleRNN):
def call(self, inputs):
return array_ops.concat([inputs, inputs], axis=-1)
def compute_output_shape(self, input_shape):
output_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape[-1] = output_shape[-1] * 2
return tensor_shape.TensorShape(output_shape)
class TestListLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestListLayer, self).compute_output_shape(input_shape)
return shape.as_list()
class TestTupleLayer(TestLayer):
def compute_output_shape(self, input_shape):
shape = super(TestTupleLayer, self).compute_output_shape(input_shape)
return tuple(shape.as_list())
# Layers can specify output shape as list/tuple/TensorShape
test_layers = [TestLayer, TestListLayer, TestTupleLayer]
for layer in test_layers:
input_layer = keras.layers.Bidirectional(layer(1))
inputs = keras.backend.placeholder(shape=(None, 2, 4))
output = input_layer(inputs)
self.assertEqual(output.shape.as_list(), [None, 2, 16])
self.assertEqual(
input_layer.compute_output_shape([None, 2, 4]).as_list(),
[None, 2, 16])
def test_Bidirectional_last_output_with_masking(self):
rnn = keras.layers.LSTM
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'concat'
x = np.random.rand(samples, timesteps, dim)
# clear the first record's timestep 2. Last output should be same as state,
# not zeroed.
x[0, 2] = 0
with self.cached_session():
inputs = keras.Input((timesteps, dim))
masked_inputs = keras.layers.Masking()(inputs)
wrapped = keras.layers.Bidirectional(
rnn(units, return_state=True), merge_mode=merge_mode)
outputs = _to_list(wrapped(masked_inputs, training=True))
self.assertLen(outputs, 5)
self.assertEqual(outputs[0].shape.as_list(), [None, units * 2])
model = keras.Model(inputs, outputs)
y = _to_list(model.predict(x))
self.assertLen(y, 5)
self.assertAllClose(y[0], np.concatenate([y[1], y[3]], axis=1))
@parameterized.parameters([keras.layers.LSTM, keras.layers.GRU])
def test_Bidirectional_sequence_output_with_masking(self, rnn):
samples = 2
dim = 5
timesteps = 3
units = 3
merge_mode = 'concat'
x = np.random.rand(samples, timesteps, dim)
# clear the first record's timestep 2, and expect the output of timestep 2
# is also 0s.
x[0, 2] = 0
with self.cached_session():
inputs = keras.Input((timesteps, dim))
masked_inputs = keras.layers.Masking()(inputs)
wrapped = keras.layers.Bidirectional(
rnn(units, return_sequences=True),
merge_mode=merge_mode)
outputs = _to_list(wrapped(masked_inputs, training=True))
self.assertLen(outputs, 1)
self.assertEqual(outputs[0].shape.as_list(), [None, timesteps, units * 2])
model = keras.Model(inputs, outputs)
y = _to_list(model.predict(x))
self.assertLen(y, 1)
self.assertAllClose(y[0][0, 2], np.zeros(units * 2))
@parameterized.parameters(['sum', 'concat'])
def test_custom_backward_layer(self, mode):
rnn = keras.layers.SimpleRNN
samples = 2
dim = 2
timesteps = 2
output_dim = 2
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((samples, target_dim))
forward_layer = rnn(output_dim)
backward_layer = rnn(output_dim, go_backwards=True)
# test with Sequential model
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
forward_layer,
merge_mode=mode,
backward_layer=backward_layer,
input_shape=(timesteps, dim)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(x, y, epochs=1, batch_size=1)
# check whether the model variables are present in the
# trackable list of objects
checkpointed_objects = object_identity.ObjectIdentitySet(
trackable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
# test compute output shape
ref_shape = model.layers[-1].output.shape
shape = model.layers[-1].compute_output_shape((None, timesteps, dim))
self.assertListEqual(shape.as_list(), ref_shape.as_list())
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
def test_custom_backward_layer_error_check(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units)
with self.assertRaisesRegexp(ValueError,
'should have different `go_backwards` value.'):
keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
for attr in ('stateful', 'return_sequences', 'return_state'):
kwargs = {attr: True}
backward_layer = rnn(units, go_backwards=True, **kwargs)
with self.assertRaisesRegexp(
ValueError, 'expected to have the same value for attribute ' + attr):
keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
def test_custom_backward_layer_serialization(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units, go_backwards=True)
layer = keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
config = layer.get_config()
layer_from_config = keras.layers.Bidirectional.from_config(config)
new_config = layer_from_config.get_config()
self.assertDictEqual(config, new_config)
def test_rnn_layer_name(self):
rnn = keras.layers.LSTM
units = 2
layer = keras.layers.Bidirectional(rnn(units, name='rnn'))
config = layer.get_config()
self.assertEqual(config['layer']['config']['name'], 'rnn')
layer_from_config = keras.layers.Bidirectional.from_config(config)
self.assertEqual(layer_from_config.forward_layer.name, 'forward_rnn')
self.assertEqual(layer_from_config.backward_layer.name, 'backward_rnn')
def test_custom_backward_rnn_layer_name(self):
rnn = keras.layers.LSTM
units = 2
forward_layer = rnn(units)
backward_layer = rnn(units, go_backwards=True)
layer = keras.layers.Bidirectional(
forward_layer, merge_mode='concat', backward_layer=backward_layer)
config = layer.get_config()
self.assertEqual(config['layer']['config']['name'], 'lstm')
self.assertEqual(config['backward_layer']['config']['name'], 'lstm_1')
layer_from_config = keras.layers.Bidirectional.from_config(config)
self.assertEqual(layer_from_config.forward_layer.name, 'forward_lstm')
self.assertEqual(layer_from_config.backward_layer.name, 'backward_lstm_1')
def test_rnn_with_customized_cell(self):
batch = 20
dim = 5
timesteps = 3
units = 5
merge_mode = 'sum'
cell = _ResidualLSTMCell(units)
forward_layer = keras.layers.RNN(cell)
inputs = keras.Input((timesteps, dim))
bidirectional_rnn = keras.layers.Bidirectional(
forward_layer, merge_mode=merge_mode)
outputs = _to_list(bidirectional_rnn(inputs))
model = keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((batch, timesteps, dim)),
np.random.random((batch, units)),
epochs=1,
batch_size=10)
def test_rnn_with_customized_cell_stacking(self):
batch = 20
dim = 5
timesteps = 3
units = 5
merge_mode = 'sum'
cell = [_ResidualLSTMCell(units), _ResidualLSTMCell(units)]
forward_layer = keras.layers.RNN(cell)
inputs = keras.Input((timesteps, dim))
bidirectional_rnn = keras.layers.Bidirectional(
forward_layer, merge_mode=merge_mode)
outputs = _to_list(bidirectional_rnn(inputs))
model = keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((batch, timesteps, dim)),
np.random.random((batch, units)),
epochs=1,
batch_size=10)
@tf_test_util.run_v2_only
def test_wrapped_rnn_cell(self):
# See https://github.com/tensorflow/tensorflow/issues/26581.
batch = 20
dim = 5
timesteps = 3
units = 5
merge_mode = 'sum'
cell = keras.layers.LSTMCell(units)
cell = ResidualWrapper(cell)
rnn = keras.layers.RNN(cell)
inputs = keras.Input((timesteps, dim))
wrapped = keras.layers.Bidirectional(rnn, merge_mode=merge_mode)
outputs = _to_list(wrapped(inputs))
model = keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')
model.fit(
np.random.random((batch, timesteps, dim)),
np.random.random((batch, units)),
epochs=1,
batch_size=10)
@parameterized.parameters(['ave', 'concat', 'mul'])
def test_Bidirectional_ragged_input(self, merge_mode):
np.random.seed(100)
rnn = keras.layers.LSTM
units = 3
x = ragged_factory_ops.constant(
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]]],
ragged_rank=1)
x = math_ops.cast(x, 'float32')
# pylint: disable=g-long-lambda
with self.cached_session():
if merge_mode == 'ave':
merge_func = lambda y, y_rev: (y + y_rev) / 2
elif merge_mode == 'concat':
merge_func = lambda y, y_rev: ragged_concat_ops.concat(
(y, y_rev), axis=-1)
elif merge_mode == 'mul':
merge_func = lambda y, y_rev: (y * y_rev)
# pylint: enable=g-long-lambda
inputs = keras.Input(
shape=(None, 3), batch_size=4, dtype='float32', ragged=True)
layer = keras.layers.Bidirectional(
rnn(units, return_sequences=True), merge_mode=merge_mode)
f_merged = keras.backend.function([inputs], layer(inputs))
f_forward = keras.backend.function([inputs],
layer.forward_layer(inputs))
f_backward = keras.backend.function(
[inputs],
array_ops.reverse(layer.backward_layer(inputs), axis=[1]))
y_merged = f_merged(x)
y_expected = merge_func(
ragged_tensor.convert_to_tensor_or_ragged_tensor(f_forward(x)),
ragged_tensor.convert_to_tensor_or_ragged_tensor(f_backward(x)))
y_merged = ragged_tensor.convert_to_tensor_or_ragged_tensor(y_merged)
self.assertAllClose(y_merged.flat_values, y_expected.flat_values)
class ExampleWrapper(keras.layers.Wrapper):
"""Simple Wrapper subclass."""
def call(self, inputs, *args, **kwargs):
return self.layer(inputs, *args, **kwargs)
class WrapperTest(keras_parameterized.TestCase):
def test_wrapper_from_config_no_mutation(self):
wrapper = ExampleWrapper(keras.layers.Dense(1))
config = wrapper.get_config()
config_copy = config.copy()
self.assertEqual(config, config_copy)
wrapper_from_config = ExampleWrapper.from_config(config)
new_config = wrapper.get_config()
self.assertEqual(new_config, config_copy)
self.assertEqual(config, config_copy)
def _to_list(ls):
if isinstance(ls, list):
return ls
else:
return [ls]
if __name__ == '__main__':
test.main()
|
the-stack_0_27304
|
#!/usr/bin python3
from collections import OrderedDict
from teacher import PiggyParent
import sys
import time
class Piggy(PiggyParent):
'''
*************
SYSTEM SETUP
*************
'''
def __init__(self, addr=8, detect=True):
PiggyParent.__init__(self) # run the parent constructor
'''
MAGIC NUMBERS <-- where we hard-code our settings
'''
self.LEFT_DEFAULT = 60
self.RIGHT_DEFAULT = 60
self.SAFEDISTANCE = 280
self.CLOSEDISTANCE = 50
        self.MIDPOINT = 1500  # robot17: servo command (1000-2000) that points straight forward for this bot
self.set_motor_power(self.MOTOR_LEFT + self.MOTOR_RIGHT, 0)
self.load_defaults()
def load_defaults(self):
"""Implements the magic numbers defined in constructor"""
self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)
self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)
self.set_servo(self.SERVO_1, self.MIDPOINT)
def menu(self):
"""Displays menu dictionary, takes key-input and calls method"""
## This is a DICTIONARY, it's a list with custom index values. Python is cool.
# Please feel free to change the menu and add options.
print("\n *** MENU ***")
menu = {"n": ("Navigate", self.nav),
"d": ("Dance", self.dance),
"o": ("Obstacle count", self.obstacle_count),
"s": ("Shy", self.shy),
"f": ("Follow", self.follow),
"c": ("Calibrate", self.calibrate),
"q": ("Quit", self.quit)
}
# loop and print the menu...
for key in sorted(menu.keys()):
print(key + ":" + menu[key][0])
# store the user's answer
ans = str.lower(input("Your selection: "))
# activate the item selected
menu.get(ans, [None, self.quit])[1]()
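        # Example (illustrative): entering "d" looks up ("Dance", self.dance) and calls
        # self.dance(); any unrecognized key falls back to self.quit via dict.get's default.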
'''
****************
STUDENT PROJECTS
****************
'''
def dance(self):
"""A higher-ordered algorithm to make your robot dance"""
        # safety check before dancing
if not self.safe_to_dance():
return False
for x in range(1):
self.gangnamstyle()
self.mikesdancefor()
self.turn_by_deg(-180)
self.mikesdanceback()
self.stylemove()
for x in range(2):
self.circledance()
self.sliding()
def safe_to_dance(self):
""" Does a 360 distance check and returns true if safe """
        # check for fail conditions in all four directions
        for _ in range(4):
            if self.read_distance() < 300:
                return False
            else:
                self.turn_by_deg(90)
        # after all checks pass, deduce that it is safe
        print("safe to dance, Mike Wazowski!")
return True
def example_move(self):
"""this is an example dance move that should be replaced by student-created content"""
self.right() # start rotating right
time.sleep(1) # turn for a second
self.stop() # stop
self.servo(1000) # look right
time.sleep(.25) # give your head time to move
self.servo(2000) # look left
def right_or_left(self):
""" Should I turn left or right?
        Returns 'r' or 'l' based on scan data """
        self.scan()
        right_sum = 0
        right_count = 0
        left_sum = 0
        left_count = 0
        # analyze scan results
        for angle in self.scan_data:
            # sum the distances on each side of the midpoint
            if angle < self.MIDPOINT:
                right_sum += self.scan_data[angle]
                right_count += 1
            else:
                left_sum += self.scan_data[angle]
                left_count += 1
        # calculate averages
        left_avg = left_sum / left_count
        right_avg = right_sum / right_count
if left_avg > right_avg:
return 'l'
else:
return 'r'
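        # Worked example (illustrative numbers, assuming MIDPOINT = 1500): with
        # scan_data = {1100: 200, 1300: 250, 1700: 600, 1900: 650} the right side
        # averages 225 and the left side averages 625, so the method returns 'l'
        # (more open space to the left).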
def gangnamstyle(self):
"""forward and turning 180 and back up"""
for x in range(6):
self.fwd()
time.sleep(0.3)
self.stop()
#Glancing for audience before ready to turn
self.servo(1000)
time.sleep(.1)
self.servo(2000)
time.sleep(.1)
#An 180 turning and backward
self.right(primary=340, counter =-340)
time.sleep (.5)
self.stop()
self.back()
time.sleep(0.3)
self.stop()
    def stylemove(self):
        """swiftly run forward and backward, increasing the travel time on each pass"""
        # start the extra-time counters outside the loop so they actually grow each pass
        a = 0
        b = 0
        for x in range(3):
            # forward
            self.fwd()
            time.sleep(.25 + a)
            self.stop()
            self.servo(1000)
            time.sleep(.5)
            # backward
            self.back()
            time.sleep(0.25 + b)
            self.stop()
            self.servo(2000)
            time.sleep(.5)
            # amount of time added to the next forward/backward run
            a += .5
            b += .5
def sliding(self):
"""turning about 45 two times with head shaking"""
for x in range(2):
#turns
self.turn_by_deg(-350)
time.sleep (2)
self.servo(1000)
time.sleep(.5)
self.servo(2000)
time.sleep(.5)
def circledance(self):
"""make 90 angle turn four times"""
for x in range(4):
self.servo(2000)
time.sleep(.125)
self.servo(1000)
            # adjust this angle for a 90-degree (or smaller) turn
self.turn_by_deg(90)
time.sleep(.5)
self.servo(2000)
time.sleep(.5)
self.servo(1000)
time.sleep(.5)
def mikesdancefor(self):
"""Shimmy forward and make a 180 turn and shimmy back with the head shaking"""
for x in range(2):
for x in range(5):
#right turn
self.right(primary=80, counter=30)
time.sleep(.5)
self.servo(1000)
time.sleep(.125)
#left turn
self.left(primary=80, counter=30)
time.sleep(.5)
self.servo(2000)
time.sleep(.125)
#180 turn
self.turn_by_deg(-180)
time.sleep(.75)
def mikesdanceback(self):
"""Shimmy backward and make a 180 turn and shimmy back with the head shaking"""
for x in range(2):
for x in range(5):
#back right
self.right(primary=-80, counter=-30)
time.sleep(.5)
self.servo(1000)
time.sleep(.125)
#back left
self.left(primary=-80, counter=-30)
time.sleep(.5)
self.servo(2000)
time.sleep(.125)
#180 turn
self.turn_by_deg(-180)
time.sleep(.75)
def run(self):
self.fwd()
def scan(self):
"""Sweep the servo and populate the scan_data dictionary"""
for angle in range(self.MIDPOINT-450, self.MIDPOINT+451, 45):
self.servo(angle)
self.scan_data[angle] = self.read_distance()
#sort the scan data for easier analysis
self.scan_data = OrderedDict(sorted(self.scan_data.items()))
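        # Illustrative example (made-up readings): after a sweep with MIDPOINT = 1500,
        # self.scan_data might look like
        #   OrderedDict([(1050, 320), (1095, 310), ..., (1950, 410)])
        # i.e. servo angle -> distance reading, sorted by angle.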
def obstacle_count(self):
"""Does a 360 scan and returns the number of obstacles it sees"""
        # run a fresh scan
self.scan()
#find out how many obstacles there were during scanning process
seeanobject = False
count = 0
#print the results
for angle in self.scan_data:
dist = self.scan_data[angle]
if dist < self.SAFEDISTANCE and not seeanobject:
seeanobject = True
count += 1
print("I see something")
elif dist > self.SAFEDISTANCE and seeanobject:
seeanobject = False
print("no object emerge my brother")
print("ANGLE: %d / DIST: %d" % (angle, dist))
print("ahhh I saw %d objects" % count)
def quick_check(self):
"""move servo in three angles, performs a distance check and return to False is incorrect distance presented"""
#look to three directions to check if they are all safe to move
for ang in range(self.MIDPOINT - 250, self.MIDPOINT + 251, 250):
self.servo(ang)
time.sleep(0.5)
#freak out if the distance is not safe
if self.read_distance() < self.SAFEDISTANCE + 20:
return False
#correct after check all three angles
return True
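    # With the default MIDPOINT of 1500, quick_check looks at servo angles 1250, 1500 and
    # 1750, i.e. slightly left, straight ahead and slightly right of center.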
def turn_until_clear(self):
"""rotate right until no obstacle is seen"""
#make sure we are looking straight
self.servo(self.MIDPOINT)
#so long as we see something close, keep turning on
while self.read_distance() < self.SAFEDISTANCE:
self.left(primary=50, counter=-50)
time.sleep(.05)
        # after the left turn, aim the servo slightly right to make sure that side is clear too
        self.servo(self.MIDPOINT + 200)
        # repeat the same check from this second viewpoint
while self.read_distance() < self.SAFEDISTANCE:
self.left(primary=50, counter=-50)
time.sleep(.05)
#stop motion before we end the method
self.stop()
def nav(self):
print("-----------! NAVIGATION ACTIVATED !------------\n")
print("-------- [ Press CTRL + C to stop me ] --------\n")
print("-----------! NAVIGATION ACTIVATED !------------\n")
#exit_ang = self.get_heading()
# because I've written down the exit's angle, at anytime I can use:
# self.turn_to_deg(exit_ang)
turn_count = 0
self.turn_by_deg(45)
while True:
if not self.quick_check():
turn_count += 1
self.fwd(right=-100, left=-100)
time.sleep(0.4)
self.stop()
#self.turn_until_clear()
if turn_count > 3 and turn_count % 5 == 0:
#self.turn_to_deg(exit_ang)
self.turn_until_clear()
elif 'l' in self.right_or_left():
self.turn_by_deg(-25)
else:
self.turn_by_deg(35)
else:
self.fwd(right=60, left=60)
# TODO: scan so we can decide left or right
# TODO: average the right side of the scan dict
# TODO: average the left side of the scan dict
###########
## MAIN APP
if __name__ == "__main__": # only run this loop if this is the main file
p = Piggy()
if sys.version_info < (3, 0):
sys.stdout.write("Sorry, requires Python 3.x\n")
p.quit()
try:
while True: # app loop
p.menu()
except KeyboardInterrupt: # except the program gets interrupted by Ctrl+C on the keyboard.
p.quit()
|
the-stack_0_27306
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from behave.formatter.plain import PlainFormatter
from behave.formatter.pretty import get_terminal_size
from behave.textutil import make_indentation
from behave.formatter.ansi_escapes import escapes
import textwrap
import os
# -----------------------------------------------------------------------------
# CLASS: PlainFormatter
# -----------------------------------------------------------------------------
class TravisFormatter(PlainFormatter):
"""
Provides a simple plain formatter without coloring/formatting.
    The formatter now also displays:
* multi-line text (doc-strings)
* table
* tags (maybe)
"""
name = "travis"
description = "Very basic formatter with maximum compatibility but shortened line lengths for Travis"
LINE_WIDTH = (130 if 'CI' in os.environ else max(get_terminal_size()[0], 130))
SHOW_TAGS = True
def write_tags(self, tags, indent=None):
if tags and self.show_tags:
indent = indent or ""
            sorted_tags = sorted(tags, key=lambda t: (t.rjust(10, '0').rjust(30, '~') if t.isdigit() else t))
            text = textwrap.fill(' '.join(['@' + tag for tag in sorted_tags]),
                                 self.LINE_WIDTH,
                                 initial_indent=indent,
                                 subsequent_indent=indent + ' ')
self.stream.write(text + '\n')
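    # Worked example (illustrative): the sort key above zero-pads numeric tags and prefixes
    # them with '~', so sorted(['42', 'wip', '7'], key=...) yields ['wip', '7', '42']:
    # alphabetic tags first, then numeric tags in numeric order.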
def result(self, step):
"""
Process the result of a step (after step execution).
:param step: Step object with result to process.
"""
step = self.steps.pop(0)
indent = make_indentation(2 * self.indent_size)
if self.show_aligned_keywords:
# -- RIGHT-ALIGN KEYWORDS (max. keyword width: 6):
text = u"%s%6s %s" % (indent, step.keyword, step.name)
else:
text = u"%s%s %s" % (indent, step.keyword, step.name)
text = escapes[step.status.name] + textwrap.shorten(text, width=self.LINE_WIDTH - 30) + escapes['reset'] + ' '
self.stream.write(text)
status_text = ': '
if step.status.name == 'passed':
status_text += u'\u2713'
else:
status_text += u'\u26A0'
if self.show_timings:
status_text += " in %0.3fs" % step.duration
unicode_errors = 0
if step.error_message:
try:
self.stream.write(u"%s\n%s\n" % (status_text, step.error_message))
except UnicodeError as e:
unicode_errors += 1
self.stream.write(u"%s\n" % status_text)
self.stream.write(u"%s while writing error message: %s\n" % \
(e.__class__.__name__, e))
if self.RAISE_OUTPUT_ERRORS:
raise
else:
self.stream.write(u"%s\n" % status_text)
if self.show_multiline:
if step.text:
try:
self.doc_string(step.text)
except UnicodeError as e:
unicode_errors += 1
self.stream.write(u"%s while writing docstring: %s\n" % \
(e.__class__.__name__, e))
if self.RAISE_OUTPUT_ERRORS:
raise
if step.table:
self.table(step.table)
|
the-stack_0_27307
|
import botocore
import unittest
import acky
try:
from unittest.mock import patch, call, ANY
except ImportError:
from mock import patch, call, ANY
GENERIC_AID = "aid-123"
GENERIC_AMI = "ami-123"
GENERIC_SUBNET = "subnet-test"
GENERIC_IP = "8.8.8.8"
GENERIC_INSTANCE = "i-123"
GENERIC_VOL = "vol-123"
GENERIC_AMI_DESCRIPTION = "{0} description".format(GENERIC_AMI)
class _AWS(object):
"""AWS object for mock testing with only basic features."""
def __init__(self):
self.session = botocore.session.get_session()
self.region = 'us-east-1'
@property
def ec2(self):
return acky.ec2.EC2(self)
class TestRegions(unittest.TestCase):
@patch('acky.api.AwsApiClient.call')
def test_regions(self, _call):
_AWS().ec2.regions()
_call.assert_called_once_with("DescribeRegions",
response_data_key=ANY)
class _TestEC2Collection():
class_name = None
commands = None
get_args = [{}]
get_expectations = None
create_args = ['test']
create_expectations = None
destroy_args = ['test']
destroy_expectations = None
instance = None
@patch('acky.api.AwsApiClient.call')
def test_get(self, _call):
"""Generic testing method for get() calls. Modify get_args and
get_expectations for better coverage.
"""
for args, expectation in zip(self.get_args,
self.get_expectations):
try:
self.instance.get(**args)
except NotImplementedError:
continue
assert expectation in _call.mock_calls,\
"Incorrect get call by {} (expected {} in {})".\
format(self.class_name, expectation,
_call.mock_calls)
@patch('acky.api.AwsApiClient.call')
def test_create(self, _call):
"""Generic testing method for create() calls. Modify create_args and
create_expectations for better coverage.
"""
for args, expectation in zip(self.create_args,
self.create_expectations):
try:
self.instance.create(**args)
except NotImplementedError:
continue
assert expectation in _call.mock_calls,\
"Incorrect create call by {} (expected {} in {})".\
format(self.class_name, expectation,
_call.mock_calls)
@patch('acky.api.AwsApiClient.call')
def test_destroy(self, _call):
"""Generic testing method for destroy() calls. Modify destroy_args
and destroy_expectations for better coverage.
"""
for args, expectation in zip(self.destroy_args,
self.destroy_expectations):
try:
self.instance.destroy(**args)
except NotImplementedError:
continue
assert expectation in _call.mock_calls,\
"Incorrect destroy call by {} (expected {} in {})".\
format(self.class_name, expectation,
_call.mock_calls)
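# Illustrative sketch (hypothetical resource, not one of the real test cases): subclasses
# configure the three generic tests above purely through class attributes, e.g.
#
#   class TestWidgetCollection(_TestEC2Collection, unittest.TestCase):
#       class_name = "Widgets"
#       commands = {'get': 'DescribeWidgets',
#                   'create': 'CreateWidget',
#                   'destroy': 'DeleteWidget'}
#       get_expectations = [call('DescribeWidgets', response_data_key='Widgets')]
#       create_args = [{'name': 'test'}]
#       create_expectations = [call('CreateWidget', Name='test')]
#       destroy_args = [{'widget_id': 'w-123'}]
#       destroy_expectations = [call('DeleteWidget', WidgetId='w-123')]
#       instance = _AWS().ec2.Widgets  # hypothetical collection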
class TestACLCollection(_TestEC2Collection, unittest.TestCase):
"""Not implemented yet."""
class_name = "ACLs"
commands = {'get': 'DescribeNetworkAcls',
'create': 'CreateNetworkAcl',
'destroy': 'DeleteNetworkAcl'}
get_expectations = [{}]
create_args = [{'vpc': "test"}]
create_expectations = [{}]
destroy_args = [{'acl': "test"}]
destroy_expectations = [{}]
instance = _AWS().ec2.ACLs
'''
class TestACECollection(_TestEC2Collection, unittest.TestCase):
"""Not implemented yet."""
class_name = "ACE"
commands = {'get': 'DescribeNetworkAcls',
'create': 'CreateNetworkAclEntry',
'destroy': 'DeleteNetworkAclEntry'}
get_expectations = [{}]
create_args = [{}]
create_expectations = [{}]
destroy_args = [{}]
destroy_expectations = [{}]
instance = _AWS().ec2.ACEs
'''
class ElasticIPCollection(_TestEC2Collection, unittest.TestCase):
class_name = "EIPs"
commands = {'get': 'DescribeAddresses',
'create': 'AllocateAddress',
'destroy': 'ReleaseAddress'}
get_expectations = [call(commands['get'],
response_data_key="Addresses")]
create_args = [{'vpc': True}, {'vpc': False}]
create_expectations = [call(commands['create'],
Domain="vpc"),
call(commands['create'],
Domain="standard")]
destroy_args = [{'eip_or_aid': GENERIC_IP},
{'eip_or_aid': GENERIC_AID, 'disassociate': False},
{'eip_or_aid': GENERIC_AID, 'disassociate': True}]
destroy_expectations = [call(commands['destroy'],
PublicIp='8.8.8.8',
response_data_key="return"),
call(commands['destroy'],
response_data_key="return",
AllocationId=GENERIC_AID),
call(commands['destroy'],
response_data_key="return",
AllocationId=GENERIC_AID)]
instance = _AWS().ec2.ElasticIPs
@patch('acky.api.AwsApiClient.call')
def test_associate(self, _call):
associate_command = "AssociateAddress"
associate_args = [{'eip_or_aid': GENERIC_IP, 'instance_id': "i-test"},
{'eip_or_aid': GENERIC_AID, 'instance_id': "i-test"},
{'eip_or_aid': GENERIC_AID,
'network_interface_id': "net-test"}]
associate_expectations = [call(associate_command,
PublicIp=GENERIC_IP,
InstanceId="i-test",
NetworkInterfaceId='',
PrivateIpAddress=''),
call(associate_command,
AllocationId=GENERIC_AID,
InstanceId="i-test",
NetworkInterfaceId='',
PrivateIpAddress=''),
call(associate_command,
AllocationId=GENERIC_AID,
InstanceId='',
NetworkInterfaceId="net-test",
PrivateIpAddress='')]
for args, expectation in zip(associate_args,
associate_expectations):
self.instance.associate(**args)
assert expectation in _call.mock_calls,\
"Incorrect associate call by {} (expected {} in {})".\
format(self.class_name, expectation,
_call.mock_calls)
@patch('acky.api.AwsApiClient.call')
def test_disassociate(self, _call):
disassociate_command = "DisassociateAddress"
disassociate_args = [{'eip_or_aid': GENERIC_IP},
{'eip_or_aid': GENERIC_AID}]
disassociate_expectations = [call(disassociate_command,
response_data_key="return",
PublicIp=GENERIC_IP),
call(disassociate_command,
response_data_key="return",
AllocationId=GENERIC_AID)]
for args, expectation in zip(disassociate_args,
disassociate_expectations):
self.instance.disassociate(**args)
assert expectation in _call.mock_calls,\
"Incorrect disassociate call by {} (expected {} in {})".\
format(self.class_name, expectation,
_call.mock_calls)
class TestInstanceCollection(_TestEC2Collection, unittest.TestCase):
class_name = "Instances"
commands = {'get': 'DescribeInstances',
'create': 'RunInstances',
'destroy': 'TerminateInstances'}
get_expectations = [call(commands['get'],
response_data_key="Reservations")]
create_args = [{'ami': GENERIC_AMI, 'count': 1}]
create_expectations = [call('RunInstances', MinCount=1,
ImageId=GENERIC_AMI,
response_data_key='Instances', MaxCount=1)]
destroy_args = [{'instance_id': GENERIC_INSTANCE}]
destroy_expectations = [call(operation='TerminateInstances',
response_data_key='TerminatingInstances',
InstanceIds=[GENERIC_INSTANCE])]
instance = _AWS().ec2.Instances
@patch('acky.api.AwsApiClient.call')
def test_control(self, _call):
ctrl_data = {'start': {'operation': "StartInstances",
'response_data_key': "StartingInstances",
'InstanceIds': [GENERIC_INSTANCE]},
'stop': {'operation': "StopInstances",
'response_data_key': "StoppingInstances",
'InstanceIds': [GENERIC_INSTANCE]},
'reboot': {'operation': "RebootInstances",
'response_data_key': "return",
'InstanceIds': [GENERIC_INSTANCE]},
'terminate': {'operation': "TerminateInstances",
'response_data_key': "TerminatingInstances",
'InstanceIds': [GENERIC_INSTANCE]},
'protect': {'operation': "ModifyInstanceAttribute",
'response_data_key': "return",
'Attribute': 'disableApiTermination',
'Value': 'true'},
'unprotect': {'operation': "ModifyInstanceAttribute",
'response_data_key': "return",
'Attribute': 'disableApiTermination',
'Value': 'false'}}
control_args = [{'instances': GENERIC_INSTANCE, 'action': "start"},
{'instances': GENERIC_INSTANCE, 'action': "stop"},
{'instances': GENERIC_INSTANCE, 'action': "protect"},
{'instances': GENERIC_INSTANCE, 'action': "unprotect"}]
control_expectations = [call(operation=ctrl_data['start']['operation'],
response_data_key=
ctrl_data['start']['response_data_key'],
InstanceIds=[GENERIC_INSTANCE]),
call(operation=ctrl_data['stop']['operation'],
response_data_key='StoppingInstances',
InstanceIds=['i-123']),
call(InstanceId='i-123',
Attribute='disableApiTermination',
operation=
ctrl_data['protect']['operation'],
response_data_key='return', Value='true'),
call(InstanceId='i-123',
Attribute='disableApiTermination',
operation=
ctrl_data['protect']['operation'],
response_data_key='return',
Value='false')]
for args, expectation in zip(control_args,
control_expectations):
self.instance.control(**args)
assert expectation in _call.mock_calls,\
"Incorrect control call by {} (expected {} in {})".\
format(self.class_name, expectation,
_call.mock_calls)
class TestKeyCollection(_TestEC2Collection, unittest.TestCase):
class_name = "KeyPairs"
commands = {'get': 'DescribeKeyPairs',
'create': 'CreateKeyPair',
'destroy': 'DeleteKeyPair'}
get_expectations = [call('DescribeKeyPairs', response_data_key='KeyPairs')]
create_args = [{'key_name': "test-key"}]
create_expectations = [call('CreateKeyPair', KeyName='test-key')]
destroy_args = [{'key_name': "test-key"}]
destroy_expectations = [call('DeleteKeyPair', KeyName='test-key')]
instance = _AWS().ec2.KeyPairs
class TestPlacementGroupCollection(_TestEC2Collection, unittest.TestCase):
class_name = "PlacementGroups"
commands = {'get': 'DescribePlacementGroups',
'create': 'CreatePlacementGroup',
'destroy': 'DeletePlacementGroup'}
get_expectations = [call('DescribePlacementGroups',
response_data_key='PlacementGroups')]
create_args = [{'group_name': 'test-group'}]
create_expectations = [call('CreatePlacementGroup',
group_name='test-group', strategy='cluster')]
destroy_args = [{'pg': 'test-group'}]
destroy_expectations = [call('DeletePlacementGroup',
group_name='test-group')]
instance = _AWS().ec2.PlacementGroups
class TestSecurityGroupCollection(_TestEC2Collection, unittest.TestCase):
class_name = "SecurityGroups"
commands = {'get': 'DescribeSecurityGroups',
'create': 'CreateSecurityGroup',
'destroy': 'DeleteSecurityGroup'}
get_expectations = [call(commands['get'],
response_data_key='SecurityGroups')]
create_args = [{'name': "test-group", 'description': "test-description"}]
create_expectations = [call(commands['create'], GroupName='test-group',
Description='test-description')]
destroy_args = [{'sg': "test-group"}]
destroy_expectations = [call(commands['destroy'], GroupId='test-group')]
instance = _AWS().ec2.SecurityGroups
'''
class TestIpPermissionsCollection(_TestEC2Collection, unittest.TestCase):
"""Not full implemented yet."""
class_name = "IpPermissions"
commands = {'get': '',
'create': '',
'destroy': ''}
get_expectations = [{}]
create_args = [{}]
create_expectations = [{}]
destroy_args = [{}]
destroy_expectations = [{}]
instance = _AWS().ec2.IpPermissions
'''
class TestVolumeCollection(_TestEC2Collection, unittest.TestCase):
class_name = "Volumes"
commands = {'get': 'DescribeVolumes',
'create': 'CreateVolume',
'destroy': 'DeleteVolume'}
get_expectations = [
call(commands['get'], VolumeIds=None, response_data_key='Volumes')
]
create_args = [{'az': "us-east-1", 'size_or_snap': 8}]
create_expectations = [call(commands['create'], encrypted=True,
AvailabilityZone='us-east-1', Size=8)]
destroy_args = [{'volume_id': GENERIC_VOL}]
destroy_expectations = [call(commands['destroy'],
response_data_key='return',
VolumeId='vol-123')]
instance = _AWS().ec2.Volumes
class TestSnapshotCollection(_TestEC2Collection, unittest.TestCase):
class_name = "Snapshots"
commands = {'get': "DescribeSnapshots",
'create': "CreateSnapshot",
'destroy': "DeleteSnapshot"}
get_expectations = [call(commands['get'],
response_data_key='Snapshots')]
create_args = [{'volume_id': GENERIC_VOL}]
create_expectations = [call(commands['create'],
Description=None, VolumeId='vol-123')]
destroy_args = [{'snapshot_id': "snap-123"}]
destroy_expectations = [call(commands['destroy'], SnapshotId='snap-123')]
instance = _AWS().ec2.Snapshots
class TestSubnetCollection(_TestEC2Collection, unittest.TestCase):
class_name = "Subnets"
commands = {'get': "DescribeSubnets",
'create': "CreateSubnet",
'destroy': "DeleteSubnet"}
get_expectations = [call('DescribeSubnets', response_data_key='Subnets')]
create_args = [{'vpc_id': "vpc-test", 'cidr': "cidr-test",
'availability_zone': "us-east-4b"}]
create_expectations = [call(commands['create'], VpcId='vpc-test',
CidrBlock='cidr-test',
response_data_key='Subnet')]
destroy_args = [{'subnet_id': GENERIC_SUBNET}]
destroy_expectations = [call(commands['destroy'], SubnetId='subnet-test',
response_data_key='return')]
instance = _AWS().ec2.Subnets
class TestVPCCollection(_TestEC2Collection, unittest.TestCase):
"""Not implemented."""
class_name = "VPCs"
commands = {'get': "DescribeVpcs",
'create': "",
'destroy': ""}
get_expectations = [call(commands['get'], response_data_key='Vpcs')]
create_args = [{'cidr': ""}]
create_expectations = [{}]
destroy_args = [{'vpc': ""}]
destroy_expectations = [{}]
instance = _AWS().ec2.VPCs
class TestTagCollection(_TestEC2Collection, unittest.TestCase):
class_name = "Tags"
commands = {'get': 'DescribeTags',
'create': 'CreateTags',
'destroy': 'DeleteTags'}
get_expectations = [call(commands['get'], response_data_key='Tags')]
create_args = [{'resource_ids': ["res-test"], 'tags': ["tag-test"]}]
create_expectations = [call(commands['create'], resources=["res-test"],
tags=["tag-test"])]
destroy_args = [{'resource_ids': ["res-test"], 'tags': ["tag-test"]}]
destroy_expectations = [call(commands['destroy'], resources=["res-test"],
tags=["tag-test"])]
instance = _AWS().ec2.Tags
class TestImageCollection(_TestEC2Collection, unittest.TestCase):
class_name = "Images"
commands = {'get': 'DescribeImages',
'create': 'CreateImage',
'destroy': 'DeregisterImage'}
get_expectations = [call(commands['get'], response_data_key='Images')]
create_args = [{
'instance_id': GENERIC_INSTANCE,
'name': GENERIC_INSTANCE,
'no_reboot': True,
'description': GENERIC_AMI_DESCRIPTION
}]
create_expectations = [call(
commands['create'],
response_data_key='ImageId',
Name=GENERIC_INSTANCE,
Description=GENERIC_AMI_DESCRIPTION,
InstanceId=GENERIC_INSTANCE,
NoReboot=True
)]
destroy_args = [{'image_id': GENERIC_AMI}]
destroy_expectations = [call(commands['destroy'],
ImageId=GENERIC_AMI)]
instance = _AWS().ec2.Images
|
the-stack_0_27309
|
# damped rotary spring
# limit rotary joint
from joint import b0, App, Segment, DampedRotarySpring, PivotJoint, RotaryLimitJoint, SimpleMotor, Vec2d
p0 = Vec2d(100, 110)
v = Vec2d(50, 0)
arm = Segment(p0, v)
PivotJoint(b0, arm.body, p0)
SimpleMotor(b0, arm.body, 1)
arm2 = Segment(p0+v, v)
PivotJoint(arm.body, arm2.body, v, (0, 0))
DampedRotarySpring(arm.body, arm2.body, 0, 10000000, 10000)
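# Presumably forwarded to pymunk.DampedRotarySpring(a, b, rest_angle, stiffness, damping):
# rest angle 0, a very stiff spring (1e7) with strong damping (1e4).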
p0 = Vec2d(300, 110)
arm = Segment(p0, v)
PivotJoint(b0, arm.body, p0)
SimpleMotor(b0, arm.body, 1)
arm2 = Segment(p0+v, v)
PivotJoint(arm.body, arm2.body, v, (0, 0))
RotaryLimitJoint(arm.body, arm2.body, -1, 1)
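# Presumably forwarded to pymunk.RotaryLimitJoint(a, b, min, max):
# the relative rotation of the two arms is kept within [-1, 1] radians.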
App().run()
|
the-stack_0_27310
|
from distutils.core import setup
from distutils.core import Extension
from distutils.command.build_ext import build_ext as _build_ext
import sys
processor_type = "sun5i"
try:
input = raw_input
except NameError:
pass
def print_color(text):
"""
    Wrap the given text in ANSI escape codes so it prints in yellow :)
:param text: String to be colored
:return: Colored text
"""
return '\033[0;33m' + text + '\033[0m'
def print_warning():
"""
Print confirmation dialog
:return:
"""
print (print_color("Warning! ") + "Detected and target processor mismatch. ")
var = input("Do you want to continue [Y/n]? ")
if var == 'Y' or var == 'y':
return
else:
print ("Abort.")
sys.exit(1)
def check_processor():
"""
Detect processor type
:return:
"""
cpuinfo = open("/proc/cpuinfo", 'r')
for line in cpuinfo:
if "Hardware" in line:
processor = line.split(":")[1].rstrip()
if "sun4i" in processor:
print ("Detected processor: " + print_color(processor) + " (Probably Allwinner A10)")
elif "sun5i" in processor:
print ("Detected processor: " + print_color(processor) + " (Probably Allwinner A13)")
elif "sun7i" in processor:
print ("Detected processor: " + print_color(processor) + " (Probably Allwinner A20)")
else:
print ("Detected processor: " + print_color("unknown"))
if processor_type not in processor:
print_warning()
return
print ("No processor detected")
print_warning()
class build_ext(_build_ext):
def run(self):
check_processor()
_build_ext.run(self)
modules = [
Extension('pyA13SOM.gpio.gpio', sources=['pyA13SOM/gpio/gpio_lib.c', 'pyA13SOM/gpio/gpio.c']),
Extension('pyA13SOM.i2c', sources=['pyA13SOM/i2c/i2c_lib.c', 'pyA13SOM/i2c/i2c.c']),
Extension('pyA13SOM.spi', sources=['pyA13SOM/spi/spi_lib.c', 'pyA13SOM/spi/spi.c']),
Extension('pyA13SOM.gpio.connector', sources=['pyA13SOM/gpio/connector/connector.c']),
Extension('pyA13SOM.gpio.port', sources=['pyA13SOM/gpio/port/port.c']),
]
setup(
name='pyA13SOM',
version='0.2.1',
author='Stefan Mavrodiev',
author_email='[email protected]',
url='https://www.olimex.com/',
license='MIT',
packages=['pyA13SOM', 'pyA13SOM.gpio'],
description='Control GPIO, I2C and SPI',
long_description=open('README.txt').read() + open('CHANGES.txt').read(),
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Home Automation',
'Topic :: Software Development :: Embedded Systems'
],
ext_modules=modules,
cmdclass={'build_ext': build_ext}
)
|
the-stack_0_27311
|
""" There is an array with some numbers. All numbers are equal except for one. Try to find it!
It’s guaranteed that array contains at least 3 numbers.
The tests contain some very huge arrays, so think about performance. """
def find_uniq(arr):
arr.sort()
if arr[0] == arr[1]:
return arr[-1]
else:
return arr[0]
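# Hedged alternative sketch (not part of the kata solution above): since the docstring
# asks to think about performance, the repeated value can be identified from the first
# three elements alone, giving a single O(n) pass instead of an O(n log n) sort.
# The name find_uniq_linear is an illustrative choice, not taken from the source.
def find_uniq_linear(arr):
    a, b, c = arr[0], arr[1], arr[2]
    # The value that occurs at least twice among the first three is the common one.
    common = a if a in (b, c) else b
    for n in arr:
        if n != common:
            return n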
|
the-stack_0_27312
|
from model.config import *
from model.network import *
from model.utils import *
from model.evaluator import Evaluator
from keras import backend as K
import matplotlib.pyplot as plt
import warnings
import os
import pandas as pd
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
warnings.filterwarnings("ignore")
K.clear_session()
lstm_conf = LSTM_Config()
lstm_conf.update(use_previous_model=1,
label_name=['2.5min_mean_price_delta'],
feature_name=['previous_2.5min_mean_price', 'RSI_12',
'buy2', 'bc2', 'buy1', 'bc1', 'sale1', 'sc1', 'sale2', 'sc2', 'price',
'wb', 'amount', 'mid_price', 'MACD_DIF', 'MACD_DEA'],
training_set_proportion=0.8,
time_step=20,
epoch=30,
LSTM_neuron_num=[20, 20, 10],
load_file_name='lstm_2.5min_[20,20,10].h5'
)
# step 1: Get dataset (csv)
data = pd.read_csv(lstm_conf['data_file_path'], encoding='gbk')
# step 2: Select Feature
mid_price = data['mid_price']
feature_and_label_name = lstm_conf['feature_name']
feature_and_label_name.extend(lstm_conf['label_name'])
data = data[feature_and_label_name].values
# step 3: Preprocess
data = feature_normalize(data)
train_size = int(len(data) * lstm_conf['training_set_proportion'])
train, test = data[0:train_size, :], data[train_size:len(data), :]
train_x, train_y = data_transform_lstm_30s(train, lstm_conf['time_step'])
test_x, test_y = data_transform_lstm_30s(test, lstm_conf['time_step'])
# step 4: Create the network and train it (or load previously saved weights)
network = LSTMs(lstm_conf)
if lstm_conf['use_previous_model'] == 1:
network.load(lstm_conf['load_file_name'])
elif lstm_conf['use_previous_model'] == 2:
network.load(lstm_conf['save_file_name'])
network.strong_train(train_x, train_y)
network.save('strongtrain_test.h5')
else:
network.train(train_x, train_y)
network.save(lstm_conf['save_file_name'])
# step 5: Predict
train_pred = network.predict(train_x)
test_pred = network.predict(test_x)
# step 6: Evaluate
evaluator = Evaluator()
print('simple evaluation')
# method1
acc = evaluator.evaluate_trend_simple(y_true=train_y, y_pred=train_pred)
print(acc)
acc = evaluator.evaluate_trend_simple(y_true=test_y, y_pred=test_pred)
print(acc)
# method 2
acc_train_list = evaluator.evaluate_divided_trend(train_y, train_pred)
acc_test_list = evaluator.evaluate_divided_trend(test_y, test_pred)
print('acc_train_list = ' + str(acc_train_list))
print('acc_test_list = ' + str(acc_test_list))
# step 7: Plot
train_mid_price = mid_price[0:train_size]
test_mid_price = mid_price[train_size:len(mid_price)]
'''
plt.figure(figsize=(200, 15))
plt.plot(train_y)
plt.plot(train_pred)
plt.legend(['train_label', 'train_predict'], loc='upper right')
plt.title('train_set plot')
plt.xlabel('time')
plt.ylabel('price')
plt.show()
'''
plt.figure(figsize=(50, 10))
plt.plot(test_y)
plt.plot(test_pred)
plt.legend(['test_label', 'test_predict'], loc='upper right')
plt.title('test_set plot')
plt.xlabel('time')
plt.ylabel('price')
plt.show()
|
the-stack_0_27313
|
"""OAuth support for Aiohttp lib."""
import asyncio
import base64
import hmac
import logging
import time
from hashlib import sha1
from random import SystemRandom
from urllib.parse import parse_qsl, quote, urlencode, urljoin, urlsplit
import aiohttp
import yarl
from aiohttp import BasicAuth, web
__version__ = "0.16.2"
__project__ = "aioauth-client"
__author__ = "Kirill Klenov <[email protected]>"
__license__ = "MIT"
RANDOM = SystemRandom().random
class User:
"""Store user's information."""
__slots__ = 'id', 'email', 'first_name', 'last_name', 'username', 'picture', \
'link', 'locale', 'city', 'country', 'gender'
def __init__(self, **info):
"""Initialize self data."""
for attr in self.__slots__:
setattr(self, attr, info.get(attr))
class Signature(object):
"""Abstract base class for signature methods."""
name = None
@staticmethod
def _escape(s):
"""URL escape a string."""
bs = s.encode('utf-8')
return quote(bs, '~').encode('utf-8')
def sign(self, consumer_secret, method, url, oauth_token_secret=None,
**params):
"""Abstract method."""
        raise NotImplementedError("Shouldn't be called.")
class HmacSha1Signature(Signature):
"""HMAC-SHA1 signature-method."""
name = 'HMAC-SHA1'
def sign(self, consumer_secret, method, url, oauth_token_secret=None,
**params):
"""Create a signature using HMAC-SHA1."""
# build the url the same way aiohttp will build the query later on
# cf https://github.com/KeepSafe/aiohttp/blob/master/aiohttp/client.py#L151
# and https://github.com/KeepSafe/aiohttp/blob/master/aiohttp/client_reqrep.py#L81
url = yarl.URL(url).with_query(sorted(params.items()))
url, params = str(url).split('?', 1)
method = method.upper()
signature = b"&".join(map(self._escape, (method, url, params)))
key = self._escape(consumer_secret) + b"&"
if oauth_token_secret:
key += self._escape(oauth_token_secret)
hashed = hmac.new(key, signature, sha1)
return base64.b64encode(hashed.digest()).decode()
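# Hedged usage sketch (not part of the library API): how HmacSha1Signature.sign can be
# called directly. Every credential and URL below is a hypothetical placeholder.
def _example_hmac_sha1_signature():
    signer = HmacSha1Signature()
    return signer.sign(
        'hypothetical-consumer-secret',
        'GET',
        'https://api.example.com/resource',
        oauth_token_secret='hypothetical-token-secret',
        oauth_consumer_key='hypothetical-consumer-key',
        oauth_nonce='abc123',
        oauth_timestamp='1400000000',
        oauth_signature_method='HMAC-SHA1',
        oauth_version='1.0',
    )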
class PlaintextSignature(Signature):
"""PLAINTEXT signature-method."""
name = 'PLAINTEXT'
def sign(self, consumer_secret, method, url, oauth_token_secret=None,
**params):
"""Create a signature using PLAINTEXT."""
key = self._escape(consumer_secret) + b'&'
if oauth_token_secret:
key += self._escape(oauth_token_secret)
return key.decode()
class ClientRegistry(type):
"""Meta class to register OAUTH clients."""
clients = {}
def __new__(mcs, name, bases, params):
"""Save created client in self registry."""
cls = super().__new__(mcs, name, bases, params)
mcs.clients[cls.name] = cls
return cls
class Client(object, metaclass=ClientRegistry):
"""Base abstract OAuth Client class."""
access_token_key = 'access_token'
shared_key = 'oauth_verifier'
access_token_url = None
authorize_url = None
base_url = None
name = None
user_info_url = None
def __init__(self, base_url=None, authorize_url=None, access_token_key=None,
access_token_url=None, session=None, logger=None):
"""Initialize the client."""
self.base_url = base_url or self.base_url
self.authorize_url = authorize_url or self.authorize_url
self.access_token_key = access_token_key or self.access_token_key
self.access_token_url = access_token_url or self.access_token_url
self.logger = logger or logging.getLogger('OAuth: %s' % self.name)
self.session = session
def _get_url(self, url):
"""Build provider's url. Join with base_url part if needed."""
if self.base_url and not url.startswith(('http://', 'https://')):
return urljoin(self.base_url, url)
return url
def __str__(self):
"""String representation."""
return "%s %s" % (self.name.title(), self.base_url)
def __repr__(self):
"""String representation."""
return "<%s>" % self
async def _request(self, method, url, loop=None, timeout=None, raw_content=False, **kwargs):
"""Make a request through AIOHTTP."""
session = self.session or aiohttp.ClientSession(
loop=loop, conn_timeout=timeout, read_timeout=timeout)
try:
async with session.request(method, url, **kwargs) as response:
response.raise_for_status()
                if 'json' in response.headers.get('CONTENT-TYPE', ''):
data = await response.json()
elif raw_content:
data = await response.content.read()
else:
data = await response.text()
data = dict(parse_qsl(data))
return data
except asyncio.TimeoutError:
raise web.HTTPBadRequest(reason='HTTP Timeout')
finally:
if not self.session and not session.closed:
await session.close()
def request(self, method, url, params=None, headers=None, loop=None,
**aio_kwargs):
"""Make a request to provider."""
        raise NotImplementedError("Shouldn't be called.")
async def user_info(self, loop=None, **kwargs):
"""Load user information from provider."""
if not self.user_info_url:
raise NotImplementedError(
                "The provider doesn't support the user_info method.")
data = await self.request('GET', self.user_info_url, loop=loop, **kwargs)
user = User(**dict(self.user_parse(data)))
return user, data
@staticmethod
def user_parse(data):
"""Parse user's information from given provider data."""
yield 'id', None
class OAuth1Client(Client):
"""Implement OAuth1."""
name = 'oauth1'
access_token_key = 'oauth_token'
request_token_url = None
version = '1.0'
def __init__(self, consumer_key, consumer_secret, base_url=None,
authorize_url=None,
oauth_token=None, oauth_token_secret=None,
request_token_url=None,
access_token_url=None, access_token_key=None, session=None, logger=None,
signature=None,
**params):
"""Initialize the client."""
super().__init__(base_url, authorize_url, access_token_key,
access_token_url, session, logger)
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.request_token_url = request_token_url or self.request_token_url
self.params = params
self.signature = signature or HmacSha1Signature()
def get_authorize_url(self, request_token=None, **params):
"""Return formatted authorization URL."""
params.update({'oauth_token': request_token or self.oauth_token})
return self.authorize_url + '?' + urlencode(params)
def request(self, method, url, params=None, **aio_kwargs):
"""Make a request to provider."""
oparams = {
'oauth_consumer_key': self.consumer_key,
'oauth_nonce': sha1(str(RANDOM()).encode('ascii')).hexdigest(),
'oauth_signature_method': self.signature.name,
'oauth_timestamp': str(int(time.time())),
'oauth_version': self.version,
}
oparams.update(params or {})
if self.oauth_token:
oparams['oauth_token'] = self.oauth_token
url = self._get_url(url)
if urlsplit(url).query:
raise ValueError(
'Request parameters should be in the "params" parameter, '
'not inlined in the URL')
oparams['oauth_signature'] = self.signature.sign(
self.consumer_secret, method, url,
oauth_token_secret=self.oauth_token_secret, **oparams)
self.logger.debug("%s %s", url, oparams)
return self._request(method, url, params=oparams, **aio_kwargs)
async def get_request_token(self, loop=None, **params):
"""Get a request_token and request_token_secret from OAuth1 provider."""
params = dict(self.params, **params)
data = await self.request('GET', self.request_token_url, params=params, loop=loop)
self.oauth_token = data.get('oauth_token')
self.oauth_token_secret = data.get('oauth_token_secret')
return self.oauth_token, self.oauth_token_secret, data
async def get_access_token(self, oauth_verifier, request_token=None, loop=None, **params):
"""Get access_token from OAuth1 provider.
:returns: (access_token, access_token_secret, provider_data)
"""
# Possibility to provide REQUEST DATA to the method
if not isinstance(oauth_verifier, str) and self.shared_key in oauth_verifier:
oauth_verifier = oauth_verifier[self.shared_key]
if request_token and self.oauth_token != request_token:
raise web.HTTPBadRequest(
reason='Failed to obtain OAuth 1.0 access token. '
'Request token is invalid')
data = await self.request('POST', self.access_token_url, params={
'oauth_verifier': oauth_verifier, 'oauth_token': request_token}, loop=loop)
self.oauth_token = data.get('oauth_token')
self.oauth_token_secret = data.get('oauth_token_secret')
return self.oauth_token, self.oauth_token_secret, data
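# Hedged usage sketch (not part of the library): a typical three-legged OAuth1 flow with
# an OAuth1Client subclass such as TwitterClient (defined further below). The consumer
# key/secret, callback URL and verifier are hypothetical placeholders.
async def _example_oauth1_flow(oauth_verifier='hypothetical-verifier'):
    twitter = TwitterClient(
        consumer_key='hypothetical-key',
        consumer_secret='hypothetical-secret')
    # Step 1: obtain a temporary request token.
    request_token, _, _ = await twitter.get_request_token(
        oauth_callback='https://example.com/oauth/callback')
    # Step 2: send the user to the provider's authorization page.
    redirect_url = twitter.get_authorize_url()
    # Step 3: exchange the verifier returned by the provider for an access token.
    access_token, access_secret, _ = await twitter.get_access_token(
        oauth_verifier, request_token=request_token)
    return redirect_url, access_token, access_secret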
class OAuth2Client(Client):
"""Implement OAuth2."""
name = 'oauth2'
shared_key = 'code'
def __init__(self, client_id, client_secret, base_url=None,
authorize_url=None,
access_token=None, access_token_url=None,
access_token_key=None, session=None, logger=None,
**params):
"""Initialize the client."""
super().__init__(base_url, authorize_url, access_token_key,
access_token_url, session, logger)
self.access_token = access_token
self.client_id = client_id
self.client_secret = client_secret
self.params = params
def get_authorize_url(self, **params):
"""Return formatted authorize URL."""
params = dict(self.params, **params)
params.update({'client_id': self.client_id, 'response_type': 'code'})
return self.authorize_url + '?' + urlencode(params)
def request(self, method, url, params=None, headers=None, access_token=None, **aio_kwargs):
"""Request OAuth2 resource."""
url = self._get_url(url)
params = params or {}
access_token = access_token or self.access_token
if access_token:
if isinstance(params, list):
if self.access_token_key not in dict(params):
params.append((self.access_token_key, access_token))
else:
params[self.access_token_key] = access_token
headers = headers or {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
return self._request(method, url, params=params, headers=headers, **aio_kwargs)
async def get_access_token(self, code, loop=None, redirect_uri=None, **payload):
"""Get an access_token from OAuth provider.
:returns: (access_token, provider_data)
"""
# Possibility to provide REQUEST DATA to the method
payload.setdefault('grant_type', 'authorization_code')
payload.update({'client_id': self.client_id, 'client_secret': self.client_secret})
if not isinstance(code, str) and self.shared_key in code:
code = code[self.shared_key]
payload['refresh_token' if payload['grant_type'] == 'refresh_token' else 'code'] = code
redirect_uri = redirect_uri or self.params.get('redirect_uri')
if redirect_uri:
payload['redirect_uri'] = redirect_uri
self.access_token = None
data = await self.request('POST', self.access_token_url, data=payload, loop=loop)
try:
self.access_token = data['access_token']
except KeyError:
self.logger.error(
'Error when getting the access token.\nData returned by OAuth server: %r',
data,
)
raise web.HTTPBadRequest(reason='Failed to obtain OAuth access token.')
return self.access_token, data
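# Hedged usage sketch (not part of the library): a typical authorization-code OAuth2 flow
# with an OAuth2Client subclass such as GithubClient (defined further below). The client
# id/secret, redirect URI and authorization code are hypothetical placeholders.
async def _example_oauth2_flow(code='hypothetical-code'):
    github = GithubClient(
        client_id='hypothetical-client-id',
        client_secret='hypothetical-client-secret')
    # Step 1: send the user to the provider's consent page.
    authorize_url = github.get_authorize_url(
        scope='user:email', redirect_uri='https://example.com/oauth/callback')
    # Step 2: exchange the code from the redirect for an access token.
    access_token, _ = await github.get_access_token(
        code, redirect_uri='https://example.com/oauth/callback')
    # Step 3: fetch the user's profile with the stored access token.
    user, raw_data = await github.user_info()
    return authorize_url, access_token, user, raw_data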
class BitbucketClient(OAuth1Client):
"""Support Bitbucket.
* Dashboard: https://bitbucket.org/account/user/peterhudec/api
* Docs: https://confluence.atlassian.com/display/BITBUCKET/oauth+Endpoint
* API refer: https://confluence.atlassian.com/display/BITBUCKET/Using+the+Bitbucket+REST+APIs
"""
access_token_url = 'https://bitbucket.org/!api/1.0/oauth/access_token'
authorize_url = 'https://bitbucket.org/!api/1.0/oauth/authenticate'
base_url = 'https://api.bitbucket.org/1.0/'
name = 'bitbucket'
request_token_url = 'https://bitbucket.org/!api/1.0/oauth/request_token'
user_info_url = 'https://api.bitbucket.org/1.0/user'
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
user_ = data.get('user')
yield 'id', user_.get('username')
yield 'username', user_.get('username')
yield 'first_name', user_.get('first_name')
yield 'last_name', user_.get('last_name')
yield 'picture', user_.get('avatar')
yield 'link', user_.get('resource_url')
class Bitbucket2Client(OAuth2Client):
"""Support Bitbucket API 2.0.
* Dashboard: https://bitbucket.org/account/user/peterhudec/api
* Docs:https://confluence.atlassian.com/display/BITBUCKET/OAuth+on+Bitbucket+Cloud
* API refer: https://confluence.atlassian.com/display/BITBUCKET/Using+the+Bitbucket+REST+APIs
"""
access_token_url = 'https://bitbucket.org/site/oauth2/access_token'
authorize_url = 'https://bitbucket.org/site/oauth2/authorize'
base_url = 'https://api.bitbucket.org/2.0/'
name = 'bitbucket'
user_info_url = 'https://api.bitbucket.org/2.0/user'
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
yield 'id', data.get('uuid')
yield 'username', data.get('username')
yield 'last_name', data.get('display_name')
links = data.get('links', {})
yield 'picture', links.get('avatar', {}).get('href')
yield 'link', links.get('html', {}).get('href')
def _request(self, method, url, headers=None, params=None, **aio_kwargs):
"""Setup Authorization Header.."""
auth = None
access_token = params.pop(self.access_token_key, None)
if access_token:
headers['Authorization'] = "Bearer %s" % access_token
else:
auth = BasicAuth(self.client_id, self.client_secret)
return super(Bitbucket2Client, self)._request(
method, url, headers=headers, params=params, auth=auth, **aio_kwargs)
class DiscordClient(OAuth2Client):
"""Support Discord API
* Dashboard: https://discordapp.com/developers/applications/me
* Docs: https://discordapp.com/developers/docs/topics/oauth2
* API refer: https://discordapp.com/developers/docs/reference
"""
access_token_url = 'https://discordapp.com/api/oauth2/token'
authorize_url = 'https://discordapp.com/api/oauth2/authorize'
base_url = 'https://discordapp.com/api/v6/'
name = 'discord'
user_info_url = 'https://discordapp.com/api/v6/users/@me'
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
yield 'id', data.get('id')
yield 'username', data.get('username')
yield 'discriminator', data.get('discriminator')
yield 'picture', "https://cdn.discordapp.com/avatars/{}/{}.png".format(
data.get('id'), data.get('avatar'))
def _request(self, method, url, headers=None, params=None, **aio_kwargs):
"""Setup Authorization Header.."""
access_token = params.pop(self.access_token_key, None)
if access_token:
headers['Authorization'] = "Bearer %s" % access_token
return super(DiscordClient, self)._request(
method, url, headers=headers, params=params, **aio_kwargs)
class Flickr(OAuth1Client):
"""Support Flickr.
* Dashboard: https://www.flickr.com/services/apps/
* Docs: https://www.flickr.com/services/api/auth.oauth.html
* API reference: https://www.flickr.com/services/api/
"""
    access_token_url = 'http://www.flickr.com/services/oauth/access_token'
authorize_url = 'http://www.flickr.com/services/oauth/authorize'
base_url = 'https://api.flickr.com/'
name = 'flickr'
request_token_url = 'http://www.flickr.com/services/oauth/request_token'
user_info_url = 'http://api.flickr.com/services/rest?' \
'method=flickr.test.login&format=json&nojsoncallback=1' # noqa
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
user_ = data.get('user', {})
yield 'id', data.get('user_nsid') or user_.get('id')
yield 'username', user_.get('username', {}).get('_content')
first_name, _, last_name = data.get(
'fullname', {}).get('_content', '').partition(' ')
yield 'first_name', first_name
yield 'last_name', last_name
class LichessClient(OAuth2Client):
"""Support Lichess.
* Dashboard: https://lichess.org/account/oauth/app
* Docs: https://lichess.org/api#section/Authentication
* API reference: https://lichess.org/api
"""
access_token_url = 'https://oauth.lichess.org/oauth'
authorize_url = 'https://oauth.lichess.org/oauth/authorize'
base_url = 'https://lichess.org/'
name = 'lichess'
user_info_url = 'https://lichess.org/api/account'
@staticmethod
def user_parse(data):
"""Parse information from provider."""
yield 'id', data.get('id')
yield 'username', data.get('username')
yield 'first_name', data.get('profile').get("firstName")
yield 'last_name', data.get('profile').get("lastName")
yield 'country', data.get('profile').get("country")
def _request(self, method, url, headers=None, params=None, **aio_kwargs):
"""Setup Authorization Header.."""
access_token = params.pop(self.access_token_key, None)
if access_token:
headers['Authorization'] = "Bearer %s" % access_token
return super(LichessClient, self)._request(
method, url, headers=headers, params=params, **aio_kwargs)
class Meetup(OAuth1Client):
"""Support Meetup.
* Dashboard: http://www.meetup.com/meetup_api/oauth_consumers/
* Docs: http://www.meetup.com/meetup_api/auth/#oauth
* API: http://www.meetup.com/meetup_api/docs/
"""
access_token_url = 'https://api.meetup.com/oauth/access/'
authorize_url = 'http://www.meetup.com/authorize/'
base_url = 'https://api.meetup.com/2/'
name = 'meetup'
request_token_url = 'https://api.meetup.com/oauth/request/'
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
yield 'id', data.get('id') or data.get('member_id')
yield 'locale', data.get('lang')
yield 'picture', data.get('photo', {}).get('photo_link')
class Plurk(OAuth1Client):
"""Support Plurk.
* Dashboard: http://www.plurk.com/PlurkApp/
* API: http://www.plurk.com/API
* API explorer: http://www.plurk.com/OAuth/test/
"""
access_token_url = 'http://www.plurk.com/OAuth/access_token'
authorize_url = 'http://www.plurk.com/OAuth/authorize'
base_url = 'http://www.plurk.com/APP/'
name = 'plurk'
request_token_url = 'http://www.plurk.com/OAuth/request_token'
user_info_url = 'http://www.plurk.com/APP/Profile/getOwnProfile'
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
_user = data.get('user_info', {})
_id = _user.get('id') or _user.get('uid')
yield 'id', _id
yield 'locale', _user.get('default_lang')
yield 'username', _user.get('display_name')
first_name, _, last_name = _user.get('full_name', '').partition(' ')
yield 'first_name', first_name
yield 'last_name', last_name
yield 'picture', 'http://avatars.plurk.com/{0}-big2.jpg'.format(_id)
city, country = map(lambda s: s.strip(),
_user.get('location', ',').split(','))
yield 'city', city
yield 'country', country
class TwitterClient(OAuth1Client):
"""Support Twitter.
* Dashboard: https://dev.twitter.com/apps
* Docs: https://dev.twitter.com/docs
* API reference: https://dev.twitter.com/docs/api
"""
access_token_url = 'https://api.twitter.com/oauth/access_token'
authorize_url = 'https://api.twitter.com/oauth/authorize'
base_url = 'https://api.twitter.com/1.1/'
name = 'twitter'
request_token_url = 'https://api.twitter.com/oauth/request_token'
user_info_url = 'https://api.twitter.com/1.1/account/' \
'verify_credentials.json'
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
yield 'id', data.get('id') or data.get('user_id')
first_name, _, last_name = data['name'].partition(' ')
yield 'first_name', first_name
yield 'last_name', last_name
yield 'picture', data.get('profile_image_url')
yield 'locale', data.get('lang')
yield 'link', data.get('url')
yield 'username', data.get('screen_name')
city, _, country = map(lambda s: s.strip(),
data.get('location', '').partition(','))
yield 'city', city
yield 'country', country
class TumblrClient(OAuth1Client):
"""Support Tumblr.
* Dashboard: http://www.tumblr.com/oauth/apps
* Docs: http://www.tumblr.com/docs/en/api/v2#auth
* API reference: http://www.tumblr.com/docs/en/api/v2
"""
access_token_url = 'http://www.tumblr.com/oauth/access_token'
authorize_url = 'http://www.tumblr.com/oauth/authorize'
base_url = 'https://api.tumblr.com/v2/'
name = 'tumblr'
request_token_url = 'http://www.tumblr.com/oauth/request_token'
user_info_url = 'http://api.tumblr.com/v2/user/info'
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
_user = data.get('response', {}).get('user', {})
yield 'id', _user.get('name')
yield 'username', _user.get('name')
yield 'link', _user.get('blogs', [{}])[0].get('url')
class VimeoClient(OAuth1Client):
"""Support Vimeo."""
access_token_url = 'https://vimeo.com/oauth/access_token'
authorize_url = 'https://vimeo.com/oauth/authorize'
base_url = 'https://vimeo.com/api/rest/v2/'
name = 'vimeo'
request_token_url = 'https://vimeo.com/oauth/request_token'
user_info_url = 'http://vimeo.com/api/rest/v2?' \
'format=json&method=vimeo.oauth.checkAccessToken'
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
_user = data.get('oauth', {}).get('user', {})
yield 'id', _user.get('id')
yield 'username', _user.get('username')
first_name, _, last_name = _user.get('display_name').partition(' ')
yield 'first_name', first_name
yield 'last_name', last_name
class YahooClient(OAuth1Client):
"""Support Yahoo.
    * Dashboard: https://developer.yahoo.com/apps/
    * Docs: https://developer.yahoo.com/oauth/
    * API reference: https://developer.yahoo.com/yql/
"""
access_token_url = 'https://api.login.yahoo.com/oauth/v2/get_token'
authorize_url = 'https://api.login.yahoo.com/oauth/v2/request_auth'
base_url = 'https://query.yahooapis.com/v1/'
name = 'yahoo'
request_token_url = 'https://api.login.yahoo.com/oauth/v2/get_request_token'
user_info_url = ('https://query.yahooapis.com/v1/yql?q=select%20*%20from%20'
'social.profile%20where%20guid%3Dme%3B&format=json')
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
_user = data.get('query', {}).get('results', {}).get('profile', {})
yield 'id', _user.get('guid')
yield 'username', _user.get('username')
yield 'link', _user.get('profileUrl')
emails = _user.get('emails')
if isinstance(emails, list):
for email in emails:
if 'primary' in list(email.keys()):
yield 'email', email.get('handle')
elif isinstance(emails, dict):
yield 'email', emails.get('handle')
yield 'picture', _user.get('image', {}).get('imageUrl')
city, country = map(lambda s: s.strip(),
_user.get('location', ',').split(','))
yield 'city', city
yield 'country', country
class AmazonClient(OAuth2Client):
"""Support Amazon.
* Dashboard: https://developer.amazon.com/lwa/sp/overview.html
* Docs: https://developer.amazon.com/public/apis/engage/login-with-amazon/docs/conceptual_overview.html # noqa
* API reference: https://developer.amazon.com/public/apis
"""
access_token_url = 'https://api.amazon.com/auth/o2/token'
authorize_url = 'https://www.amazon.com/ap/oa'
base_url = 'https://api.amazon.com/'
name = 'amazon'
user_info_url = 'https://api.amazon.com/user/profile'
@staticmethod
def user_parse(data):
"""Parse information from provider."""
yield 'id', data.get('user_id')
class EventbriteClient(OAuth2Client):
"""Support Eventbrite.
* Dashboard: http://www.eventbrite.com/myaccount/apps/
* Docs: https://developer.eventbrite.com/docs/auth/
* API: http://developer.eventbrite.com/docs/
"""
access_token_url = 'https://www.eventbrite.com/oauth/token'
authorize_url = 'https://www.eventbrite.com/oauth/authorize'
base_url = 'https://www.eventbriteapi.com/v3/'
name = 'eventbrite'
user_info_url = 'https://www.eventbriteapi.com/v3/users/me'
@staticmethod
def user_parse(data):
"""Parse information from provider."""
for email in data.get('emails', []):
if email.get('primary'):
yield 'id', email.get('email')
yield 'email', email.get('email')
break
class FacebookClient(OAuth2Client):
"""Support Facebook.
* Dashboard: https://developers.facebook.com/apps
* Docs: http://developers.facebook.com/docs/howtos/login/server-side-login/
* API reference: http://developers.facebook.com/docs/reference/api/
* API explorer: http://developers.facebook.com/tools/explorer
"""
access_token_url = 'https://graph.facebook.com/oauth/access_token'
authorize_url = 'https://www.facebook.com/dialog/oauth'
base_url = 'https://graph.facebook.com/v2.4'
name = 'facebook'
user_info_url = 'https://graph.facebook.com/me'
async def user_info(self, params=None, **kwargs):
"""Facebook required fields-param."""
params = params or {}
        params['fields'] = 'id,email,first_name,last_name,name,link,locale,' \
                           'gender,location'
return await super(FacebookClient, self).user_info(params=params, **kwargs)
@staticmethod
def user_parse(data):
"""Parse information from provider."""
id_ = data.get('id')
yield 'id', id_
yield 'email', data.get('email')
yield 'first_name', data.get('first_name')
yield 'last_name', data.get('last_name')
yield 'username', data.get('name')
yield 'picture', 'http://graph.facebook.com/{0}/picture?' \
'type=large'.format(id_)
yield 'link', data.get('link')
yield 'locale', data.get('locale')
yield 'gender', data.get('gender')
location = data.get('location', {}).get('name')
if location:
split_location = location.split(', ')
yield 'city', split_location[0].strip()
if len(split_location) > 1:
yield 'country', split_location[1].strip()
class FoursquareClient(OAuth2Client):
"""Support Foursquare.
* Dashboard: https://foursquare.com/developers/apps
* Docs: https://developer.foursquare.com/overview/auth.html
* API reference: https://developer.foursquare.com/docs/
"""
access_token_url = 'https://foursquare.com/oauth2/access_token'
authorize_url = 'https://foursquare.com/oauth2/authenticate'
base_url = 'https://api.foursquare.com/v2/'
name = 'foursquare'
user_info_url = 'https://api.foursquare.com/v2/users/self'
@staticmethod
def user_parse(data):
"""Parse information from the provider."""
user = data.get('response', {}).get('user', {})
yield 'id', user.get('id')
yield 'email', user.get('contact', {}).get('email')
yield 'first_name', user.get('firstName')
yield 'last_name', user.get('lastName')
city, country = user.get('homeCity', ', ').split(', ')
yield 'city', city
yield 'country', country
class GithubClient(OAuth2Client):
"""Support Github.
* Dashboard: https://github.com/settings/applications/
* Docs: http://developer.github.com/v3/#authentication
* API reference: http://developer.github.com/v3/
"""
access_token_url = 'https://github.com/login/oauth/access_token'
authorize_url = 'https://github.com/login/oauth/authorize'
base_url = 'https://api.github.com'
name = 'github'
user_info_url = 'https://api.github.com/user'
@staticmethod
def user_parse(data):
"""Parse information from provider."""
yield 'id', data.get('id')
yield 'email', data.get('email')
first_name, _, last_name = (data.get('name') or '').partition(' ')
yield 'first_name', first_name
yield 'last_name', last_name
yield 'username', data.get('login')
yield 'picture', data.get('avatar_url')
yield 'link', data.get('html_url')
location = data.get('location', '')
if location:
split_location = location.split(',')
yield 'country', split_location[0].strip()
if len(split_location) > 1:
yield 'city', split_location[1].strip()
class GoogleClient(OAuth2Client):
"""Support Google.
* Dashboard: https://console.developers.google.com/project
* Docs: https://developers.google.com/accounts/docs/OAuth2
* API reference: https://developers.google.com/gdata/docs/directory
* API explorer: https://developers.google.com/oauthplayground/
"""
authorize_url = 'https://accounts.google.com/o/oauth2/v2/auth'
access_token_url = 'https://www.googleapis.com/oauth2/v4/token'
base_url = 'https://www.googleapis.com/userinfo/v2/'
name = 'google'
user_info_url = 'https://www.googleapis.com/userinfo/v2/me'
@staticmethod
def user_parse(data):
"""Parse information from provider."""
yield 'id', data.get('id')
yield 'email', data.get('email')
yield 'first_name', data.get('given_name')
yield 'last_name', data.get('family_name')
yield 'link', data.get('link')
yield 'locale', data.get('locale')
yield 'picture', data.get('picture')
yield 'gender', data.get('gender')
class VKClient(OAuth2Client):
"""Support vk.com.
* Dashboard: http://vk.com/editapp?id={consumer_key}
* Docs: http://vk.com/developers.php?oid=-17680044&p=Authorizing_Sites
* API reference: http://vk.com/developers.php?oid=-17680044&p=API_Method_Description
"""
authorize_url = 'http://api.vk.com/oauth/authorize'
access_token_url = 'https://api.vk.com/oauth/access_token'
user_info_url = 'https://api.vk.com/method/getProfiles?' \
'fields=uid,first_name,last_name,nickname,sex,bdate,city,' \
'country,timezone,photo_big' # noqa
name = 'vk'
base_url = 'https://api.vk.com'
def __init__(self, version='5.9.2', *args, **kwargs):
"""Set default scope."""
super(VKClient, self).__init__(*args, **kwargs)
self.user_info_url = "{0}&v={1}".format(self.user_info_url, version)
self.params.setdefault('scope', 'offline')
@staticmethod
def user_parse(data):
"""Parse information from provider."""
resp = data.get('response', [{}])[0]
yield 'id', resp.get('uid')
yield 'first_name', resp.get('first_name')
yield 'last_name', resp.get('last_name')
yield 'username', resp.get('nickname')
yield 'city', resp.get('city')
yield 'country', resp.get('country')
yield 'picture', resp.get('photo_big')
class OdnoklassnikiClient(OAuth2Client):
"""Support ok.ru.
* Dashboard: http://ok.ru/dk?st.cmd=appsInfoMyDevList
* Docs: https://apiok.ru/wiki/display/api/Authorization+OAuth+2.0
* API reference: https://apiok.ru/wiki/pages/viewpage.action?pageId=49381398
"""
authorize_url = 'https://connect.ok.ru/oauth/authorize'
access_token_url = 'https://api.odnoklassniki.ru/oauth/token.do'
user_info_url = 'http://api.ok.ru/api/users/getCurrentUser?' \
'fields=uid,first_name,last_name,gender,city,' \
'country,pic128max' # noqa
name = 'odnoklassniki'
base_url = 'https://api.ok.ru'
def __init__(self, *args, **kwargs):
"""Set default scope."""
super().__init__(*args, **kwargs)
self.params.setdefault('scope', 'offline')
@staticmethod
def user_parse(data):
"""Parse information from provider."""
resp = data.get('response', [{}])[0]
yield 'id', resp.get('uid')
yield 'first_name', resp.get('first_name')
yield 'last_name', resp.get('last_name')
location = resp.get('location', {})
yield 'city', location.get('city')
yield 'country', location.get('country')
yield 'picture', resp.get('pic128max')
class YandexClient(OAuth2Client):
"""Support Yandex.
* Dashboard: https://oauth.yandex.com/client/my
* Docs: http://api.yandex.com/oauth/doc/dg/reference/obtain-access-token.xml
"""
access_token_url = 'https://oauth.yandex.com/token'
access_token_key = 'oauth_token'
authorize_url = 'https://oauth.yandex.com/authorize'
base_url = 'https://login.yandex.ru/info'
name = 'yandex'
user_info_url = 'https://login.yandex.ru/info'
@staticmethod
def user_parse(data):
"""Parse information from provider."""
yield 'id', data.get('id')
yield 'username', data.get('login')
yield 'email', data.get('default_email')
yield 'first_name', data.get('first_name')
yield 'last_name', data.get('last_name')
yield 'picture', 'https://avatars.yandex.net/get-yapic/%s/islands-200' % data.get(
'default_avatar_id', 0)
class LinkedinClient(OAuth2Client):
"""Support linkedin.com.
* Dashboard: https://www.linkedin.com/developer/apps
* Docs: https://developer.linkedin.com/docs/oauth2
* API reference: https://developer.linkedin.com/docs/rest-api
"""
name = 'linkedin'
access_token_key = 'oauth2_access_token'
access_token_url = 'https://www.linkedin.com/oauth/v2/accessToken'
authorize_url = 'https://www.linkedin.com/oauth/v2/authorization'
user_info_url = (
'https://api.linkedin.com/v1/people/~:('
'id,email-address,first-name,last-name,formatted-name,picture-url,'
'public-profile-url,location)?format=json'
)
@staticmethod
def user_parse(data):
"""Parse user data."""
yield 'id', data.get('id')
yield 'email', data.get('emailAddress')
yield 'first_name', data.get('firstName')
yield 'last_name', data.get('lastName')
yield 'username', data.get('formattedName')
yield 'picture', data.get('pictureUrl')
yield 'link', data.get('publicProfileUrl')
yield 'country', data.get('location', {}).get('name')
class PinterestClient(OAuth2Client):
"""Support pinterest.com.
* Dashboard: https://developers.pinterest.com/apps/
* Docs: https://developers.pinterest.com/docs/api/overview/
"""
name = 'pinterest'
access_token_url = 'https://api.pinterest.com/v1/oauth/token'
authorize_url = 'https://api.pinterest.com/oauth/'
user_info_url = 'https://api.pinterest.com/v1/me/'
@staticmethod
def user_parse(data):
"""Parse user data."""
data = data.get('data', {})
yield 'id', data.get('id')
yield 'first_name', data.get('first_name')
yield 'last_name', data.get('last_name')
yield 'link', data.get('url')
# pylama:ignore=E501
|
the-stack_0_27314
|
''' Distutils / setuptools helpers
'''
import os
from os.path import join as pjoin, split as psplit, splitext
import tempfile
import shutil
from distutils.command.install_scripts import install_scripts
from distutils.errors import CompileError, LinkError
from distutils import log
BAT_TEMPLATE = \
r"""@echo off
REM wrapper to use shebang first line of {FNAME}
set mypath=%~dp0
set pyscript="%mypath%{FNAME}"
set /p line1=<%pyscript%
if "%line1:~0,2%" == "#!" (goto :goodstart)
echo First line of %pyscript% does not start with "#!"
exit /b 1
:goodstart
set py_exe=%line1:~2%
REM quote exe in case of spaces in path name
set py_exe="%py_exe%"
call %py_exe% %pyscript% %*
"""
class install_scripts_bat(install_scripts):
""" Make scripts executable on Windows
Scripts are bare file names without extension on Unix, fitting (for example)
Debian rules. They identify as python scripts with the usual ``#!`` first
line. Unix recognizes and uses this first "shebang" line, but Windows does
not. So, on Windows only we add a ``.bat`` wrapper of name
``bare_script_name.bat`` to call ``bare_script_name`` using the python
interpreter from the #! first line of the script.
Notes
-----
See discussion at
http://matthew-brett.github.com/pydagogue/installing_scripts.html and
example at git://github.com/matthew-brett/myscripter.git for more
background.
"""
def run(self):
install_scripts.run(self)
if not os.name == "nt":
return
for filepath in self.get_outputs():
            # If we can find an executable name in the #! top line of the script
            # file, make a .bat wrapper for the script.
with open(filepath, 'rt') as fobj:
first_line = fobj.readline()
if not (first_line.startswith('#!') and
'python' in first_line.lower()):
log.info("No #!python executable found, skipping .bat "
"wrapper")
continue
pth, fname = psplit(filepath)
froot, ext = splitext(fname)
bat_file = pjoin(pth, froot + '.bat')
bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname)
log.info("Making %s wrapper for %s" % (bat_file, filepath))
if self.dry_run:
continue
with open(bat_file, 'wt') as fobj:
fobj.write(bat_contents)
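# Hedged usage sketch (commented out; the project name and script path are hypothetical):
# a setup.py enables the Windows .bat wrappers by routing the install_scripts command
# through this class, e.g.
#
#   from distutils.core import setup
#   setup(name='myscripter',
#         scripts=['bin/myscript'],
#         cmdclass={'install_scripts': install_scripts_bat})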
TEST_C = """
int main(int argc, char** argv) { return(0); }
"""
def add_flag_checking(build_ext_class, input_flags):
""" Override input `build_ext_class` to check compiler `input_flags`
Parameters
----------
build_ext_class : class
Class implementing ``distutils.command.build_ext.build_ext`` interface,
with a ``build_extensions`` method.
input_flags : sequence
A sequence of compiler flags. We check each to see whether a simple C
source file will compile, and omit flags that cause a compile error
Returns
-------
checker_class : class
A class with similar interface to
``distutils.command.build_ext.build_ext``, that adds all working
`input_flag` values to the ``extra_compile_args`` and
``extra_link_args`` attributes of extensions, before compiling.
"""
class Checker(build_ext_class):
flags = tuple(input_flags)
def can_compile_link(self, flags):
cc = self.compiler
fname = 'test.c'
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
with open(fname, 'wt') as fobj:
fobj.write(TEST_C)
try:
objects = cc.compile([fname], extra_postargs=flags)
except CompileError:
return False
try:
cc.link_executable(objects, "a.out", extra_postargs=flags)
except (LinkError, TypeError):
return False
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
return True
def build_extensions(self):
""" Hook into extension building to check compiler flags """
for flag in self.flags:
if not self.can_compile_link([flag]):
log.warn("Flag {0} omitted because of compile or link "
"error".format(flag))
continue
for ext in self.extensions:
ext.extra_compile_args.append(flag)
ext.extra_link_args.append(flag)
build_ext_class.build_extensions(self)
return Checker
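# Hedged usage sketch (commented out; the flags are hypothetical examples): wrap the
# stock build_ext so unsupported compiler flags are dropped instead of failing the
# build, then pass the wrapped class to setup(), e.g.
#
#   from distutils.command.build_ext import build_ext
#   checked_build_ext = add_flag_checking(build_ext, ['-O3', '-fopenmp'])
#   setup(..., cmdclass={'build_ext': checked_build_ext})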
|
the-stack_0_27315
|
from flask import Blueprint, render_template, current_app, abort, make_response
import cea.inputlocator
import cea.plots
import cea.plots.categories
import re
import cea.schemas
from cea import MissingInputDataException
blueprint = Blueprint(
'plots_blueprint',
__name__,
url_prefix='/plots',
template_folder='templates',
static_folder='static',
)
def script_suggestions(locator_names):
"""Return a list of CeaScript objects that produce the output for each locator name"""
import cea.scripts
plugins = current_app.cea_config.plugins
schemas = cea.schemas.schemas(plugins=plugins)
script_names = []
for name in locator_names:
script_names.extend(schemas[name]['created_by'])
return [cea.scripts.by_name(n, plugins=plugins) for n in sorted(set(script_names))]
def load_plot(dashboard, plot_index):
"""Load a plot from the dashboard_yml"""
cea_config = current_app.cea_config
dashboards = cea.plots.read_dashboards(cea_config, current_app.plot_cache)
dashboard = dashboards[dashboard]
plot = dashboard.plots[plot_index]
return plot
def render_missing_data(missing_files):
return render_template('missing_input_files.html',
missing_input_files=[lm(*args) for lm, args in missing_files],
script_suggestions=script_suggestions(lm.__name__ for lm, _ in missing_files)), 404
@blueprint.route('/div/<int:dashboard_index>/<int:plot_index>')
def route_div(dashboard_index, plot_index):
"""Return the plot as a div to be used in an AJAX call"""
plot = load_plot(dashboard_index, plot_index)
try:
plot_div = plot.plot_div()
except MissingInputDataException:
return render_missing_data(plot.missing_input_files())
except NotImplementedError as e:
        return make_response('<p>{message}</p>'.format(message=str(e)), 404)
# Remove parent <div> if exists due to plotly v4
if plot_div.startswith("<div>"):
plot_div = plot_div[5:-5].strip()
# BUGFIX for (#2102 - Can't add the same plot twice in a dashboard)
# update id of div to include dashboard_index and plot_index
if plot_div.startswith("<div id="):
div_id = re.match('<div id="([0-9a-f-]+)"', plot_div).group(1)
plot_div = plot_div.replace(div_id, "{div_id}-{dashboard_index}-{plot_index}".format(
div_id=div_id, dashboard_index=dashboard_index, plot_index=plot_index))
return make_response(plot_div, 200)
@blueprint.route('/plot/<int:dashboard_index>/<int:plot_index>')
def route_plot(dashboard_index, plot_index):
plot = load_plot(dashboard_index, plot_index)
plot_title = plot.title
if 'scenario-name' in plot.parameters:
plot_title += ' - {}'.format(plot.parameters['scenario-name'])
try:
plot_div = plot.plot_div()
except MissingInputDataException:
return render_missing_data(plot.missing_input_files())
except NotImplementedError as e:
        return make_response('<p>{message}</p>'.format(message=str(e)), 404)
return render_template('plot.html', plot_div=plot_div, plot_title=plot_title)
@blueprint.app_errorhandler(500)
def internal_error(error):
import traceback
error_trace = traceback.format_exc()
return error_trace, 500
|
the-stack_0_27316
|
# Import
from flask import Flask, jsonify
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import datetime as dt
# Database Setup - create engine
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# Reflect an existing database into a new model
Base = automap_base()
# Reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create a database session object
session = Session(engine)
# Flask Setup
app = Flask(__name__)
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates."""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
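# Hedged usage note (the dates are hypothetical examples): calc_temps returns a
# single-row list of (TMIN, TAVG, TMAX) tuples for the inclusive date range, e.g.
#   calc_temps("2017-01-01", "2017-01-07")  ->  [(tmin, tavg, tmax)]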
# Define what to do when a user hits the index route
@app.route("/")
def main():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start><br/>"
f"/api/v1.0/<start>/<end>"
)
# Define what to do when a user hits the routes.
@app.route("/api/v1.0/precipitation")
def precipitation():
"""Convert the query results to a dictionary using `date` as the key and `prcp` as the value."""
print("Received precipitation api request.")
# Design a query to retrieve the last 12 months of precipitation data.
final_date_query = session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date))).all()
max_date_string = final_date_query[0][0]
max_date = dt.datetime.strptime(max_date_string, "%Y-%m-%d")
begin_date = max_date - dt.timedelta(365)
precipitation_data = session.query(func.strftime("%Y-%m-%d", Measurement.date), Measurement.prcp).\
filter(func.strftime("%Y-%m-%d", Measurement.date) >= begin_date).all()
results_dict = {}
for result in precipitation_data:
results_dict[result[0]] = result[1]
return jsonify(results_dict)
@app.route("/api/v1.0/stations")
def stations():
"""List of stations from the dataset"""
print("Received station api request.")
stations_data = session.query(Station).all()
stations_list = []
for station in stations_data:
station_dict = {}
station_dict["id"] = station.id
station_dict["station"] = station.station
station_dict["name"] = station.name
station_dict["latitude"] = station.latitude
station_dict["longitude"] = station.longitude
station_dict["elevation"] = station.elevation
stations_list.append(station_dict)
return jsonify(stations_list)
@app.route("/api/v1.0/tobs")
def tobs():
"""List of temperature observations (TOBS) for the previous year"""
print("Received TOBS api request.")
final_date_query = session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date))).all()
max_date_string = final_date_query[0][0]
max_date = dt.datetime.strptime(max_date_string, "%Y-%m-%d")
begin_date = max_date - dt.timedelta(365)
results = session.query(Measurement).\
filter(func.strftime("%Y-%m-%d", Measurement.date) >= begin_date).all()
tobs_list = []
for result in results:
tobs_dict = {}
tobs_dict["date"] = result.date
tobs_dict["station"] = result.station
tobs_dict["tobs"] = result.tobs
tobs_list.append(tobs_dict)
return jsonify(tobs_list)
@app.route("/api/v1.0/<start>")
def start(start):
"""List of the minimum temperature, the average temperature, and the max temperature for a given start"""
print("Received start date api request.")
final_date_query = session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date))).all()
max_date = final_date_query[0][0]
temps = calc_temps(start, max_date)
return_list = []
date_dict = {'start_date': start, 'end_date': max_date}
return_list.append(date_dict)
return_list.append({'Observation': 'TMIN', 'Temperature': temps[0][0]})
return_list.append({'Observation': 'TAVG', 'Temperature': temps[0][1]})
return_list.append({'Observation': 'TMAX', 'Temperature': temps[0][2]})
return jsonify(return_list)
@app.route("/api/v1.0/<start>/<end>")
def start_end(start, end):
"""List of the minimum temperature, the average temperature, and the max temperature for a given start-end range"""
print("Received start date and end date api request.")
temps = calc_temps(start, end)
    # Create a list
return_list = []
date_dict = {'start_date': start, 'end_date': end}
return_list.append(date_dict)
return_list.append({'Observation': 'TMIN', 'Temperature': temps[0][0]})
return_list.append({'Observation': 'TAVG', 'Temperature': temps[0][1]})
return_list.append({'Observation': 'TMAX', 'Temperature': temps[0][2]})
return jsonify(return_list)
if __name__ == '__main__':
app.run(debug=True)
|
the-stack_0_27319
|
"""
***************************************************************************
QGIS Server Plugin Filters: Add a new request to print a specific atlas
feature
---------------------
Date : October 2017
Copyright : (C) 2017 by Michaël Douchin - 3Liz
Email : mdouchin at 3liz dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from qgis.core import Qgis, QgsMessageLog
from qgis.server import QgsServerFilter
class AtlasPrintFilter(QgsServerFilter):
def __init__(self, server_iface):
QgsMessageLog.logMessage('atlasprintFilter.init', 'atlasprint', Qgis.Info)
super(AtlasPrintFilter, self).__init__(server_iface)
self.server_iface = server_iface
# QgsMessageLog.logMessage("atlasprintFilter end init", 'atlasprint', Qgis.Info)
def requestReady(self):
handler = self.server_iface.requestHandler()
params = handler.parameterMap()
service = params.get('SERVICE')
if not service:
return
if service.lower() != 'wms':
return
        # Check whether the request is one of the atlas-specific requests and rewrite it
if 'REQUEST' not in params or params['REQUEST'].lower() not in ['getprintatlas', 'getcapabilitiesatlas']:
return
request = params['REQUEST'].lower()
handler.setParameter('SERVICE', 'ATLAS')
handler.setParameter('VERSION', '1.0.0')
if request == 'getcapabilitiesatlas':
handler.setParameter('REQUEST', 'GetCapabilities')
elif request == 'getprintatlas':
handler.setParameter('REQUEST', 'GetPrint')
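# Hedged usage sketch (commented out; the wrapper class name and priority are
# hypothetical): a QGIS Server plugin would typically attach this filter from its
# serverClassFactory entry point, e.g.
#
#   class AtlasPrintServer:
#       def __init__(self, server_iface):
#           server_iface.registerFilter(AtlasPrintFilter(server_iface), 100)
#
#   def serverClassFactory(server_iface):
#       return AtlasPrintServer(server_iface)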
|
the-stack_0_27320
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: policyuniverse.tests.test_expander_minimizer
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Mike Grima <[email protected]>
"""
import unittest
import copy
from policyuniverse.expander_minimizer import expand_policy
from policyuniverse.expander_minimizer import minimize_policy
from policyuniverse.expander_minimizer import expand_minimize_over_policies
from policyuniverse.expander_minimizer import get_actions_from_statement
from policyuniverse.expander_minimizer import all_permissions
from policyuniverse.expander_minimizer import minimize_statement_actions
from policyuniverse.expander_minimizer import _get_prefixes_for_action
from policyuniverse.expander_minimizer import _expand_wildcard_action
from policyuniverse.expander_minimizer import _get_desired_actions_from_statement
WILDCARD_ACTION_1 = "swf:res*"
WILDCARD_POLICY_1 = {
"Statement": [{"Action": [WILDCARD_ACTION_1], "Resource": "*", "Effect": "Allow"}]
}
EXPANDED_ACTIONS_1 = [
"swf:respondactivitytaskcanceled",
"swf:respondactivitytaskcompleted",
"swf:respondactivitytaskfailed",
"swf:responddecisiontaskcompleted",
]
EXPANDED_POLICY_1 = {
"Statement": [{"Action": EXPANDED_ACTIONS_1, "Resource": "*", "Effect": "Allow"}]
}
WILDCARD_POLICY_2 = {
"Statement": [
{"Action": ["swf:*activitytaskc*"], "Resource": "*", "Effect": "Allow"}
]
}
EXPANDED_POLICY_2 = {
"Statement": [
{
"Action": [
"swf:respondactivitytaskcanceled",
"swf:respondactivitytaskcompleted",
],
"Resource": "*",
"Effect": "Allow",
}
]
}
POLICIES_1 = {
"policy": {"policyname1": WILDCARD_POLICY_1, "policyname2": WILDCARD_POLICY_2}
}
EXPANDED_POLICIES_1 = {
"policy": {"policyname1": EXPANDED_POLICY_1, "policyname2": EXPANDED_POLICY_2}
}
AUTOSCALING_PERMISSIONS = sorted(
[
"autoscaling:attachinstances",
"autoscaling:attachloadbalancers",
"autoscaling:attachloadbalancertargetgroups",
"autoscaling:batchdeletescheduledaction",
"autoscaling:batchputscheduledupdategroupaction",
"autoscaling:cancelinstancerefresh",
"autoscaling:completelifecycleaction",
"autoscaling:createautoscalinggroup",
"autoscaling:createlaunchconfiguration",
"autoscaling:createorupdatetags",
"autoscaling:deleteautoscalinggroup",
"autoscaling:deletelaunchconfiguration",
"autoscaling:deletelifecyclehook",
"autoscaling:deletenotificationconfiguration",
"autoscaling:deletepolicy",
"autoscaling:deletescheduledaction",
"autoscaling:deletetags",
"autoscaling:describeaccountlimits",
"autoscaling:describeadjustmenttypes",
"autoscaling:describeautoscalinggroups",
"autoscaling:describeautoscalinginstances",
"autoscaling:describeautoscalingnotificationtypes",
"autoscaling:describeinstancerefreshes",
"autoscaling:describelaunchconfigurations",
"autoscaling:describelifecyclehooks",
"autoscaling:describelifecyclehooktypes",
"autoscaling:describeloadbalancers",
"autoscaling:describeloadbalancertargetgroups",
"autoscaling:describemetriccollectiontypes",
"autoscaling:describenotificationconfigurations",
"autoscaling:describepolicies",
"autoscaling:describescalingactivities",
"autoscaling:describescalingprocesstypes",
"autoscaling:describescheduledactions",
"autoscaling:describetags",
"autoscaling:describeterminationpolicytypes",
"autoscaling:detachinstances",
"autoscaling:detachloadbalancers",
"autoscaling:detachloadbalancertargetgroups",
"autoscaling:disablemetricscollection",
"autoscaling:enablemetricscollection",
"autoscaling:enterstandby",
"autoscaling:executepolicy",
"autoscaling:exitstandby",
"autoscaling:putlifecyclehook",
"autoscaling:putnotificationconfiguration",
"autoscaling:putscalingpolicy",
"autoscaling:putscheduledupdategroupaction",
"autoscaling:recordlifecycleactionheartbeat",
"autoscaling:resumeprocesses",
"autoscaling:setdesiredcapacity",
"autoscaling:setinstancehealth",
"autoscaling:setinstanceprotection",
"autoscaling:startinstancerefresh",
"autoscaling:suspendprocesses",
"autoscaling:terminateinstanceinautoscalinggroup",
"autoscaling:updateautoscalinggroup",
]
)
def dc(o):
"""
    Some of the testing methods modify the data structure you pass into them.
We want to deepcopy each structure so one test doesn't break another.
"""
return copy.deepcopy(o)
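# Illustrative-only sketch (not part of the test suite) of why dc() deep-copies:
# mutating the copy leaves the module-level fixture untouched, so later tests
# still see the original WILDCARD_POLICY_1.
def _dc_sketch():
    copied = dc(WILDCARD_POLICY_1)
    copied["Statement"][0]["Action"].append("swf:extra")
    return WILDCARD_POLICY_1["Statement"][0]["Action"]  # still ["swf:res*"]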
class TestMethods(unittest.TestCase):
def test_expand_1(self):
expanded_policy = expand_policy(policy=dc(WILDCARD_POLICY_1))
self.assertEqual(expanded_policy, EXPANDED_POLICY_1)
policy = {
"Statement": {
"NotAction": ["ec2:thispermissiondoesntexist"],
"Resource": "*",
"Effect": "Deny",
}
}
expected_policy = {
"Statement": [
{
"NotAction": ["ec2:thispermissiondoesntexist"],
"Resource": "*",
"Effect": "Deny",
}
]
}
expanded_policy = expand_policy(policy=dc(policy), expand_deny=False)
self.assertEqual(expanded_policy, expected_policy)
expanded_policy = expand_policy(policy=dc(policy), expand_deny=True)
self.assertEqual(type(expanded_policy["Statement"]), list)
def test_expand_2(self):
expanded_policy = expand_policy(policy=dc(WILDCARD_POLICY_2))
self.assertEqual(expanded_policy, EXPANDED_POLICY_2)
def test_expand_minimize_over_policies(self):
result = expand_minimize_over_policies(dc(POLICIES_1), expand_policy)
self.assertEqual(result, EXPANDED_POLICIES_1)
def test_expand_minimize_over_policies_1(self):
result = expand_minimize_over_policies(
EXPANDED_POLICY_1, minimize_policy, minchars=3
)
self.assertEqual(result, WILDCARD_POLICY_1)
def test_get_prefixes_for_action(self):
result = _get_prefixes_for_action("iam:cat")
self.assertEqual(result, ["iam:", "iam:c", "iam:ca", "iam:cat"])
def test_expand_wildcard_action(self):
result = _expand_wildcard_action(["autoscaling:*"])
self.assertEqual(sorted(result), AUTOSCALING_PERMISSIONS)
def test_expand_wildcard_action_2(self):
result = _expand_wildcard_action("thistechdoesntexist:*")
self.assertEqual(result, ["thistechdoesntexist:*"])
def test_expand_wildcard_action_3(self):
result = _expand_wildcard_action("ec2:DescribeInstances")
self.assertEqual(result, ["ec2:describeinstances"])
def test_get_desired_actions_from_statement(self):
result = _get_desired_actions_from_statement(
dc(WILDCARD_POLICY_1["Statement"][0])
)
self.assertEqual(result, set(EXPANDED_ACTIONS_1))
def test_get_desired_actions_from_statement_1(self):
statement = {
"Action": ["ec2:thispermissiondoesntexist"],
"Resource": "*",
"Effect": "Allow",
}
self.assertRaises(Exception, _get_desired_actions_from_statement, statement)
def test_get_actions_from_statement(self):
statement = {
"Action": "ec2:thispermissiondoesntexist",
"NotAction": list(all_permissions),
"Resource": "*",
"Effect": "Allow",
}
expected_result = {"ec2:thispermissiondoesntexist"}
result = get_actions_from_statement(statement)
self.assertEqual(result, expected_result)
result = get_actions_from_statement(dict(NotAction="abc"))
self.assertSetEqual(result, set(all_permissions))
statement = {
"Action": (
"ec2:updatesecuritygroupruledescriptionsegress",
"ec2:cancelcapacityreservation",
),
"NotAction": tuple(),
"Resource": "*",
"Effect": "Allow",
}
result = get_actions_from_statement(statement)
self.assertSetEqual(
result,
{
"ec2:updatesecuritygroupruledescriptionsegress",
"ec2:cancelcapacityreservation",
},
)
def test_minimize_statement_actions(self):
statement = dict(Effect="Deny")
self.assertRaises(Exception, minimize_statement_actions, statement)
|
the-stack_0_27321
|
import os
import requests
import json
import random
import emoji
import regex
import csv
from collections import Counter
from server.api_handler import ApiHandler
from server.authentication import Authentication
from server.database.manage_db import create_connection
from forms import DropdownForm
authentication = Authentication()
def construct_dropdown(options):
form = DropdownForm()
form.select.choices = options
return form
def get_data_from_db(query):
connection = create_connection(r"./server/database/annotations.db")
cursor = connection.cursor()
cursor.execute(query)
return cursor.fetchall()
def get_user_details(username):
username = username.lstrip("@")
user_lookup = ApiHandler("users/by", authentication)
response = user_lookup(payload={"usernames": username})
if response.status_code != 200:
print("Could not load user. Error code:", response.status_code)
user_id = None
else:
data = json.loads(response.text)
user_id = data["data"][0]["id"]
return username, user_id
def get_user_tweet_timeline(user_id):
tweets = []
user_tweet_timeline = ApiHandler(f"users/{user_id}/tweets", authentication)
payload = {"tweet.fields": "context_annotations,entities", "max_results": "100"}
response = user_tweet_timeline(payload)
if response.status_code != 200:
print("Response", response.status_code, response.text)
else:
data = json.loads(response.text)
request_count = 1
if "data" in data:
for tweet in data["data"]:
tweets.append(tweet)
        # Change the request_count condition below to receive more or fewer Tweets to analyze (100 Tweets are returned per request)
while "next_token" in data["meta"] and request_count < 3:
pagination_token = data["meta"]["next_token"]
payload.update(pagination_token=pagination_token)
response = user_tweet_timeline(payload)
if response.status_code != 200:
print("Response:", response.status_code)
break
data = json.loads(response.text)
request_count += 1
if "data" in data:
for tweet in data["data"]:
tweets.append(tweet)
print("Request count:", request_count)
print("Code:", response.status_code)
return tweets, response.status_code
def get_user_tweet_timeline_no_pagination(user_id):
tweets = []
user_tweet_timeline = ApiHandler(f"users/{user_id}/tweets", authentication)
payload = {"tweet.fields": "context_annotations,entities", "max_results": "10"}
response = user_tweet_timeline(payload)
if response.status_code != 200:
print("Response", response.status_code, response.text)
else:
data = json.loads(response.text)
if "data" in data:
for tweet in data["data"]:
tweets.append(tweet)
return tweets, response.status_code
def get_user_followers(user_id):
followers = []
user_followers = ApiHandler(f"users/{user_id}/followers", authentication)
payload = {"max_results": "1000", "user.fields": "public_metrics"}
response = user_followers(payload)
if response.status_code != 200:
print("Response:", response.status_code, response.text)
else:
data = json.loads(response.text)
request_count = 1
if "data" in data:
for follower in data["data"]:
followers.append(follower)
while "next_token" in data["meta"] and request_count <= 6:
pagination_token = data["meta"]["next_token"]
payload.update(pagination_token=pagination_token)
response = user_followers(payload)
if response.status_code != 200:
print("Response:", response.status_code)
break
data = json.loads(response.text)
request_count += 1
if "data" in data:
for follower in data["data"]:
followers.append(follower)
print("Request count:", request_count)
return followers, response.status_code
def random_selection(followers, follower_count):
# Randomly select 50 followers, from whom to analyze annotations data.
if follower_count > 50:
# .seed() method makes the random selection deterministic
random.seed(56)
selection = random.sample(followers, k=50)
else:
selection = followers
return selection
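# Minimal illustrative sketch (assumed data, not called anywhere in this module)
# of the deterministic sampling above: with the fixed seed, repeated calls over
# the same follower list return the same 50-element subset.
def _random_selection_sketch():
    followers = [{"id": str(i)} for i in range(200)]
    first = random_selection(followers, len(followers))
    second = random_selection(followers, len(followers))
    return first == second  # True, because random.seed(56) is reset on each call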
def get_style(tweets, value):
text = " "
for tweet in tweets:
text = text + tweet["text"]
emoji_list = []
emojis = emoji.UNICODE_EMOJI["en"].keys()
data = regex.findall(r"\X", text)
for symbol in data:
for char in symbol:
if char in emojis:
emoji_list.append(symbol)
break
if len(emoji_list) > 0:
emoji_count = Counter(emoji_list)
emoji_dict = {}
for i in emoji_count:
k = i
v = emoji_count[i]
emoji_dict[k] = v
top_emojis_unsorted = {k: v for (k, v) in emoji_dict.items() if v > value}
top_emojis = {
k: v
for k, v in sorted(
top_emojis_unsorted.items(), key=lambda item: item[1], reverse=True
)
}
else:
top_emojis = 1
return top_emojis
def get_visualisation(username):
url = "https://a54df80b7070.ngrok.io/stream/timelines"
payload = {"twitter_handle": f"{username}"}
headers = {"Content-Type": "application/json"}
response = requests.request("POST", url, json=payload, headers=headers)
return response
def get_annotations(tweets):
domain = []
entity = []
person = []
place = []
product = []
organization = []
other = []
tweet_count = 0
try:
for tweet in tweets:
tweet_count += 1
if "context_annotations" in tweet:
for annotation in tweet["context_annotations"]:
domain.append(annotation["domain"]["name"])
entity.append(annotation["entity"]["name"])
if "entities" in tweet:
if "annotations" in tweet["entities"]:
for annotation in tweet["entities"]["annotations"]:
if annotation["probability"] >= 0.5:
if annotation["type"] == "Person":
person.append(annotation["normalized_text"])
elif annotation["type"] == "Place":
place.append(annotation["normalized_text"])
elif annotation["type"] == "Product":
product.append(annotation["normalized_text"])
elif annotation["type"] == "Organization":
organization.append(annotation["normalized_text"])
elif annotation["type"] == "Other":
other.append(annotation["normalized_text"])
else:
pass
domain_frequency = {d: domain.count(d) for d in domain}
entity_frequency = {e: entity.count(e) for e in entity}
domain_frequency_ordered = {
k: v
for k, v in sorted(
domain_frequency.items(), key=lambda item: item[1], reverse=True
)
}
entity_frequency_ordered = {
k: v
for k, v in sorted(
entity_frequency.items(), key=lambda item: item[1], reverse=True
)
}
person_frequency = {i: person.count(i) for i in person}
place_frequency = {i: place.count(i) for i in place}
product_frequency = {i: product.count(i) for i in product}
organization_frequency = {i: organization.count(i) for i in organization}
other_frequency = {i: other.count(i) for i in other}
person_frequency_ordered = {
k: v
for k, v in sorted(
person_frequency.items(), key=lambda item: item[1], reverse=True
)
}
place_frequency_ordered = {
k: v
for k, v in sorted(
place_frequency.items(), key=lambda item: item[1], reverse=True
)
}
product_frequency_ordered = {
k: v
for k, v in sorted(
product_frequency.items(), key=lambda item: item[1], reverse=True
)
}
organization_frequency_ordered = {
k: v
for k, v in sorted(
organization_frequency.items(), key=lambda item: item[1], reverse=True
)
}
other_frequency_ordered = {
k: v
for k, v in sorted(
other_frequency.items(), key=lambda item: item[1], reverse=True
)
}
# Only returns annotations and entities that are present in at least 2+ Tweets.
domain_list_top = {k: v for k, v in domain_frequency_ordered.items() if v >= 2}
entity_list_top = {k: v for k, v in entity_frequency_ordered.items() if v >= 2}
person_list_top = {k: v for k, v in person_frequency_ordered.items() if v >= 2}
place_list_top = {k: v for k, v in place_frequency_ordered.items() if v >= 2}
product_list_top = {
k: v for k, v in product_frequency_ordered.items() if v >= 2
}
organization_list_top = {
k: v for k, v in organization_frequency_ordered.items() if v >= 2
}
other_list_top = {k: v for k, v in other_frequency_ordered.items() if v >= 2}
    except Exception:
        print("No topics data to analyse for this user in the past week")
        # Ensure the names returned below are defined even when parsing fails.
        domain_list_top = None
        entity_list_top = None
        person_list_top = None
        place_list_top = None
        product_list_top = None
        organization_list_top = None
        other_list_top = None
return (
tweet_count,
domain_list_top,
entity_list_top,
person_list_top,
place_list_top,
product_list_top,
organization_list_top,
other_list_top,
)
def update_annotations(dict1, dict2):
for k, v in dict2.items():
if k in dict1:
dict1[k] += v
else:
dict1[k] = v
return dict1
def get_user_by_id(user_id):
user_lookup = ApiHandler(f"users/{user_id}", authentication)
response = user_lookup(
payload={
"user.fields": "created_at,description,location,name,username,verified,public_metrics"
}
)
if response.status_code != 200:
print("Could not load user. Error code:", response.status_code)
data = json.loads(response.text)
created_at = None
description = None
location = None
name = None
username = None
verified = None
metrics = None
else:
data = json.loads(response.text)
created_at = data["data"]["created_at"]
description = data["data"]["description"]
name = data["data"]["name"]
username = data["data"]["username"]
verified = data["data"]["verified"]
metrics = data["data"]["public_metrics"]
username = "@" + username
return (
response.status_code,
username,
name,
description,
metrics,
created_at,
verified,
)
def search_tweets(query):
payload = {"query": query, "expansions": "author_id", "max_results": "50"}
search_tweets = ApiHandler("tweets/search/recent", authentication)
response = search_tweets(payload)
if response.status_code != 200:
print("Could not fetch data. Error code:", response.status_code)
data = None
else:
data = json.loads(response.text)
return data, response.status_code
def search_tweets_with_pagination(query):
payload = {"query": query, "tweet.fields": "public_metrics", "max_results": "100"}
search_tweets = ApiHandler("tweets/search/recent", authentication)
response = search_tweets(payload)
results = []
if response.status_code != 200:
print("Could not fetch data. Error code:", response.status_code)
data = None
else:
data = json.loads(response.text)
request_count = 1
if "data" in data:
for tweet in data["data"]:
results.append(tweet)
while "next_token" in data["meta"]:
next_token = data["meta"]["next_token"]
payload.update(next_token=next_token)
response = search_tweets(payload)
if response.status_code != 200:
print("Could not fetch data. Error code:", response.status_code)
break
data = json.loads(response.text)
request_count += 1
if "data" in data:
for tweet in data["data"]:
results.append(tweet)
return results, response.status_code
def get_tweet_metrics(tweets):
retweet_count = 0
like_count = 0
reply_count = 0
quote_count = 0
for tweet in tweets:
if tweet["public_metrics"]["retweet_count"]:
retweet_count += tweet["public_metrics"]["retweet_count"]
if tweet["public_metrics"]["reply_count"]:
reply_count += tweet["public_metrics"]["reply_count"]
if tweet["public_metrics"]["like_count"]:
like_count += tweet["public_metrics"]["like_count"]
if tweet["public_metrics"]["quote_count"]:
quote_count += tweet["public_metrics"]["quote_count"]
return retweet_count, like_count, reply_count, quote_count
def get_users(data):
users = []
user_details = []
for tweet in data[0]["data"]:
user = tweet["author_id"]
tweet_id = tweet["id"]
if user not in users:
users.append([user, tweet_id])
for item in users:
user = item[0]
tweet_id = item[1]
user_info = get_user_by_id(user)
user_details.append([user_info, tweet_id])
return user_details
def write_dict_to_csv_row(data_dict, writer, header):
row = [header]
for k, v in data_dict.items():
row.append(k)
row.append(v)
writer.writerow(row)
def export_to_csv(
path_name, domain, entity, person, place, product, organization, other
):
with open(path_name, "w") as csvfile:
writer = csv.writer(csvfile)
write_dict_to_csv_row(domain, writer, "TOP DOMAINS")
write_dict_to_csv_row(entity, writer, "TOP ENTITIES")
write_dict_to_csv_row(person, writer, "TOP PEOPLE")
write_dict_to_csv_row(place, writer, "TOP PLACES")
write_dict_to_csv_row(product, writer, "TOP PRODUCTS")
write_dict_to_csv_row(organization, writer, "TOP ORGANIZATIONS")
write_dict_to_csv_row(other, writer, "TOP OTHER TOPICS")
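# Hedged usage sketch of the CSV export above; the path and the tiny frequency
# dicts are placeholders invented for illustration, not values produced elsewhere
# in this module.
def _export_to_csv_sketch(path_name="annotations_example.csv"):
    sample = {"News": 3, "Sports": 2}
    export_to_csv(path_name, sample, sample, sample, sample, sample, sample, sample)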
def get_profiles_for_topic():
"""
Pass in a given topic and get back a list of users who are interested in the topic.
E.g. Surface a set of profiles who are enthusiastic about skiing.
"""
pass
def get_tweet_metrics_for_topic():
"""
Pass in a given topic and get back Tweet metrics for each topic.
    I.e. how often (or not) is a given topic mentioned on the platform?
The full Tweet text is also returned and displayed with associated metrics.
"""
pass
|
the-stack_0_27322
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Runs CIFAR10 training with differential privacy.
"""
import argparse
import os
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
import torch.utils.tensorboard as tensorboard
import torchvision.models as models
import torchvision.transforms as transforms
from opacus import PrivacyEngine
from opacus.utils import stats
from opacus.utils.module_modification import convert_batchnorm_modules
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from functools import partial
import functorch
from functorch import vmap, grad_and_value
from functorch import make_functional
# disable warning spam
functorch._C._set_vmap_fallback_warning_enabled(False)
def save_checkpoint(state, is_best, filename="checkpoint.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
def accuracy(preds, labels):
return (preds == labels).mean()
def compute_norms(sample_grads):
batch_size = sample_grads[0].shape[0]
norms = [sample_grad.view(batch_size, -1).norm(2, dim=-1) for sample_grad in sample_grads]
norms = torch.stack(norms, dim=0).norm(2, dim=0)
return norms
def clip_and_accumulate_and_add_noise(model, max_per_sample_grad_norm=1.0, noise_multiplier=1.0):
sample_grads = tuple(param.grad_sample for param in model.parameters())
# step 0: compute the norms
sample_norms = compute_norms(sample_grads)
# step 1: compute clipping factors
clip_factor = max_per_sample_grad_norm / (sample_norms + 1e-6)
clip_factor = clip_factor.clamp(max=1.0)
# step 2: clip
grads = tuple(torch.einsum('i,i...', clip_factor, sample_grad)
for sample_grad in sample_grads)
# step 3: add gaussian noise
stddev = max_per_sample_grad_norm * noise_multiplier
noises = tuple(torch.normal(0, stddev, grad_param.shape, device=grad_param.device)
for grad_param in grads)
grads = tuple(noise + grad_param for noise, grad_param in zip(noises, grads))
# step 4: assign the new grads, delete the sample grads
for param, param_grad in zip(model.parameters(), grads):
param.grad = param_grad
del param.grad_sample
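# Hedged numeric illustration (not used in training) of the clipping arithmetic
# above: the toy per-sample gradients are invented so the clip factor and the
# clipped-and-summed result can be checked by hand.
def _clipping_math_sketch(max_per_sample_grad_norm=1.0):
    # Two samples, a single parameter of shape (3,): per-sample grads (2, 3).
    sample_grad = torch.tensor([[3.0, 4.0, 0.0],    # norm 5.0 -> scaled by ~1/5
                                [0.3, 0.4, 0.0]])   # norm 0.5 -> kept unchanged
    norms = compute_norms((sample_grad,))                          # ~[5.0, 0.5]
    clip_factor = (max_per_sample_grad_norm / (norms + 1e-6)).clamp(max=1.0)
    clipped_sum = torch.einsum('i,i...', clip_factor, sample_grad)
    return clipped_sum                                             # ~[0.9, 1.2, 0.0]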
def train(args, model, train_loader, optimizer, epoch, device):
model.train()
criterion = nn.CrossEntropyLoss()
losses = []
top1_acc = []
for i, (images, target) in enumerate(tqdm(train_loader)):
images = images.to(device)
target = target.to(device)
# Step 1: compute per-sample-grads
# In order to use functional vmap+grad, we need to be able to
# pass the weights to a model.
func_model, weights = make_functional(model)
# To use vmap+grad to compute per-sample-grads, the forward pass
# must be re-formulated on a single example.
# We use the `grad` operator to compute forward+backward on a single example,
# and finally `vmap` to do forward+backward on multiple examples.
def compute_loss_and_output(weights, image, target):
images = image.unsqueeze(0)
targets = target.unsqueeze(0)
output = func_model(weights, images)
loss = criterion(output, targets)
return loss, output.squeeze(0)
# `grad(f)` is a functional API that returns a function `f'` that
# computes gradients by running both the forward and backward pass.
# We want to extract some intermediate
# values from the computation (i.e. the loss and output).
#
# To extract the loss, we use the `grad_and_value` API, that returns the
# gradient of the weights w.r.t. the loss and the loss.
#
# To extract the output, we use the `has_aux=True` flag.
# `has_aux=True` assumes that `f` returns a tuple of two values,
# where the first is to be differentiated and the second "auxiliary value"
# is not to be differentiated. `f'` returns the gradient w.r.t. the loss,
# the loss, and the auxiliary value.
grads_loss_output = grad_and_value(compute_loss_and_output, has_aux=True)
sample_grads, (sample_loss, output) = \
vmap(grads_loss_output, (None, 0, 0))(weights, images, target)
loss = sample_loss.mean()
for grad_sample, weight in zip(sample_grads, model.parameters()):
weight.grad_sample = grad_sample.detach()
# Step 2: Clip the per-sample-grads, sum them to form grads, and add noise
clip_and_accumulate_and_add_noise(
model, args.max_per_sample_grad_norm, args.sigma)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
losses.append(loss.item())
# measure accuracy and record loss
acc1 = accuracy(preds, labels)
top1_acc.append(acc1)
stats.update(stats.StatType.TRAIN, acc1=acc1)
# make sure we take a step after processing the last mini-batch in the
# epoch to ensure we start the next epoch with a clean state
if ((i + 1) % args.n_accumulation_steps == 0) or ((i + 1) == len(train_loader)):
optimizer.step()
optimizer.zero_grad()
else:
optimizer.virtual_step()
if i % args.print_freq == 0:
print(
f"\tTrain Epoch: {epoch} \t"
f"Loss: {np.mean(losses):.6f} "
f"Acc@1: {np.mean(top1_acc):.6f} "
)
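# Self-contained sketch (never called by main) of the per-sample-gradient recipe
# that train() applies to the real model: make the model functional, express the
# loss on a single example, then vmap the gradient computation over the batch.
# The tiny linear model and the random inputs below are assumptions made purely
# for illustration.
def _per_sample_grad_sketch():
    model = nn.Linear(4, 3)
    images = torch.randn(8, 4)
    targets = torch.randint(0, 3, (8,))
    criterion = nn.CrossEntropyLoss()
    func_model, weights = make_functional(model)
    def compute_loss(weights, image, target):
        output = func_model(weights, image.unsqueeze(0))
        return criterion(output, target.unsqueeze(0))
    # weights are shared (None); images and targets are mapped over dim 0.
    sample_grads, sample_losses = vmap(grad_and_value(compute_loss), (None, 0, 0))(
        weights, images, targets)
    return sample_grads, sample_losses  # each grad has a leading batch dim of 8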
def test(args, model, test_loader, device):
model.eval()
criterion = nn.CrossEntropyLoss()
losses = []
top1_acc = []
with torch.no_grad():
for images, target in tqdm(test_loader):
images = images.to(device)
target = target.to(device)
output = model(images)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc1 = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc1)
top1_avg = np.mean(top1_acc)
stats.update(stats.StatType.TEST, acc1=top1_avg)
print(f"\tTest set:" f"Loss: {np.mean(losses):.6f} " f"Acc@1: {top1_avg :.6f} ")
return np.mean(top1_acc)
def main():
parser = argparse.ArgumentParser(description="PyTorch CIFAR10 DP Training")
parser.add_argument(
"-j",
"--workers",
default=2,
type=int,
metavar="N",
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch",
default=1,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
# This should be 256, but that OOMs using the prototype.
default=64,
type=int,
metavar="N",
help="mini-batch size (default: 64), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
"-na",
"--n_accumulation_steps",
default=1,
type=int,
metavar="N",
help="number of mini-batches to accumulate into an effective batch",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.001,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument(
"--momentum", default=0.9, type=float, metavar="M", help="SGD momentum"
)
parser.add_argument(
"--wd",
"--weight-decay",
default=5e-4,
type=float,
metavar="W",
help="SGD weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument(
"--device",
type=str,
default="cuda",
help="GPU ID for this process (default: 'cuda')",
)
parser.add_argument(
"--sigma",
type=float,
default=1.0,
metavar="S",
help="Noise multiplier (default 1.0)",
)
parser.add_argument(
"-c",
"--max-per-sample-grad_norm",
type=float,
default=1.0,
metavar="C",
help="Clip per-sample gradients to this norm (default 1.0)",
)
parser.add_argument(
"--disable-dp",
action="store_true",
default=False,
help="Disable privacy training and just train with vanilla SGD",
)
parser.add_argument(
"--secure-rng",
action="store_true",
default=False,
help="Enable Secure RNG to have trustworthy privacy guarantees. Comes at a performance cost",
)
parser.add_argument(
"--delta",
type=float,
default=1e-5,
metavar="D",
help="Target delta (default: 1e-5)",
)
parser.add_argument(
"--checkpoint-file",
type=str,
default="checkpoint",
help="path to save check points",
)
parser.add_argument(
"--data-root",
type=str,
default="../cifar10",
help="Where CIFAR10 is/will be stored",
)
parser.add_argument(
"--log-dir", type=str, default="", help="Where Tensorboard log will be stored"
)
parser.add_argument(
"--optim",
type=str,
default="Adam",
help="Optimizer to use (Adam, RMSprop, SGD)",
)
args = parser.parse_args()
    # The Opacus PrivacyEngine is left unattached for this functorch prototype:
    # per-sample clipping and noising are done manually in train().
    args.disable_dp = True
    if args.disable_dp and args.n_accumulation_steps > 1:
        raise ValueError("Virtual steps only work when DP is enabled")
# The following few lines, enable stats gathering about the run
# 1. where the stats should be logged
stats.set_global_summary_writer(
tensorboard.SummaryWriter(os.path.join("/tmp/stat", args.log_dir))
)
# 2. enable stats
stats.add(
# stats about gradient norms aggregated for all layers
stats.Stat(stats.StatType.GRAD, "AllLayers", frequency=0.1),
# stats about gradient norms per layer
stats.Stat(stats.StatType.GRAD, "PerLayer", frequency=0.1),
# stats about clipping
stats.Stat(stats.StatType.GRAD, "ClippingStats", frequency=0.1),
# stats on training accuracy
stats.Stat(stats.StatType.TRAIN, "accuracy", frequency=0.01),
# stats on validation accuracy
stats.Stat(stats.StatType.TEST, "accuracy"),
)
# The following lines enable stat gathering for the clipping process
# and set a default of per layer clipping for the Privacy Engine
clipping = {"clip_per_layer": False, "enable_stat": True}
if args.secure_rng:
assert False
try:
import torchcsprng as prng
except ImportError as e:
msg = (
"To use secure RNG, you must install the torchcsprng package! "
"Check out the instructions here: https://github.com/pytorch/csprng#installation"
)
raise ImportError(msg) from e
generator = prng.create_random_device_generator("/dev/urandom")
else:
generator = None
augmentations = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
]
normalize = [
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
train_transform = transforms.Compose(
augmentations + normalize if args.disable_dp else normalize
)
test_transform = transforms.Compose(normalize)
train_dataset = CIFAR10(
root=args.data_root, train=True, download=True, transform=train_transform
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
drop_last=True,
generator=generator,
)
test_dataset = CIFAR10(
root=args.data_root, train=False, download=True, transform=test_transform
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
)
best_acc1 = 0
device = torch.device(args.device)
model = convert_batchnorm_modules(models.resnet18(num_classes=10))
# model = CIFAR10Model()
model = model.to(device)
if args.optim == "SGD":
optimizer = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
elif args.optim == "RMSprop":
optimizer = optim.RMSprop(model.parameters(), lr=args.lr)
elif args.optim == "Adam":
optimizer = optim.Adam(model.parameters(), lr=args.lr)
else:
raise NotImplementedError("Optimizer not recognized. Please check spelling")
if not args.disable_dp:
privacy_engine = PrivacyEngine(
model,
batch_size=args.batch_size * args.n_accumulation_steps,
sample_size=len(train_dataset),
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
noise_multiplier=args.sigma,
max_grad_norm=args.max_per_sample_grad_norm,
secure_rng=args.secure_rng,
**clipping,
)
privacy_engine.attach(optimizer)
for epoch in range(args.start_epoch, args.epochs + 1):
train(args, model, train_loader, optimizer, epoch, device)
top1_acc = test(args, model, test_loader, device)
# remember best acc@1 and save checkpoint
is_best = top1_acc > best_acc1
best_acc1 = max(top1_acc, best_acc1)
save_checkpoint(
{
"epoch": epoch + 1,
"arch": "ResNet18",
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"optimizer": optimizer.state_dict(),
},
is_best,
filename=args.checkpoint_file + ".tar",
)
if __name__ == "__main__":
main()
|
the-stack_0_27323
|
import hashlib
import json
import logging
import uuid
from flask import redirect, Blueprint, abort, send_file, make_response, request
import features
from app import app, signer, storage, metric_queue, config_provider, ip_resolver, instance_keys
from auth.auth_context import get_authenticated_user
from auth.decorators import process_auth
from auth.permissions import ReadRepositoryPermission
from data import database
from data import model
from data.registry_model import registry_model
from endpoints.decorators import (anon_protect, anon_allowed, route_show_if, parse_repository_name,
check_region_blacklisted)
from endpoints.v2.blob import BLOB_DIGEST_ROUTE
from image.appc import AppCImageFormatter
from image.docker import ManifestException
from image.docker.squashed import SquashedDockerImageFormatter
from storage import Storage
from util.audit import track_and_log, wrap_repository
from util.http import exact_abort
from util.registry.filelike import wrap_with_handler
from util.registry.queuefile import QueueFile
from util.registry.queueprocess import QueueProcess
from util.registry.tarlayerformat import TarLayerFormatterReporter
from util.registry.torrent import (make_torrent, per_user_torrent_filename, public_torrent_filename,
PieceHasher, TorrentConfiguration)
logger = logging.getLogger(__name__)
verbs = Blueprint('verbs', __name__)
LAYER_MIMETYPE = 'binary/octet-stream'
class VerbReporter(TarLayerFormatterReporter):
def __init__(self, kind):
self.kind = kind
def report_pass(self, pass_count):
metric_queue.verb_action_passes.Inc(labelvalues=[self.kind, pass_count])
def _open_stream(formatter, tag, schema1_manifest, derived_image_id, handlers, reporter):
"""
This method generates a stream of data which will be replicated and read from the queue files.
This method runs in a separate process.
"""
# For performance reasons, we load the full image list here, cache it, then disconnect from
# the database.
with database.UseThenDisconnect(app.config):
layers = registry_model.list_parsed_manifest_layers(tag.repository, schema1_manifest, storage,
include_placements=True)
def image_stream_getter(store, blob):
def get_stream_for_storage():
current_image_stream = store.stream_read_file(blob.placements, blob.storage_path)
logger.debug('Returning blob %s: %s', blob.digest, blob.storage_path)
return current_image_stream
return get_stream_for_storage
def tar_stream_getter_iterator():
# Re-Initialize the storage engine because some may not respond well to forking (e.g. S3)
store = Storage(app, metric_queue, config_provider=config_provider, ip_resolver=ip_resolver)
# Note: We reverse because we have to start at the leaf layer and move upward,
# as per the spec for the formatters.
for layer in reversed(layers):
yield image_stream_getter(store, layer.blob)
stream = formatter.build_stream(tag, schema1_manifest, derived_image_id, layers,
tar_stream_getter_iterator, reporter=reporter)
for handler_fn in handlers:
stream = wrap_with_handler(stream, handler_fn)
return stream.read
def _sign_derived_image(verb, derived_image, queue_file):
""" Read from the queue file and sign the contents which are generated. This method runs in a
separate process. """
signature = None
try:
signature = signer.detached_sign(queue_file)
except:
logger.exception('Exception when signing %s deriving image %s', verb, derived_image)
return
# Setup the database (since this is a new process) and then disconnect immediately
# once the operation completes.
if not queue_file.raised_exception:
with database.UseThenDisconnect(app.config):
registry_model.set_derived_image_signature(derived_image, signer.name, signature)
def _write_derived_image_to_storage(verb, derived_image, queue_file):
""" Read from the generated stream and write it back to the storage engine. This method runs in a
separate process.
"""
def handle_exception(ex):
logger.debug('Exception when building %s derived image %s: %s', verb, derived_image, ex)
with database.UseThenDisconnect(app.config):
registry_model.delete_derived_image(derived_image)
queue_file.add_exception_handler(handle_exception)
# Re-Initialize the storage engine because some may not respond well to forking (e.g. S3)
store = Storage(app, metric_queue, config_provider=config_provider, ip_resolver=ip_resolver)
try:
store.stream_write(derived_image.blob.placements, derived_image.blob.storage_path, queue_file)
except IOError as ex:
logger.debug('Exception when writing %s derived image %s: %s', verb, derived_image, ex)
with database.UseThenDisconnect(app.config):
registry_model.delete_derived_image(derived_image)
queue_file.close()
def _torrent_for_blob(blob, is_public):
""" Returns a response containing the torrent file contents for the given blob. May abort
with an error if the state is not valid (e.g. non-public, non-user request).
"""
# Make sure the storage has a size.
if not blob.compressed_size:
abort(404)
# Lookup the torrent information for the storage.
torrent_info = registry_model.get_torrent_info(blob)
if torrent_info is None:
abort(404)
# Lookup the webseed path for the storage.
webseed = storage.get_direct_download_url(blob.placements, blob.storage_path,
expires_in=app.config['BITTORRENT_WEBSEED_LIFETIME'])
if webseed is None:
# We cannot support webseeds for storages that cannot provide direct downloads.
exact_abort(501, 'Storage engine does not support seeding.')
# Load the config for building torrents.
torrent_config = TorrentConfiguration.from_app_config(instance_keys, app.config)
# Build the filename for the torrent.
if is_public:
name = public_torrent_filename(blob.uuid)
else:
user = get_authenticated_user()
if not user:
abort(403)
name = per_user_torrent_filename(torrent_config, user.uuid, blob.uuid)
# Return the torrent file.
torrent_file = make_torrent(torrent_config, name, webseed, blob.compressed_size,
torrent_info.piece_length, torrent_info.pieces)
headers = {
'Content-Type': 'application/x-bittorrent',
'Content-Disposition': 'attachment; filename={0}.torrent'.format(name)}
return make_response(torrent_file, 200, headers)
def _torrent_repo_verb(repository, tag, manifest, verb, **kwargs):
""" Handles returning a torrent for the given verb on the given image and tag. """
if not features.BITTORRENT:
# Torrent feature is not enabled.
abort(406)
# Lookup an *existing* derived storage for the verb. If the verb's image storage doesn't exist,
# we cannot create it here, so we 406.
derived_image = registry_model.lookup_derived_image(manifest, verb, storage,
varying_metadata={'tag': tag.name},
include_placements=True)
if derived_image is None:
abort(406)
# Return the torrent.
torrent = _torrent_for_blob(derived_image.blob, model.repository.is_repository_public(repository))
# Log the action.
track_and_log('repo_verb', wrap_repository(repository), tag=tag.name, verb=verb, torrent=True,
**kwargs)
return torrent
def _verify_repo_verb(_, namespace, repo_name, tag_name, verb, checker=None):
permission = ReadRepositoryPermission(namespace, repo_name)
repo = model.repository.get_repository(namespace, repo_name)
repo_is_public = repo is not None and model.repository.is_repository_public(repo)
if not permission.can() and not repo_is_public:
logger.debug('No permission to read repository %s/%s for user %s with verb %s', namespace,
repo_name, get_authenticated_user(), verb)
abort(403)
if repo is not None and repo.kind.name != 'image':
logger.debug('Repository %s/%s for user %s is not an image repo', namespace, repo_name,
get_authenticated_user())
abort(405)
# Make sure the repo's namespace isn't disabled.
if not registry_model.is_namespace_enabled(namespace):
abort(400)
# Lookup the requested tag.
repo_ref = registry_model.lookup_repository(namespace, repo_name)
if repo_ref is None:
abort(404)
tag = registry_model.get_repo_tag(repo_ref, tag_name)
if tag is None:
    logger.debug('Tag %s does not exist in repository %s/%s for user %s', tag_name, namespace,
                 repo_name, get_authenticated_user())
abort(404)
# Get its associated manifest.
manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
if manifest is None:
logger.debug('Could not get manifest on %s/%s:%s::%s', namespace, repo_name, tag.name, verb)
abort(404)
# Retrieve the schema1-compatible version of the manifest.
try:
schema1_manifest = registry_model.get_schema1_parsed_manifest(manifest, namespace,
repo_name, tag.name,
storage)
except ManifestException:
logger.exception('Could not get manifest on %s/%s:%s::%s', namespace, repo_name, tag.name, verb)
abort(400)
if schema1_manifest is None:
abort(404)
# If there is a data checker, call it first.
if checker is not None:
if not checker(tag, schema1_manifest):
logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repo_name, tag.name, verb)
abort(404)
# Preload the tag's repository information, so it gets cached.
assert tag.repository.namespace_name
assert tag.repository.name
return tag, manifest, schema1_manifest
def _repo_verb_signature(namespace, repository, tag_name, verb, checker=None, **kwargs):
# Verify that the tag exists and that we have access to it.
tag, manifest, _ = _verify_repo_verb(storage, namespace, repository, tag_name, verb, checker)
# Find the derived image storage for the verb.
derived_image = registry_model.lookup_derived_image(manifest, verb, storage,
varying_metadata={'tag': tag.name})
if derived_image is None or derived_image.blob.uploading:
return make_response('', 202)
# Check if we have a valid signer configured.
if not signer.name:
abort(404)
# Lookup the signature for the verb.
signature_value = registry_model.get_derived_image_signature(derived_image, signer.name)
if signature_value is None:
abort(404)
# Return the signature.
return make_response(signature_value)
@check_region_blacklisted()
def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, checker=None,
**kwargs):
# Verify that the image exists and that we have access to it.
logger.debug('Verifying repo verb %s for repository %s/%s with user %s with mimetype %s',
verb, namespace, repository, get_authenticated_user(), request.accept_mimetypes.best)
tag, manifest, schema1_manifest = _verify_repo_verb(storage, namespace, repository,
tag_name, verb, checker)
# Load the repository for later.
repo = model.repository.get_repository(namespace, repository)
if repo is None:
abort(404)
# Check for torrent. If found, we return a torrent for the repo verb image (if the derived
# image already exists).
if request.accept_mimetypes.best == 'application/x-bittorrent':
metric_queue.repository_pull.Inc(labelvalues=[namespace, repository, verb + '+torrent', True])
return _torrent_repo_verb(repo, tag, manifest, verb, **kwargs)
# Log the action.
track_and_log('repo_verb', wrap_repository(repo), tag=tag.name, verb=verb, **kwargs)
metric_queue.repository_pull.Inc(labelvalues=[namespace, repository, verb, True])
is_readonly = app.config.get('REGISTRY_STATE', 'normal') == 'readonly'
# Lookup/create the derived image for the verb and repo image.
if is_readonly:
derived_image = registry_model.lookup_derived_image(
manifest, verb, storage,
varying_metadata={'tag': tag.name},
include_placements=True)
else:
derived_image = registry_model.lookup_or_create_derived_image(
manifest, verb, storage.preferred_locations[0], storage,
varying_metadata={'tag': tag.name},
include_placements=True)
if derived_image is None:
logger.error('Could not create or lookup a derived image for manifest %s', manifest)
abort(400)
if derived_image is not None and not derived_image.blob.uploading:
logger.debug('Derived %s image %s exists in storage', verb, derived_image)
is_head_request = request.method == 'HEAD'
metric_queue.pull_byte_count.Inc(derived_image.blob.compressed_size, labelvalues=[verb])
download_url = storage.get_direct_download_url(derived_image.blob.placements,
derived_image.blob.storage_path,
head=is_head_request)
if download_url:
logger.debug('Redirecting to download URL for derived %s image %s', verb, derived_image)
return redirect(download_url)
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
logger.debug('Sending cached derived %s image %s', verb, derived_image)
return send_file(
storage.stream_read_file(derived_image.blob.placements, derived_image.blob.storage_path),
mimetype=LAYER_MIMETYPE)
logger.debug('Building and returning derived %s image', verb)
# Close the database connection before any process forking occurs. This is important because
# the Postgres driver does not react kindly to forking, so we need to make sure it is closed
# so that each process will get its own unique connection.
database.close_db_filter(None)
def _cleanup():
# Close any existing DB connection once the process has exited.
database.close_db_filter(None)
hasher = PieceHasher(app.config['BITTORRENT_PIECE_SIZE'])
def _store_metadata_and_cleanup():
if is_readonly:
return
with database.UseThenDisconnect(app.config):
registry_model.set_torrent_info(derived_image.blob, app.config['BITTORRENT_PIECE_SIZE'],
hasher.final_piece_hashes())
registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)
# Create a queue process to generate the data. The queue files will read from the process
# and send the results to the client and storage.
unique_id = (derived_image.unique_id
if derived_image is not None
else hashlib.sha256('%s:%s' % (verb, uuid.uuid4())).hexdigest())
handlers = [hasher.update]
reporter = VerbReporter(verb)
args = (formatter, tag, schema1_manifest, unique_id, handlers, reporter)
queue_process = QueueProcess(
_open_stream,
8 * 1024,
10 * 1024 * 1024, # 8K/10M chunk/max
args,
finished=_store_metadata_and_cleanup)
client_queue_file = QueueFile(queue_process.create_queue(), 'client')
if not is_readonly:
storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
# If signing is required, add a QueueFile for signing the image as we stream it out.
signing_queue_file = None
if sign and signer.name:
signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')
# Start building.
queue_process.run()
# Start the storage saving.
if not is_readonly:
storage_args = (verb, derived_image, storage_queue_file)
QueueProcess.run_process(_write_derived_image_to_storage, storage_args, finished=_cleanup)
if sign and signer.name:
signing_args = (verb, derived_image, signing_queue_file)
QueueProcess.run_process(_sign_derived_image, signing_args, finished=_cleanup)
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
# Return the client's data.
return send_file(client_queue_file, mimetype=LAYER_MIMETYPE)
def os_arch_checker(os, arch):
def checker(tag, manifest):
try:
image_json = json.loads(manifest.leaf_layer.raw_v1_metadata)
except ValueError:
logger.exception('Could not parse leaf layer JSON for manifest %s', manifest)
return False
except TypeError:
logger.exception('Could not parse leaf layer JSON for manifest %s', manifest)
return False
# Verify the architecture and os.
operating_system = image_json.get('os', 'linux')
if operating_system != os:
return False
architecture = image_json.get('architecture', 'amd64')
# Note: Some older Docker images have 'x86_64' rather than 'amd64'.
# We allow the conversion here.
if architecture == 'x86_64' and operating_system == 'linux':
architecture = 'amd64'
if architecture != arch:
return False
return True
return checker
@route_show_if(features.ACI_CONVERSION)
@anon_protect
@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/sig/<os>/<arch>/', methods=['GET'])
@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci.asc/<os>/<arch>/', methods=['GET'])
@process_auth
def get_aci_signature(server, namespace, repository, tag, os, arch):
return _repo_verb_signature(namespace, repository, tag, 'aci', checker=os_arch_checker(os, arch),
os=os, arch=arch)
@route_show_if(features.ACI_CONVERSION)
@anon_protect
@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=[
'GET', 'HEAD'])
@process_auth
def get_aci_image(server, namespace, repository, tag, os, arch):
return _repo_verb(namespace, repository, tag, 'aci',
AppCImageFormatter(), sign=True, checker=os_arch_checker(os, arch), os=os,
arch=arch)
@anon_protect
@verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
@process_auth
def get_squashed_tag(namespace, repository, tag):
return _repo_verb(namespace, repository, tag, 'squash', SquashedDockerImageFormatter())
@route_show_if(features.BITTORRENT)
@anon_protect
@verbs.route('/torrent{0}'.format(BLOB_DIGEST_ROUTE), methods=['GET'])
@process_auth
@parse_repository_name()
@check_region_blacklisted(namespace_name_kwarg='namespace_name')
def get_tag_torrent(namespace_name, repo_name, digest):
repo = model.repository.get_repository(namespace_name, repo_name)
repo_is_public = repo is not None and model.repository.is_repository_public(repo)
permission = ReadRepositoryPermission(namespace_name, repo_name)
if not permission.can() and not repo_is_public:
abort(403)
user = get_authenticated_user()
if user is None and not repo_is_public:
    # We cannot generate a private torrent cluster without a user uuid (e.g. token auth)
abort(403)
if repo is not None and repo.kind.name != 'image':
abort(405)
repo_ref = registry_model.lookup_repository(namespace_name, repo_name)
if repo_ref is None:
abort(404)
blob = registry_model.get_repo_blob_by_digest(repo_ref, digest, include_placements=True)
if blob is None:
abort(404)
metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'torrent', True])
return _torrent_for_blob(blob, repo_is_public)
@verbs.route('/_internal_ping')
@anon_allowed
def internal_ping():
return make_response('true', 200)
|
the-stack_0_27330
|
# License information goes here
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# The line above will help with 2to3 support.
def most_recent_tag(tags,username=None):
"""Scan an SVN tags directory and return the most recent tag.
Parameters
----------
tags : str
A URL pointing to an SVN tags directory.
username : str, optional
If set, pass the value to SVN's ``--username`` option.
Returns
-------
most_recent_tag : str
The most recent tag found in ``tags``. The tag will be converted into
a `PEP 386`_ / `PEP 440`_ -style version string, if it does not already
follow that convention.
Notes
-----
This function tries really, really hard to convert any non-standard tags
into `PEP 386`_ / `PEP 440`_ -style version strings. Any tags that fail
this conversion step are ignored (and they should be!).
.. _`PEP 386`: http://legacy.python.org/dev/peps/pep-0386/
.. _`PEP 440`: http://legacy.python.org/dev/peps/pep-0440/
This function was 'trained' on the tags in the idlutils, idlspec2d and
photoop products.
"""
from distutils.version import StrictVersion
from subprocess import Popen, PIPE
import re
bare_digit_re = re.compile(r'^v?([0-9]+)(a|b|)$') # match things like v3a or v4
almost_good_re = re.compile(r'^v?([0-9]+)(\.[0-9]+)(\.[0-9]+)*(a|b|)$') # match things like v5.0.2a
command = ['svn']
if username is not None: command += ['--username', username]
command += ['ls',tags]
proc = Popen(command,stdout=PIPE,stderr=PIPE)
out, err = proc.communicate()
out = out.decode("utf-8") if isinstance(out,bytes) else out
err = err.decode("utf-8") if isinstance(err,bytes) else err
try:
tags = [v.rstrip('/').replace('_','.') for v in out.split('\n') if len(v) > 0]
except TypeError:
tags = [v.rstrip('/').replace('_','.') for v in out.decode('utf-8').split('\n') if len(v) > 0]
valid_tags = list()
for t in tags:
v = None
try:
v = StrictVersion(t)
except ValueError:
if t.startswith('v'):
try:
v = StrictVersion(t[1:])
except ValueError:
m = bare_digit_re.match(t)
if m is not None:
g = m.groups()
if len(g[1]) > 0:
v = StrictVersion(g[0]+'.0'+g[1]+'0')
else:
v = StrictVersion(g[0]+'.0')
else:
m = almost_good_re.match(t)
if m is not None:
g = m.groups()
tt = g[0]+g[1]
if g[2] is None:
tt += '.0'
else:
tt += g[2]
if len(g[3]) > 0:
tt += g[3]+'0'
try:
v = StrictVersion(tt)
except ValueError:
# Give up at this point!
# print(t)
pass
if v is not None:
valid_tags.append(v)
if len(valid_tags) == 0:
return '0.0.0'
mrt = sorted(valid_tags)
return str(mrt[-1])
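# Hedged illustration (not exercised anywhere in this module) of the tag
# normalisation performed above; the sample tags are invented, but the mappings
# follow the bare_digit_re / almost_good_re branches: 'v4' -> '4.0',
# 'v3a' -> '3.0a0', 'v5.0.2a' -> '5.0.2a0', all of which StrictVersion accepts.
def _tag_normalisation_examples():
    from distutils.version import StrictVersion
    return [str(StrictVersion(v)) for v in ('4.0', '3.0a0', '5.0.2a0')]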
|
the-stack_0_27331
|
import argparse
import json
import math
import os
import shutil
from pprint import pprint
from collections import namedtuple
import tensorflow as tf
from tqdm import tqdm
import numpy as np
import sys
from trainers.bidaf_trainer import BiDAFTrainer
from models.bidaf_model import BiDAFModel
from models.bidaf_model import get_model
from utils.logger import Logger
sys.path.append("./")
def main(config):
"""
config 에 디렉토리 추가해주고
"""
if not os.path.exists(config.summary_dir):
os.makedirs(config.summary_dir)
if config.mode == 'train':
_train(config)
elif config.mode == 'test':
_test(config)
else:
raise ValueError("invalid value for 'mode': {}".format(config.mode))
def _train(config):
total_path = os.path.join(config.data_dir, "fixed_data_total.json")
train_data_path = os.path.join(config.data_dir, "data_train.json")
train_fixed_data_path = os.path.join(config.data_dir, "fixed_data_train.json")
with open(train_data_path, 'r') as fh:
train_data = json.load(fh)
with open(train_fixed_data_path, 'r') as fh:
train_fixed_data = json.load(fh)
dev_data_path = os.path.join(config.data_dir, "data_dev.json")
dev_fixed_data_path = os.path.join(config.data_dir, "fixed_data_dev.json")
with open(dev_data_path, 'r') as fh:
dev_data = json.load(fh)
with open(dev_fixed_data_path, 'r') as fh:
dev_fixed_data = json.load(fh)
# train_data = np.load(train_data_path)
# dev_data = np.load(dev_data_path)
'''degug'''
# test_data_path = os.path.join(config.data_dir, "data_test.json")
# test_fixed_data_path = os.path.join(config.data_dir, "fixed_data_test.json")
# with open(test_data_path, 'r') as fh:
# test_data = json.load(fh)
# with open(test_fixed_data_path, 'r') as fh:
# test_fixed_data = json.load(fh)
#
# train_data = test_data
# train_fixed_data = test_fixed_data
''''''
total_data = json.load(open(total_path,'r'))
total_w2v_dict = total_data['w2v_dict']
total_w2i_dict = total_data['w2i_dict']
total_c2i_dict = total_data['c2i_dict']
assert len(total_w2v_dict) == len(total_w2i_dict)
i2v_dict = {total_w2i_dict[word]: vec for i, (word, vec) in enumerate(total_w2v_dict.items()) if word in total_w2i_dict}
emb_mat = np.array([i2v_dict[i] if i in i2v_dict \
else np.zeros(config.word_emb_size) \
for i in tqdm(range(len(total_w2v_dict)))
])
config.emb_mat = tf.convert_to_tensor(emb_mat,dtype='float')
print(f"embedding done : {config.emb_mat.get_shape()}")
x_i = train_data['x_i']
qs_w = tf.ragged.constant(train_data['qs_w'])
qs_c = tf.ragged.constant(train_data['qs_c'])
y = train_data['y']
x_aw = tf.ragged.constant(train_fixed_data['x_aw'])
x_ac = tf.ragged.constant(train_fixed_data['x_ac'])
train_context = {
'w2i_dict': total_w2i_dict,
'c2i_dict': total_c2i_dict,
'x_aw': x_aw,
'x_ac': x_ac
}
dataset = tf.data.Dataset.from_tensor_slices({
'x_i':x_i,
'qs_w':qs_w,
'qs_c':qs_c,
'y':y})
dataset = dataset.shuffle(1000000).repeat().batch(config.batch_size)
iterator = dataset.make_one_shot_iterator()
next_batch = iterator.get_next()
dev_context = {
'w2i_dict': total_w2i_dict,
'c2i_dict': total_c2i_dict,
'x_aw':tf.ragged.constant(dev_fixed_data['x_aw']),
'x_ac':tf.ragged.constant(dev_fixed_data['x_ac'])
}
dev_batch = {
'x_i':dev_data['x_i'],
'qs_w':tf.ragged.constant(dev_data['qs_w']),
'qs_c':tf.ragged.constant(dev_data['qs_c']),
'y':dev_data['y']}
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(allow_soft_placement=True))
model = get_model(config)
logger = Logger(sess,config)
trainer = BiDAFTrainer(model,config,logger)
num_steps = int(math.ceil(len(y) / config.batch_size)) * config.num_epochs
for i in tqdm(range(num_steps),mininterval=1):
# for _ in tqdm( total = num_stps):
batch = sess.run(next_batch)
global_step = sess.run(model.global_step) + 1 # +1 because all calculations are done after step
get_summary = global_step % config.log_period == 0
        loss, summary, train_op = trainer.step(sess, batch, train_context, get_summary=get_summary)
if global_step % config.dev_period == 0:
print("\nEvaluation:")
            trainer.dev_step(sess, dev_batch, dev_context, get_summary=get_summary)
print("")
def _test(config):
pass
class Config(object):
def __init__(self, **entries):
self.__dict__.update(entries)
def _run():
parser = argparse.ArgumentParser()
parser.add_argument("config_path", help="config.json path")
parser.add_argument("--mode",required=True,help="train? test?")
args = parser.parse_args()
print(f'args : {args}')
with open(args.config_path, 'r') as fh:
config = Config(**json.load(fh))
config.mode = args.mode
main(config)
if __name__ == '__main__':
_run()
|
the-stack_0_27332
|
import logging
import cv2
from keras.models import model_from_yaml
class Recognition:
def __init__(self, model_file, weights_file, dictionary):
self.logger = logging.getLogger(__name__)
with open(model_file, "r") as file:
self.model = model_from_yaml(file.read())
height = self.model.inputs[0].shape[1]
self.img_size = (height, height)
self.model.load_weights(weights_file)
with open(dictionary, "r") as file:
self.dictionary = {}
data = file.read().split("\n")
for index, character in enumerate(data):
self.dictionary[index] = character
self.logger.debug("Loaded model")
def recognize_character(self, image, resize=True):
if resize:
image = cv2.resize(image, self.img_size)
result = self.model.predict(image[None, :])
index = result.argmax(axis=-1)[0]
ret = (index, self.dictionary[index], result[0][index])
        self.logger.debug("Found {} (probability {} %)".format(ret[1], ret[2]))
return ret
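# Hedged usage sketch; the file names and the grayscale read below are
# assumptions made purely for illustration and do not correspond to assets
# shipped with this module.
def _recognition_example(image_path="glyph.png"):
    recognizer = Recognition("model.yaml", "weights.h5", "dictionary.txt")
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    index, character, probability = recognizer.recognize_character(image)
    return character, probability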
|
the-stack_0_27333
|
import re, math, os
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import gridplot, row, layout
from bokeh.models import ColumnDataSource, ranges, LabelSet, Div, SingleIntervalTicker, LinearAxis
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
class Chat(object):
def __init__(self, file):
"""Initialises the object with file-name and fetches the content into self.chat_cntnt"""
self.file = file
with open(self.file, "r") as chat:
self.chat_cntnt = chat.read()
def number_of_messages(self):
"""Finds and returns self.tot_num_msgs and self.num_media"""
pttrn_num_msgs = re.compile(r'\b\d*/\d*/\d*, \d*:\d* [AP]M - (.*?): ')
matches = pttrn_num_msgs.findall(self.chat_cntnt)
self.tot_num_msgs = len(matches)
pttrn_num_media = re.compile(r'\s<Media omitted>')
matches = pttrn_num_media.findall(self.chat_cntnt)
self.num_media = len(matches)
return self.tot_num_msgs, self.num_media
def number_of_contributing_members(self):
"""Finds and returns self.num_mem and self.member_list"""
members = re.findall(r'\b\d*/\d*/\d*, \d*:\d* [AP]M - (.*?): ', self.chat_cntnt)
self.member_list = list(set(members))
self.member_list_set = set(members)
self.num_mem = len(self.member_list)
for idx, peep in enumerate(self.member_list):
if u'\u202a' in peep:
self.member_list[idx] = peep.strip(u'\u202a')
if u'\u202c' in peep:
self.member_list[idx] = self.member_list[idx].strip(u'\u202c')
return self.num_mem, self.member_list
# Analysis point 3, 3a and 3b.
# (dependent on self.number_of_contributing_members())
def message_by_member_splitup(self):
# self.number_of_contributing_members()
self.mem_msg_splitup = {}
for peep in self.member_list_set:
pttrn_mem_by_msg = re.compile(r'\b\d*/\d*/\d*, \d*:\d* [AP]M - '+re.escape(peep)+r': ')
matches = pttrn_mem_by_msg.findall(self.chat_cntnt)
self.mem_msg_splitup[peep.strip(u'\u202a').strip(u'\u202c')] = len(matches)
self.max_msg_peep = max(self.mem_msg_splitup, key=self.mem_msg_splitup.get)
self.numMsgs_by_max_msg_peep = max(self.mem_msg_splitup.values())
self.min_msg_peep = min(self.mem_msg_splitup, key=self.mem_msg_splitup.get)
self.numMsgs_by_min_msg_peep = min(self.mem_msg_splitup.values())
return self.mem_msg_splitup, {self.max_msg_peep:self.numMsgs_by_max_msg_peep}, {self.min_msg_peep:self.numMsgs_by_min_msg_peep}
# Analysis point 5 and 5a.
# (dependent on self.number_of_contributing_members())
def media_by_member_splitup(self):
# self.number_of_contributing_members()
self.mem_media_splitup = {}
for peep in self.member_list_set:
pttrn_media_by_mem = re.compile(r'\b\d*/\d*/\d*, \d*:\d* [AP]M - '+re.escape(peep)+r':\s<Media omitted>')
matches = pttrn_media_by_mem.findall(self.chat_cntnt)
self.mem_media_splitup[peep.strip(u'\u202a').strip(u'\u202c')] = len(matches)
self.max_media_peep = max(self.mem_media_splitup, key=self.mem_media_splitup.get)
self.numMedia_by_max_media_peep = max(self.mem_media_splitup.values())
self.min_media_peep = min(self.mem_media_splitup, key=self.mem_media_splitup.get)
self.numMedia_by_min_media_peep = min(self.mem_media_splitup.values())
return self.mem_media_splitup, {self.max_media_peep:self.numMedia_by_max_media_peep}, {self.min_media_peep:self.numMedia_by_min_media_peep}
def time_stats(self):
        '''Returns (time-span of chat, time of first message, time of last message, msg timestamp/datestamp/hourstamp/monthstamp lists, media timestamp/datestamp/hourstamp/monthstamp lists)'''
self.msg_timestamps = []
self.msg_datestamps = []
self.msg_hourstamps = []
self.msg_monthstamps = []
self.media_timestamps = []
self.media_datestamps = []
self.media_hourstamps = []
self.media_monthstamps = []
for msg_ln in self.chat_cntnt.splitlines():
pttrn_new_datetime = re.compile(r'(\b\d*/\d*/\d*, \d*:\d* [AP]M) - .*?:')
pttrn_new_date = re.compile(r'(\b\d*/\d*/\d*), \d*:\d* [AP]M - .*?:')
pttrn_new_hour = re.compile(r'(\b\d*/\d*/\d*), (\d*):\d* ([AP]M) - .*?:')
pttrn_new_month = re.compile(r'\b\d*/(\d*/\d*), \d*:\d* [AP]M - .*?:')
pttrn_new_datetime_media = re.compile(r'(\b\d*/\d*/\d*, \d*:\d* [AP]M) - .*?: <Media omitted>')
pttrn_new_date_media = re.compile(r'(\b\d*/\d*/\d*), \d*:\d* [AP]M - .*?: <Media omitted>')
pttrn_new_hour_media = re.compile(r'(\b\d*/\d*/\d*), (\d*):\d* ([AP]M) - .*?: <Media omitted>')
pttrn_new_month_media = re.compile(r'\b\d*/(\d*/\d*), \d*:\d* [AP]M - .*?: <Media omitted>')
datetime_matches = pttrn_new_datetime.findall(msg_ln)
date_matches = pttrn_new_date.findall(msg_ln)
hour_matches = pttrn_new_hour.findall(msg_ln)
month_matches = pttrn_new_month.findall(msg_ln)
datetime_matches_media = pttrn_new_datetime_media.findall(msg_ln)
date_matches_media = pttrn_new_date_media.findall(msg_ln)
hour_matches_media = pttrn_new_hour_media.findall(msg_ln)
month_matches_media = pttrn_new_month_media.findall(msg_ln)
if len(datetime_matches) == 1:
self.msg_timestamps.append(datetime_matches[0])
self.msg_datestamps.append(date_matches[0])
self.msg_hourstamps.append(' '.join(hour_matches[0]))
self.msg_monthstamps.append(month_matches[0])
if len(datetime_matches_media) == 1:
self.media_timestamps.append(datetime_matches_media[0])
self.media_datestamps.append(date_matches_media[0])
self.media_hourstamps.append(' '.join(hour_matches_media[0]))
self.media_monthstamps.append(month_matches_media[0])
self.chat_timeLength = datetime.strptime(self.msg_timestamps[-1], '%d/%m/%y, %I:%M %p') - datetime.strptime(self.msg_timestamps[0], '%d/%m/%y, %I:%M %p')
return (self.chat_timeLength,
self.msg_timestamps[0], self.msg_timestamps[-1], self.msg_timestamps,
self.msg_datestamps,
self.msg_hourstamps,
self.msg_monthstamps,
self.media_timestamps,
self.media_datestamps,
self.media_hourstamps,
self.media_monthstamps
)
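    # For reference, the stamp strings produced above look like the following
    # (derived from the regex groups, using an illustrative date):
    #   msg_timestamps : "12/3/18, 9:41 PM"
    #   msg_datestamps : "12/3/18"
    #   msg_hourstamps : "12/3/18 9 PM"
    #   msg_monthstamps: "3/18"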
def dash_it_up(self):
# print("DASH IT UP BEGIN:\t\t" + datetime.strftime(datetime.now(), '%I:%M:%S'))
total_num_messages, total_num_media = self.number_of_messages()
# print("self.number_of_messages() executed")
num_members, member_list = self.number_of_contributing_members()
# print("self.number_of_contributing_members() executed")
member_numMsg_dict, max_msg_peep_dict, min_msg_peep_dict = self.message_by_member_splitup()
# print("self.message_by_member_splitup() executed")
member_numMedia_dict, max_media_peep_dict, min_media_peep_dict = self.media_by_member_splitup()
# print("self.media_by_member_splitup() executed")
chat_timespan, msg_one_t, msg_last_t, all_times, all_dates, all_hours, all_months, all_times_media, all_dates_media, all_hours_media, all_months_media= self.time_stats()
# print("self.time_stats() executed")
output_file("./HTMLs/_STATISTICS_{}.html".format(os.path.basename(os.path.splitext(self.file)[0])))
#PLOT 0: TITLE OF THE PAGE===========================================================================================================================#
title_plot = figure(plot_height=30, logo=None)
title_plot.title.text = "{} ({} participants)".format(os.path.basename(os.path.splitext(self.file)[0]), num_members)
# title_plot.title.text_font = "SF Pro Display"
title_plot.title.text_font_size = "55px"
title_plot.title.text_font_style = "bold"
title_plot.title.align = "center"
#DISTRIBUTION PLOT SETTINGS====================================================#
title_text_font_size = "40px"
xtick_font_size_value = (-1/7*num_members + 152/7) if num_members>=20 else 16
xtick_text_font_size = "{}px".format(xtick_font_size_value)
individual_bar_label_size = "{}px".format(xtick_font_size_value)
colors = [""]
#PLOT 1: MESSAGE DISTRIBUTION===========================================================================================================================#
source = ColumnDataSource(dict(x=list(self.mem_msg_splitup.keys()), y=list(self.mem_msg_splitup.values())))
plot1 = figure(x_range=list(self.mem_msg_splitup.keys()), logo=None, sizing_mode="scale_width", plot_height=400)
plot1.title.text = "Messages: {}".format(total_num_messages)
plot1.title.text_font_size = title_text_font_size
# plot1.title.text_font = page_font
labels = LabelSet(x='x', y='y', text='y', level='glyph',
x_offset=-xtick_font_size_value/2, y_offset=0, source=source, render_mode='canvas',
text_font_size=individual_bar_label_size,
# text_font=page_font
)
plot1.vbar(source=source,
x='x',
top='y',
width=0.8)
plot1.add_layout(labels)
plot1.xgrid.grid_line_color = None
plot1.y_range.start = 0
plot1.xaxis.major_label_orientation = math.pi/2
plot1.xaxis.major_label_text_font_size = xtick_text_font_size
# plot1.xaxis.major_label_text_font = page_font
plot1.yaxis.axis_label = "#messages"
plot1.yaxis.major_label_orientation = math.pi/2
# plot1.yaxis.major_label_text_font = page_font
plot1.yaxis.major_label_text_font_size = "16px"
plot1.yaxis.axis_label_text_font_size = "16px"
# plot1.yaxis.axis_label_text_font = page_font
#PLOT 2: MEDIA DISTRIBUTION===========================================================================================================================#
source = ColumnDataSource(dict(x=list(self.mem_media_splitup.keys()), y=list(self.mem_media_splitup.values())))
plot2 = figure(x_range=list(self.mem_media_splitup.keys()), logo=None, sizing_mode="scale_width", plot_height=400)
plot2.title.text = "Media: {}".format(total_num_media)
plot2.title.text_font_size = title_text_font_size
# plot2.title.text_font = page_font
labels = LabelSet(x='x', y='y', text='y', level='glyph',
x_offset=-xtick_font_size_value/2, y_offset=0, source=source, render_mode='canvas',
text_font_size=individual_bar_label_size,
# text_font=page_font
)
plot2.vbar(source=source,
x='x',
top='y',
width=0.8, color="firebrick")
plot2.add_layout(labels)
plot2.xgrid.grid_line_color = None
plot2.y_range.start = 0
plot2.xaxis.major_label_orientation = math.pi/2
plot2.xaxis.major_label_text_font_size = xtick_text_font_size
# plot2.xaxis.major_label_text_font = page_font
plot2.yaxis.axis_label = "#media"
plot2.yaxis.major_label_orientation = math.pi/2
# plot2.yaxis.major_label_text_font = page_font
# plot2.yaxis.major_label_text_font_size = "16px"
plot2.yaxis.axis_label_text_font_size = "16px"
# plot2.yaxis.axis_label_text_font = page_font
#PLOT 3: MEMBER LIST & (TOTAL NUMBER OF MEMBERS)===========================================================================================================================#
plot3 = figure(plot_height=13, logo=None, sizing_mode="scale_width")
name_str = ''
for x in member_list:
if name_str == '':
name_str += x
else:
name_str += ', '+x
plot3.title.text = "Participants ({}): {}".format(num_members, name_str)
plot3.title.text_font_size = "18px"
# plot3.title.text_font = page_font
plot3.title.text_font_style = "normal"
plot3.title.align = "center"
#TIME DISTRIBUTION PLOTS' LOCAL FUNCTIONS===========================================================#
def perdelta(start, end, delta):
curr = start
while curr<end:
yield curr
curr += delta
def timeBlockSpan(first, last):
"""
Returns: 1 ==> minutes (very new chat)
2 ==> hours (relatively new chat)
3 ==> days (relatively old chat)
4 ==> months (established chat)(cancelled)
"""
t_delta = last - first
if t_delta.total_seconds() <= 3600:
return 1
elif 3600 < t_delta.total_seconds() <= 259200:
return 2
elif 259200 < t_delta.total_seconds() and t_delta.days <= 91:
return 3
elif t_delta.days > 91:
return 4
#PLOT 4: MESSAGE TIME DISTRIBUTION===========================================================================================================================#
# print("Begin" + datetime.strftime(datetime.now(), '%I:%M:%S'))
all_dates_dtObjs = []
for stamp in all_dates:
all_dates_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y').date())
all_times_dtObjs = []
for stamp in all_times:
all_times_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y, %I:%M %p'))
all_hours_dtObjs = []
for stamp in all_hours:
all_hours_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y %I %p'))
all_months_dtObjs = []
for stamp in all_months:
all_months_dtObjs.append(datetime.strptime(stamp, '%m/%y'))
# print("created all dtObjs" + datetime.strftime(datetime.now(), '%I:%M:%S'))
first_date, last_date = all_dates_dtObjs[0], all_dates_dtObjs[-1]
first_dt, last_dt = all_times_dtObjs[0], all_times_dtObjs[-1]
first_hour, last_hour = all_hours_dtObjs[0], all_hours_dtObjs[-1]
first_month, last_month = all_months_dtObjs[0], all_months_dtObjs[-1]
timeBlockSpan_decision = timeBlockSpan(first_dt, last_dt)
# print("TBS decision generated" + datetime.strftime(datetime.now(), '%I:%M:%S'))
if timeBlockSpan_decision == 1:
all_times_msgs_distr = {}
for i in perdelta(first_dt, last_dt+timedelta(seconds=60), timedelta(seconds=60)):
all_times_msgs_distr[i] = all_times_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%I:%M %p") for x in all_times_msgs_distr.keys()]
y = list(all_times_msgs_distr.values())
elif timeBlockSpan_decision == 2:
all_hours_msgs_distr = {}
for i in perdelta(first_hour, last_hour+timedelta(hours=1), timedelta(hours=1)):
all_hours_msgs_distr[i] = all_hours_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%d/%m, %H-{} hours".format(x.hour+1)) for x in all_hours_msgs_distr.keys()]
y = list(all_hours_msgs_distr.values())
elif timeBlockSpan_decision == 3:
all_dates_msgs_distr = {}
for i in perdelta(first_date, last_date+timedelta(days=1), timedelta(days=1)):
all_dates_msgs_distr[i] = all_dates_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%d %B '%y") for x in all_dates_msgs_distr.keys()]
y = list(all_dates_msgs_distr.values())
elif timeBlockSpan_decision == 4:
all_months_msgs_distr = {}
for i in perdelta(first_month, last_month+relativedelta(months=+1), relativedelta(months=+1)):
all_months_msgs_distr[i] = all_months_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%B '%y") for x in all_months_msgs_distr.keys()]
y = list(all_months_msgs_distr.values())
# print(datetime.strftime(datetime.now(), '%I:%M:%S'))
num_bars_on_plot = len(xLabels)
xtick_font_size_value = (-1/7*num_bars_on_plot + 152/5.5) if num_bars_on_plot>=40 else 16
xtick_text_font_size = "{}px".format(xtick_font_size_value)
source = ColumnDataSource(dict(x=xLabels, y=y))
plot4 = figure(plot_height=180, logo=None, sizing_mode="scale_width", x_range=xLabels)
plot4.title.text = "Messages time distribution [{} - {} (~{} days)]".format(msg_one_t, msg_last_t, chat_timespan.days+1)
plot4.title.text_font_size = title_text_font_size
labels = LabelSet(x='x', y='y', text='y', level='glyph',
x_offset=-6, y_offset=0, source=source, render_mode='canvas',
text_font_size=xtick_text_font_size,
# text_font=page_font
)
plot4.vbar(source=source, x='x', top='y', width=0.9, color="#9EA09E")
plot4.add_layout(labels)
plot4.xaxis.major_label_orientation = math.pi/2
plot4.xaxis.major_label_text_font_size = xtick_text_font_size
plot4.yaxis.axis_label = "Activity (#messages)"
plot4.yaxis.axis_label_text_font_size = "16px"
#PLOT 5: MEDIA TIME DISTRIBUTION===========================================================================================================================#
all_dates_media_dtObjs = []
for stamp in all_dates_media:
all_dates_media_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y').date())
all_times_media_dtObjs = []
for stamp in all_times_media:
all_times_media_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y, %I:%M %p'))
all_hours_media_dtObjs = []
for stamp in all_hours_media:
all_hours_media_dtObjs.append(datetime.strptime(stamp, '%d/%m/%y %I %p'))
all_months_media_dtObjs = []
for stamp in all_months_media:
all_months_media_dtObjs.append(datetime.strptime(stamp, '%m/%y'))
# print("created all dtObjs" + datetime.strftime(datetime.now(), '%I:%M:%S'))
first_date_media, last_date_media = all_dates_media_dtObjs[0], all_dates_media_dtObjs[-1]
first_dt_media, last_dt_media = all_times_media_dtObjs[0], all_times_media_dtObjs[-1]
first_hour_media, last_hour_media = all_hours_media_dtObjs[0], all_hours_media_dtObjs[-1]
first_month_media, last_month_media = all_months_media_dtObjs[0], all_months_media_dtObjs[-1]
timeBlockSpan_decision = timeBlockSpan(first_dt_media, last_dt_media)
# print("TBS decision generated" + datetime.strftime(datetime.now(), '%I:%M:%S'))
if timeBlockSpan_decision == 1:
all_times_media_distr = {}
for i in perdelta(first_dt_media, last_dt_media+timedelta(seconds=60), timedelta(seconds=60)):
all_times_media_distr[i] = all_times_media_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%I:%M %p") for x in all_times_media_distr.keys()]
y = list(all_times_media_distr.values())
elif timeBlockSpan_decision == 2:
all_hours_media_distr = {}
for i in perdelta(first_hour_media, last_hour_media+timedelta(hours=1), timedelta(hours=1)):
all_hours_media_distr[i] = all_hours_media_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%d/%m, %H-{} hours".format(x.hour+1)) for x in all_hours_media_distr.keys()]
y = list(all_hours_media_distr.values())
elif timeBlockSpan_decision == 3:
all_dates_media_distr = {}
for i in perdelta(first_date_media, last_date_media+timedelta(days=1), timedelta(days=1)):
all_dates_media_distr[i] = all_dates_media_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%d %B '%y") for x in all_dates_media_distr.keys()]
y = list(all_dates_media_distr.values())
elif timeBlockSpan_decision == 4:
all_months_media_distr = {}
for i in perdelta(first_month_media, last_month_media+relativedelta(months=+1), relativedelta(months=+1)):
all_months_media_distr[i] = all_months_media_dtObjs.count(i)
xLabels = [datetime.strftime(x, "%B '%y") for x in all_months_media_distr.keys()]
y = list(all_months_media_distr.values())
# print(datetime.strftime(datetime.now(), '%I:%M:%S'))
num_bars_on_plot = len(xLabels)
xtick_font_size_value = (-1/7*num_bars_on_plot + 152/5.5) if num_bars_on_plot>=40 else 16
xtick_text_font_size = "{}px".format(xtick_font_size_value)
source = ColumnDataSource(dict(x=xLabels, y=y))
plot5 = figure(plot_height=180, logo=None, sizing_mode="scale_width", x_range=xLabels)
plot5.title.text = "Media time distribution [{} - {} (~{} days)]".format(msg_one_t, msg_last_t, chat_timespan.days+1)
plot5.title.text_font_size = title_text_font_size
labels = LabelSet(x='x', y='y', text='y', level='glyph',
x_offset=-6, y_offset=0, source=source, render_mode='canvas',
text_font_size=xtick_text_font_size,
# text_font=page_font
)
plot5.vbar(source=source, x='x', top='y', width=0.9, color="#FFC300")
plot5.add_layout(labels)
plot5.xaxis.major_label_orientation = math.pi/2
plot5.xaxis.major_label_text_font_size = xtick_text_font_size
plot5.yaxis.axis_label = "Activity (#media)"
plot5.yaxis.axis_label_text_font_size = "16px"
#DASHBOARD ASSIMILATION===========================================================================================================================#
dashboard = layout(
children=[
[title_plot],
[plot3],
[plot1, plot2],
[plot4],
[plot5]
],
sizing_mode="scale_width"
)
show(dashboard)
# print("DASH IT UP END:\t\t" + datetime.strftime(datetime.now(), '%I:%M:%S'))
def main():
chat = Chat("./chats/WhatsApp Chat with xyz.txt")
chat.dash_it_up()
# print("{} done at {}".format(C, datetime.strftime(datetime.now(), '%I:%M:%S %p')))
if __name__ == '__main__':
main()
|
the-stack_0_27334
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Elements of Systems course (Curso de Elementos de Sistemas)
# Developed by: Rafael Corsi <[email protected]>
#
# Created: 11/2017
#
# Summary: runs the CPU simulation via ModelSim
import os
import shutil
import argparse
import fileinput
import time
import platform
from log import logError, logSim
# config file
CONFIG_FILE = "config.txt"
# TST DIR files
TST_DIR = "tst/"
# RAM files
RAM_INIT_FILE = "_in.mif"
RAM_END_FILE = "_tst.mif"
RAM_END_SIMU_FILE = "_end.mif"
OUT_SIM_LST = ""
# Path to vsim #
PATH_VSIM = os.path.join(os.environ.get('VUNIT_MODELSIM_PATH'), "vsim")
# Files used on this simulation
PATH_WORK = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..", "Z01-Simulator-rtl-2")
END = "\n"
def setRuntimeDo(time, doFile):
for line in fileinput.input(doFile, inplace = 1):
if "run" in line:
print("run "+str(time)+" ns")
else:
print(line.rstrip())
def rmFile(f):
try:
os.remove(f)
except OSError:
pass
# Takes a test-type directory and a path to the program
# file (.mif) as parameters and runs the simulations
# described in the configuration file.
def simulateFromTestDir(testDir, hackDir, gui, verbose, rtlDir=PATH_WORK):
configFile = testDir+CONFIG_FILE
    # path of the configuration file
pwd = os.path.dirname(configFile) + "/"
os.path.abspath(hackDir)
os.path.abspath(configFile)
# file
f = ""
    # check that a test-type directory was given
if not os.path.exists(configFile):
logError("Favor passar como parametro um diretorio do tipo test")
return(1)
    # check that the config file exists
try:
f = open(configFile, 'r')
except:
logError("Arquivo {} não encontrado".format(CONFIG_FILE))
return(1)
for l in f:
        if l.strip() and not l.strip().startswith('#'):
            # read the parameters and build the global paths
            # par[0] : test name (subfolder)
            # par[1] : number of tests to run
            # par[2] : simulation time in ns
            par = l.rstrip().split()
            # test name
            name = par[0]
            # total simulation time
            sTime = int(par[2])
# paths
mif = hackDir+name+".mif"
            # check that the .mif file exists
if os.path.isfile(mif):
# simulate
for i in range(0, int(par[1])):
                    # TODO: use os.path.join?
ramIn = pwd+TST_DIR+name+"/"+name+"{}".format(i) + RAM_INIT_FILE
ramOut = pwd+TST_DIR+name+"/"+name+str(i) + RAM_END_SIMU_FILE
print("Simulating " + os.path.relpath(mif) + " teste : " + str(i))
if os.path.isfile(ramIn):
tic = time.time()
if verbose is True :
print(ramIn)
print(mif)
print(ramOut)
simulateCPU(ramIn, mif, ramOut, sTime, gui, verbose, rtlDir=rtlDir)
toc = time.time()
print(" ( {0:.2f} seconds)".format(toc-tic))
else:
logError("Arquivo de simulacao não encontrado :")
logError(" - {}".format(ramIn))
return(1)
else:
logError("Arquivo hack não encontrado :")
logError(" - {}".format(mif))
def simulateCPU(ramIn, romIn, ramOut, time, debug, verbose, rtlDir=PATH_WORK):
global OUT_SIM_LST
rtlDir = os.path.abspath(rtlDir)
PATH_DO = os.path.join(rtlDir, "do", "sim.do")
TEMP_IN_RAM_MIF = os.path.join(rtlDir, "tmpRAM.mif")
TEMP_IN_ROM_MIF = os.path.join(rtlDir, "tmpROM.mif")
OUT_RAM_MEM = os.path.join(rtlDir, "out", "RAM.mem")
OUT_ROM_MEM = os.path.join(rtlDir, "out", "ROM.mem")
    # ugly, improve this! it should not be global, but the GUI
    # simulator uses it; pass it as a parameter instead,
    # or wrap this in a class
OUT_SIM_LST = os.path.join(rtlDir, "out", "SIM.lst")
ramIn = os.path.abspath(ramIn)
romIn = os.path.abspath(romIn)
ramOut = os.path.abspath(ramOut)
# try:
# os.remove(OUT_RAM_MEM)
# os.remove(OUT_ROM_MEM)
# os.remove(OUT_SIM_LST)
# print("removido")
# except:
# print("simulateCPU: Falha em remove arquivos")
# pass
# return(0)
try:
shutil.copyfile(ramIn, TEMP_IN_RAM_MIF)
shutil.copyfile(romIn, TEMP_IN_ROM_MIF)
except:
logError("Arquivos não encontrados :")
logError(" - {}".format(romIn))
logError(" - {}".format(ramIn))
return(1)
if PATH_VSIM is None:
logError("Configurar a variavel de ambiente : 'VUNIT_MODELSIM_PATH' ")
return(1)
setRuntimeDo(time, PATH_DO)
v = ""
if platform.system() == "Windows":
if verbose is False:
v = " > NUL "
else:
if verbose is False:
v = " > /dev/null "
c = ""
if debug is False:
c = " -c "
    # run the simulation in ModelSim
owd = os.getcwd()
os.chdir(rtlDir)
os.system(PATH_VSIM + c + " -do " + PATH_DO + v)
os.chdir(owd)
shutil.copyfile(OUT_RAM_MEM, ramOut)
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-R", "--in_ram_mif", required=True, help="estado inicial da RAM no formato .mif ")
ap.add_argument("-P", "--in_rom_mif", required=True, help="estado inicial da ROM no formato .mif ")
ap.add_argument("-O", "--out_ram", required=True, help="diretorio para saída das simulacoes")
ap.add_argument("-T", "--time_ns", required=True, help="Tempo em ns da simulacao")
ap.add_argument("-d", "--debug", required=False, action='store_true', help="open modelsim window")
ap.add_argument("-v", "--verbose", required=False, action='store_true', help="shows modelsim output")
args = vars(ap.parse_args())
if args["debug"]:
debug = True
else:
debug = False
if args["verbose"]:
verbose = True
else:
verbose = False
simulateCPU(ramIn=args["in_ram_mif"],
romIn=args["in_rom_mif"],
ramOut=args["out_ram"],
time=args["time_ns"],
debug=debug,
verbose=verbose)
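
# Illustrative invocation (the script name, environment path and .mif file names below
# are assumptions; only the flags come from the argparse definition above):
#
#   export VUNIT_MODELSIM_PATH=/opt/modelsim/bin
#   python simulateCPU.py -R ram_in.mif -P program.mif -O ram_out.mif -T 20000 -v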
|
the-stack_0_27337
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import math
import distribution1D
import crowtools
# initialize distribution container
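# Note (assumption, not stated in this file): the `distributions` and `tools` objects
# referenced by the control functions below are not defined here; they are presumably
# injected at runtime by the RAVEN driver that loads this control-logic module.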
def keep_going_function(monitored, controlled, auxiliary): return auxiliary.keepGoing
def restart_function(monitored, controlled, auxiliary):
# here we store some critical parameters that we want in the output
auxiliary.CladFailureDistThreshold = distributions.CladFailureDist.getVariable('ProbabilityThreshold')
auxiliary.CladTempBranched = distributions.CladFailureDist.inverseCdf(auxiliary.CladFailureDistThreshold)
auxiliary.crew1DG1Threshold = distributions.crew1DG1.getVariable('ProbabilityThreshold')
auxiliary.DG1recoveryTime = distributions.crew1DG1.inverseCdf(auxiliary.crew1DG1Threshold)
auxiliary.PrimPGrecoveryThreshold = distributions.PrimPGrecovery.getVariable('ProbabilityThreshold')
auxiliary.PrimPGrecoveryTime = distributions.PrimPGrecovery.inverseCdf(auxiliary.PrimPGrecoveryThreshold)
auxiliary.crewSecPGThreshold = distributions.crewSecPG.getVariable('ProbabilityThreshold')
auxiliary.SecPGrecoveryTime = distributions.crewSecPG.inverseCdf(auxiliary.crewSecPGThreshold)
auxiliary.deltaAux = min(auxiliary.DG1recoveryTime,auxiliary.SecPGrecoveryTime,auxiliary.PrimPGrecoveryTime)
# here we check the variables one by one (for the aux)
return
def control_function(monitored, controlled, auxiliary):
if auxiliary.CladDamaged and not auxiliary.AuxSystemUp:
auxiliary.keepGoing = False
# here we check the variables one by one (for the aux)
if monitored.time>=auxiliary.scram_start_time:
auxiliary.ScramStatus = True
print('SCRAM')
else:
auxiliary.ScramStatus = False
print('OPERATIONAL STATE')
if (auxiliary.crew1DG1) and not auxiliary.AuxSystemUp:
auxiliary.AuxSystemUp = True
if (auxiliary.PrimPGrecovery) and not auxiliary.AuxSystemUp:
auxiliary.AuxSystemUp = True
if (auxiliary.crewSecPG) and not auxiliary.AuxSystemUp:
auxiliary.AuxSystemUp = True
#if auxiliary.CladDamaged:
# if monitored.time_step > 1:
# raise NameError ('exit condition reached - failure of the clad')
if auxiliary.ScramStatus:
#we are in scram
#primary pump B
if controlled.Head_PumpB>1.e-4*8.9:
if not auxiliary.AuxSystemUp:
# not yet auxiliary system up
controlled.Head_PumpB = tools.PumpCoastDown.compute(monitored.time-auxiliary.scram_start_time)
if controlled.Head_PumpB < (1.e-4*8.9):
controlled.Head_PumpB = 1.e-4*8.9
controlled.friction1_SC_B = auxiliary.frict_m*controlled.Head_PumpB + auxiliary.frict_q
auxiliary.init_exp_frict = True
else:
#system up
controlled.friction1_SC_B = controlled.friction1_SC_B - 62.5
if controlled.friction1_SC_B < 0.1:
controlled.friction1_SC_B = 0.1
if controlled.Head_PumpB <= 0.10*8.9:
controlled.Head_PumpB = controlled.Head_PumpB + 0.00125*8.9
if controlled.Head_PumpB > 0.10*8.9:
controlled.Head_PumpB = 0.10*8.9
else:
controlled.Head_PumpB = tools.PumpCoastDown.compute(monitored.time-auxiliary.scram_start_time)
if controlled.Head_PumpB < (0.10*8.9):
controlled.Head_PumpB = 0.10*8.9 #
else:
# auxiliary.Head_PumpB<1.e-4*8.9
if not auxiliary.AuxSystemUp:
# not yet auxiliary system up
controlled.Head_PumpB = 1.e-4*8.9
controlled.friction1_SC_B = 10000
else:
# auxiliary system up
controlled.friction1_SC_B = controlled.friction1_SC_B - 62.5
if controlled.friction1_SC_B < 0.1:
controlled.friction1_SC_B = 0.1
if controlled.Head_PumpB <= 0.10*8.9:
controlled.Head_PumpB = controlled.Head_PumpB + 0.00125*8.9
if controlled.Head_PumpB > 0.10*8.9:
controlled.Head_PumpB = 0.10*8.9
else:
controlled.Head_PumpB = tools.PumpCoastDown.compute(monitored.time-auxiliary.scram_start_time)
if controlled.Head_PumpB < (0.10*8.9):
controlled.Head_PumpB = 0.10*8.9
#primary pump A
controlled.Head_PumpA = controlled.Head_PumpB
controlled.friction2_SC_B = controlled.friction1_SC_B
controlled.friction1_CL_B = controlled.friction1_SC_B
controlled.friction2_CL_B = controlled.friction1_SC_B
controlled.friction1_SC_A = controlled.friction1_SC_B
controlled.friction2_SC_A = controlled.friction1_SC_B
controlled.friction1_CL_A = controlled.friction1_SC_B
controlled.friction2_CL_A = controlled.friction1_SC_B
print(controlled.friction1_SC_B)
#secondary system replaced by auxiliary secondary system
if not auxiliary.AuxSystemUp and auxiliary.ScramStatus:
# not yet auxiliary system up
print('not yet auxiliary system up')
controlled.MassFlowRateIn_SC_B = 4.542*tools.PumpCoastDownSec.compute(monitored.time-auxiliary.scram_start_time)
controlled.MassFlowRateIn_SC_A = 4.542*tools.PumpCoastDownSec.compute(monitored.time-auxiliary.scram_start_time)
if controlled.MassFlowRateIn_SC_A < (1.e-4*4.542):
controlled.MassFlowRateIn_SC_A = 1.e-4*4.542
controlled.MassFlowRateIn_SC_B = 1.e-4*4.542
if auxiliary.AuxSystemUp and auxiliary.ScramStatus:
# auxiliary system up
print('auxiliary system up')
controlled.MassFlowRateIn_SC_B = 4.542*0.10
if controlled.MassFlowRateIn_SC_B <= 0.10*4.542:
controlled.MassFlowRateIn_SC_B = 0.10*4.542
else:
controlled.MassFlowRateIn_SC_B = 4.542*tools.PumpCoastDownSec.compute(monitored.time-auxiliary.scram_start_time)
if controlled.MassFlowRateIn_SC_B <= 0.10*4.542:
controlled.MassFlowRateIn_SC_B = 0.10*4.542
controlled.MassFlowRateIn_SC_A = controlled.MassFlowRateIn_SC_B
if auxiliary.ScramStatus:
#core power following decay heat curve
controlled.power_CH1 = auxiliary.init_Power_Fraction_CH1*tools.DecayHeatScalingFactor.compute(monitored.time-auxiliary.scram_start_time)
controlled.power_CH2 = auxiliary.init_Power_Fraction_CH2*tools.DecayHeatScalingFactor.compute(monitored.time-auxiliary.scram_start_time)
controlled.power_CH3 = auxiliary.init_Power_Fraction_CH3*tools.DecayHeatScalingFactor.compute(monitored.time-auxiliary.scram_start_time)
return
def dynamic_event_tree(monitored, controlled, auxiliary):
if monitored.time_step <= 1:
return
if distributions.CladFailureDist.checkCdf(monitored.avg_temp_clad_CH1) and (not auxiliary.CladDamaged) and (not auxiliary.AuxSystemUp):
auxiliary.CladDamaged = True
return
if distributions.CladFailureDist.checkCdf(monitored.avg_temp_clad_CH2) and (not auxiliary.CladDamaged) and (not auxiliary.AuxSystemUp):
auxiliary.CladDamaged = True
return
if distributions.CladFailureDist.checkCdf(monitored.avg_temp_clad_CH3) and (not auxiliary.CladDamaged) and (not auxiliary.AuxSystemUp):
auxiliary.CladDamaged = True
return
if distributions.crew1DG1.checkCdf(monitored.time - auxiliary.scram_start_time ) and (not auxiliary.CladDamaged) and (not auxiliary.crew1DG1) and (not auxiliary.AuxSystemUp):
auxiliary.crew1DG1 = True
return
if distributions.PrimPGrecovery.checkCdf(monitored.time - auxiliary.scram_start_time) and (not auxiliary.CladDamaged) and (not auxiliary.PrimPGrecovery) and (not auxiliary.AuxSystemUp):
auxiliary.PrimPGrecovery = True
return
if distributions.crewSecPG.checkCdf(monitored.time - auxiliary.scram_start_time) and (not auxiliary.CladDamaged) and (not auxiliary.crewSecPG) and (not auxiliary.AuxSystemUp):
auxiliary.crewSecPG = True
return
return
|
the-stack_0_27338
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\@[\w\-\.]+:\S+?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(r"\n\s*Invalid command:"),
re.compile(r"\nCommit failed"),
re.compile(r"\n\s+Set failed"),
]
terminal_length = os.getenv('ANSIBLE_VYOS_TERMINAL_LENGTH', 10000)
def on_open_shell(self):
try:
for cmd in ['set terminal length 0', 'set terminal width 512']:
self._exec_cli_command(cmd)
self._exec_cli_command('set terminal length %s' % self.terminal_length)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
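
# Illustrative note (the sample prompt strings are assumptions for documentation only):
# the stdout patterns above are meant to detect VyOS prompts such as
#   "vyos@vyos:~$ "   (operational mode)
#   "vyos@vyos# "     (configuration mode)
# while the stderr patterns catch output lines like "  Invalid command: ..." or
# "Commit failed".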
|
the-stack_0_27340
|
def test_name_version_wheel():
from pylibimport.utils import get_name_version
filename = 'dynamicmethod-1.0.3-py3-none-any.whl'
name, version = get_name_version(filename)
assert name == 'dynamicmethod', '{} != {}'.format(name, 'dynamicmethod')
assert version == '1.0.3', '{} != {}'.format(version, '1.0.3')
filename = 'dynamicmethod-1.0.3-cp36-cp36m-any.whl'
name, version = get_name_version(filename)
assert name == 'dynamicmethod', '{} != {}'.format(name, 'dynamicmethod')
assert version == '1.0.3', '{} != {}'.format(version, '1.0.3')
filename = 'dynamicmethod-1.0.3dev2-cp36-cp36m-any.whl'
name, version = get_name_version(filename)
assert name == 'dynamicmethod', '{} != {}'.format(name, 'dynamicmethod')
assert version == '1.0.3dev2', '{} != {}'.format(version, '1.0.3dev2')
def test_name_version_zip():
from pylibimport.utils import get_name_version
filename = 'dynamicmethod-1.0.2.tar.gz'
name, version = get_name_version(filename)
assert name == 'dynamicmethod', '{} != {}'.format(name, 'dynamicmethod')
assert version == '1.0.2', '{} != {}'.format(version, '1.0.2')
filename = 'dynamicmethod-1.0.2.zip'
name, version = get_name_version(filename)
assert name == 'dynamicmethod'
assert version == '1.0.2', '{} != {}'.format(version, '1.0.2')
def test_name_version_py():
from pylibimport.utils import get_name_version
filename = 'custom.py'
name, version = get_name_version(filename)
assert name == 'custom', '{} != {}'.format(name, 'custom')
assert version == '0.0.0', '{} != {}'.format(version, '0.0.0')
def test_name_version_setup_py():
import pathlib
from pylibimport.utils import get_name_version, parse_setup
from pylibimport import __meta__
filename = str(pathlib.Path().absolute().parent.joinpath('setup.py'))
name, version = None, None
try:
meta = parse_setup(filename)
name, version = meta['name'], meta['version']
except (ImportError, PermissionError, FileNotFoundError, KeyError, Exception):
pass
assert name == __meta__.name, '{} != {}'.format(name, __meta__.name)
assert version == __meta__.version, '{} != {}'.format(version, __meta__.version)
filename = '../setup.py' # Local setup.py
name, version = get_name_version(filename)
assert name == __meta__.name, '{} != {}'.format(name, __meta__.name)
assert version == __meta__.version, '{} != {}'.format(version, __meta__.version)
def test_name_version_meta():
import os
from pylibimport.utils import parse_meta
from pylibimport import __meta__
filename = '../setup.py' # Local setup.py
name, version = None, None
# Try finding the __meta__.py file to read. This is the only way I can read the proper name and version.
filename = os.path.dirname(filename)
for fname in os.listdir(filename):
try:
meta = parse_meta(os.path.join(filename, fname, '__meta__.py'))
name, version = meta['name'], meta['version']
break
except (FileNotFoundError, PermissionError, TypeError, KeyError, Exception):
pass
assert name == __meta__.name, '{} != {}'.format(name, __meta__.name)
assert version == __meta__.version, '{} != {}'.format(version, __meta__.version)
def test_get_compatibility_tags():
import sys
import re
from pylibimport.utils import get_compatibility_tags, is_compatible
pyver = '{}{}'.format(sys.version_info[0], sys.version_info[1])
filename = 'dynamicmethod-1.0.2rc1-cp{0}-cp{0}m-win_amd64.whl'.format(pyver)
get_compatibility_tags(filename)
pyver = '{}{}'.format(sys.version_info[0], sys.version_info[1])
filename = 'dynamicmethod-1.0.2rc1-cp{0}-cp{0}m-win_amd64.whl'.format(pyver)
assert is_compatible(filename)
filename = 'dynamicmethod-1.0.2rc1-cp{0}-cp{0}m-win_amd64.whl'.format(10)
assert not is_compatible(filename)
if __name__ == '__main__':
test_name_version_wheel()
test_name_version_zip()
test_name_version_py()
test_name_version_setup_py()
test_name_version_meta()
test_get_compatibility_tags()
print('All tests finished successfully!')
|
the-stack_0_27341
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create pipeline
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
sf = vtk.vtkSplitField()
sf.SetInputData(output)
sf.SetInputField("VECTORS","POINT_DATA")
sf.Split(0,"vx")
sf.Split(1,"vy")
sf.Split(2,"vz")
#sf.Print()
aax = vtk.vtkAssignAttribute()
aax.SetInputConnection(sf.GetOutputPort())
aax.Assign("vx","SCALARS","POINT_DATA")
isoVx = vtk.vtkContourFilter()
isoVx.SetInputConnection(aax.GetOutputPort())
isoVx.SetValue(0,.38)
normalsVx = vtk.vtkPolyDataNormals()
normalsVx.SetInputConnection(isoVx.GetOutputPort())
normalsVx.SetFeatureAngle(45)
isoVxMapper = vtk.vtkPolyDataMapper()
isoVxMapper.SetInputConnection(normalsVx.GetOutputPort())
isoVxMapper.ScalarVisibilityOff()
isoVxMapper.ImmediateModeRenderingOn()
isoVxActor = vtk.vtkActor()
isoVxActor.SetMapper(isoVxMapper)
isoVxActor.GetProperty().SetColor(1,0.7,0.6)
aay = vtk.vtkAssignAttribute()
aay.SetInputConnection(sf.GetOutputPort())
aay.Assign("vy","SCALARS","POINT_DATA")
isoVy = vtk.vtkContourFilter()
isoVy.SetInputConnection(aay.GetOutputPort())
isoVy.SetValue(0,.38)
normalsVy = vtk.vtkPolyDataNormals()
normalsVy.SetInputConnection(isoVy.GetOutputPort())
normalsVy.SetFeatureAngle(45)
isoVyMapper = vtk.vtkPolyDataMapper()
isoVyMapper.SetInputConnection(normalsVy.GetOutputPort())
isoVyMapper.ScalarVisibilityOff()
isoVyMapper.ImmediateModeRenderingOn()
isoVyActor = vtk.vtkActor()
isoVyActor.SetMapper(isoVyMapper)
isoVyActor.GetProperty().SetColor(0.7,1,0.6)
aaz = vtk.vtkAssignAttribute()
aaz.SetInputConnection(sf.GetOutputPort())
aaz.Assign("vz","SCALARS","POINT_DATA")
isoVz = vtk.vtkContourFilter()
isoVz.SetInputConnection(aaz.GetOutputPort())
isoVz.SetValue(0,.38)
normalsVz = vtk.vtkPolyDataNormals()
normalsVz.SetInputConnection(isoVz.GetOutputPort())
normalsVz.SetFeatureAngle(45)
isoVzMapper = vtk.vtkPolyDataMapper()
isoVzMapper.SetInputConnection(normalsVz.GetOutputPort())
isoVzMapper.ScalarVisibilityOff()
isoVzMapper.ImmediateModeRenderingOn()
isoVzActor = vtk.vtkActor()
isoVzActor.SetMapper(isoVzMapper)
isoVzActor.GetProperty().SetColor(0.4,0.5,1)
mf = vtk.vtkMergeFields()
mf.SetInputConnection(sf.GetOutputPort())
mf.SetOutputField("merged","POINT_DATA")
mf.SetNumberOfComponents(3)
mf.Merge(0,"vy",0)
mf.Merge(1,"vz",0)
mf.Merge(2,"vx",0)
#mf.Print()
aa = vtk.vtkAssignAttribute()
aa.SetInputConnection(mf.GetOutputPort())
aa.Assign("merged","SCALARS","POINT_DATA")
aa2 = vtk.vtkAssignAttribute()
aa2.SetInputConnection(aa.GetOutputPort())
aa2.Assign("SCALARS","VECTORS","POINT_DATA")
sl = vtk.vtkStreamTracer()
sl.SetInputConnection(aa2.GetOutputPort())
sl.SetStartPosition(2,-2,26)
sl.SetMaximumPropagation(40)
sl.SetInitialIntegrationStep(0.2)
sl.SetIntegrationDirectionToForward()
rf = vtk.vtkRibbonFilter()
rf.SetInputConnection(sl.GetOutputPort())
rf.SetWidth(1.0)
rf.SetWidthFactor(5)
slMapper = vtk.vtkPolyDataMapper()
slMapper.SetInputConnection(rf.GetOutputPort())
slMapper.ImmediateModeRenderingOn()
slActor = vtk.vtkActor()
slActor.SetMapper(slMapper)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(isoVxActor)
isoVxActor.AddPosition(0,12,0)
ren1.AddActor(isoVyActor)
ren1.AddActor(isoVzActor)
isoVzActor.AddPosition(0,-12,0)
ren1.AddActor(slActor)
slActor.AddPosition(0,24,0)
ren1.AddActor(outlineActor)
outlineActor.AddPosition(0,24,0)
ren1.SetBackground(.8,.8,.8)
renWin.SetSize(320,320)
ren1.GetActiveCamera().SetPosition(-20.3093,20.55444,64.3922)
ren1.GetActiveCamera().SetFocalPoint(8.255,0.0499763,29.7631)
ren1.GetActiveCamera().SetViewAngle(30)
ren1.GetActiveCamera().SetViewUp(0,0,1)
ren1.GetActiveCamera().Dolly(0.4)
ren1.ResetCameraClippingRange()
# render the image
#
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
the-stack_0_27343
|
import collections
import logging
import os
import re
import textwrap
import time
import weakref
import sublime
from sublime_lib import encodings, ResourcePath
from ..lib.weakmethod import WeakMethodProxy
from ..lib import get_setting
from .region_math import VALUE_SCOPE, get_value_region_at, get_last_key_name_from
logger = logging.getLogger(__name__)
PREF_FILE = "Preferences.sublime-settings"
PREF_FILE_ALIAS = "Base File.sublime-settings"
KIND_SETTING = (sublime.KIND_ID_VARIABLE, "S", "Setting")
def html_encode(string):
"""Encode some critical characters to html entities."""
return string.replace("&", "&") \
.replace("<", "<") \
.replace(">", ">") \
.replace("\t", " ") \
.replace(" ", " ") \
.replace("\n", "<br>") if string else ""
def format_completion_item(value, default=None, is_default=False, label=None, annotation=None):
"""Create a completion item with its type as description.
Arguments:
value (any):
The value which is added when completions are committed.
If `label` is none, the `value` is used as label, too.
default (any):
Sets is_default if equals `value`.
is_default (bool):
If `True` the completion item is marked '(default)'.
label (str):
An alternative label to use to present the `value`
in the completions panel.
annotation (str):
An optional annotation to display after the label.
"""
if isinstance(value, dict):
raise ValueError("Cannot format dictionary value", value)
if not is_default:
is_default = value in default if isinstance(default, list) else value == default
type_ = type(value).__name__
return sublime.CompletionItem(
trigger=sublime.encode_value(label or value).strip('"'),
annotation=("(default) {}" if is_default else "{}").format(annotation or ""),
completion=value,
kind=(sublime.KIND_ID_SNIPPET, type_[0], type_),
)
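# Illustrative results (derived from the implementation above):
#   format_completion_item(12, default=12)   -> trigger "12", marked "(default)", kind ("i", "int")
#   format_completion_item("auto", "manual") -> trigger "auto", no default marker, kind ("s", "str")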
def decode_value(string):
"""Decode string to python object with unrestrictive booleans."""
if string.lower() == "true":
return True
if string.lower() == "false":
return False
try:
return int(string)
except ValueError:
return float(string)
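# Illustrative behaviour of decode_value (derived from the code above):
#   decode_value("True")  -> True        decode_value("42")  -> 42
#   decode_value("false") -> False       decode_value("4.2") -> 4.2
#   decode_value("auto")  -> raises ValueError (neither a boolean nor a number)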
class KnownSettings(object):
"""A class which provides all known settings with comments/defaults.
An object of this class initialized with a sublime-settings file loads all
basefiles from all packages including comments and default values to
provide all required information for tooltips and auto-completion.
"""
# cache for instances, keyed by the basename
# and using weakrefs for easy garbage collection
cache = weakref.WeakValueDictionary()
_is_initialized = False
_is_loaded = False
filename = None
on_loaded_callbacks = None
on_loaded_once_callbacks = None
defaults = None
comments = None
fallback_settings = None
def __new__(cls, filename, on_loaded=None, **kwargs):
# __init__ will be called on the return value
obj = cls.cache.get(filename)
if obj:
logger.debug("cache hit %r", filename)
return cls.cache[filename]
else:
obj = super().__new__(cls, **kwargs)
cls.cache[filename] = obj
return obj
def __init__(self, filename):
"""Initialize view event listener object.
Arguments:
filename (str):
Settings file name to index.
"""
# Because __init__ may be called multiple times
# and we only want to trigger a reload once,
# we need special handling here.
if not self._is_initialized:
# the associated settings file name all the settings belong to
self.filename = filename
# callback lists
self.on_loaded_callbacks = []
self.on_loaded_once_callbacks = []
self._is_initialized = True
# the dictionary with all defaults of a setting
self.defaults = collections.ChainMap()
# the dictionary with all comments of each setting
self.comments = collections.ChainMap()
self.trigger_settings_reload()
def add_on_loaded(self, on_loaded, once=False):
"""Add a callback to call once settings have been indexed (asynchronously).
Bound methods are stored as weak references.
Arguments:
on_loaded (callable):
The callback.
once (bool):
Whether the callback should be called only once.
"""
# Due to us archiving the callback, we use a weakref
# to avoid a circular reference to all SettingListeners affected,
# ensuring our __del__ is properly called when all relevant views are closed.
if self._is_loaded:
# Invoke callback 'immediately' since we're already loaded.
# Note that this is currently not thread-safe.
sublime.set_timeout_async(on_loaded, 0)
if not once:
self.on_loaded_callbacks.append(WeakMethodProxy(on_loaded))
elif not self._is_loaded:
self.on_loaded_once_callbacks.append(WeakMethodProxy(on_loaded))
def __del__(self):
logger.debug("deleting KnownSettings instance for %r", self.filename)
def __iter__(self):
"""Iterate over default keys."""
return iter(self.defaults)
def trigger_settings_reload(self):
# look for settings files asynchronously
sublime.set_timeout_async(self._load_settings, 0)
def _load_settings(self):
"""Load and merge settings and their comments from all base files.
The idea is each package which wants to add a valid entry to the
`Preferences.sublime-settings` file must provide such a file with all
keys it wants to add. These keys and the associated comments above it
are loaded into dictionaries and used to provide tooltips, completions
and linting.
"""
ignored_patterns = frozenset(("/User/", "/Preferences Editor/"))
# TODO project settings include "Preferences",
# but we don't have a syntax def for those yet
logger.debug("loading defaults and comments for %r", self.filename)
start_time = time.time()
resources = sublime.find_resources(self.filename)
resources += sublime.find_resources(self.filename + "-hints")
if self.filename == PREF_FILE:
resources += sublime.find_resources(PREF_FILE_ALIAS)
logger.debug("found %d %r files", len(resources), self.filename)
for resource in resources:
if any(ignored in resource for ignored in ignored_patterns):
logger.debug("ignoring %r", resource)
continue
try:
logger.debug("parsing %r", resource)
lines = sublime.load_resource(resource).splitlines()
for key, value in self._parse_settings(lines).items():
# merge settings without overwriting existing ones
self.defaults.setdefault(key, value)
except Exception as e:
logger.error("error parsing %r - %s%r",
resource, e.__class__.__name__, e.args)
duration = time.time() - start_time
logger.debug("loading took %.3fs", duration)
# include general settings if we're in a syntax-specific file
is_syntax_specific = self._is_syntax_specific()
if is_syntax_specific and not self.fallback_settings:
self.fallback_settings = KnownSettings(PREF_FILE)
# add fallbacks to the ChainMaps
self.defaults.maps.append(self.fallback_settings.defaults)
self.comments.maps.append(self.fallback_settings.comments)
# these may be loaded later, so delay calling our own callbacks
self.fallback_settings.add_on_loaded(self._has_loaded, once=True)
else:
if self.fallback_settings and not is_syntax_specific:
# file was renamed, probably
self.fallback_settings = None
self.defaults.maps.pop()
self.comments.maps.pop()
self._has_loaded()
def _has_loaded(self):
self._is_loaded = True
for callback in self.on_loaded_once_callbacks:
try:
callback()
except ReferenceError:
pass
self.on_loaded_once_callbacks.clear()
# copy callback list so we can clean up expired references
for callback in tuple(self.on_loaded_callbacks):
try:
callback()
except ReferenceError:
logger.debug("removing gone-away weak on_loaded_callback reference")
self.on_loaded_callbacks.remove(callback)
def _is_syntax_specific(self):
"""Check whether a syntax def with the same base file name exists.
Returns:
bool
"""
syntax_file_exts = (".sublime-syntax", ".tmLanguage")
name_no_ext = os.path.splitext(self.filename)[0]
for ext in syntax_file_exts:
syntax_file_name = name_no_ext + ext
resources = sublime.find_resources(syntax_file_name)
if resources:
logger.debug("syntax-specific settings file for %r", resources[0])
return True
return False
def _parse_settings(self, lines):
"""Parse the setting file and capture comments.
This is naive but gets the job done most of the time.
"""
content = []
comment = []
in_comment = False
for line in lines:
stripped = line.strip()
if in_comment:
if stripped.endswith("*/"):
in_comment = False
                    # strip trailing spaces and asterisks
line = line.rstrip("*/ \t")
if line:
comment.append(line)
elif stripped.startswith("* "):
comment.append(stripped[2:])
else:
comment.append(line)
continue
# ignore empty lines if not in a comment
# empty line in comment may be used as visual separator
elif not stripped:
continue
if stripped.startswith("/*"):
in_comment = True
                # strip leading asterisks
stripped = stripped[2:].lstrip("*")
if stripped:
comment.append(stripped)
continue
if stripped.startswith("//"):
# skip comment lines ending with `//` (likely used as separators)
# a standalone `//` adds an empty line as visual separator
stripped = stripped[2:]
if not stripped or not stripped.endswith("//"):
comment.append(stripped)
continue
content.append(line)
if comment:
# the json key is used as key for the comments located above it
match = re.match(r'"((?:[^"]|\\.)*)":', stripped)
if not match:
continue
key = match.group(1)
if key not in self.comments:
self.comments[key] = textwrap.dedent('\n'.join(comment))
comment.clear()
# Return decoded json file from content with stripped comments
return sublime.decode_value('\n'.join(content))
def build_tooltip(self, view, key):
"""Return html encoded docstring for settings key.
Arguments:
view (sublime.View):
the view to provide completions for
key (string):
the key under the cursor
"""
if key in self.defaults:
# the comment for the setting
comment = html_encode(self.comments.get(key) or "No description.")
# the default value from base file
default = html_encode(
sublime.encode_value(self.defaults.get(key), pretty=True))
else:
comment, default = "No description.", "unknown setting"
# format tooltip html content
return (
"<h1>{key}</h1>"
"<h2>Default: {default}</h2>"
"<p>{comment}</p>"
).format(**locals())
def insert_snippet(self, view, key):
"""Insert a snippet for the settings key at the end of the view.
Arguments:
view (sublime.View):
The view to add the snippet to. Doesn't need to be the view
of this ViewEventHandler. It's more likely the view of the
user settings which is to be passed here.
key (string):
The settings key to insert a snippet for.
"""
# find last value in the view
value_regions = view.find_by_selector(VALUE_SCOPE)
if not value_regions:
# no value found use end of global dict
selector = "meta.mapping"
value_regions = view.find_by_selector(selector)
if not value_regions:
# no global dict found, insert one
point = view.size()
is_empty_line = not view.substr(view.line(point)).strip()
bol = "{\n\t" if is_empty_line else "\n{\n\t"
eol = ",$0\n}\n"
else:
# insert first value to user file
point = value_regions[-1].end() - 1
bol, eol = "\t", "\n"
else:
# find line with last non-whitespace characters
value_region = value_regions[-1]
value_str = view.substr(value_region)
value_str_trimmed = value_str.rstrip()
ws_length = len(value_str) - len(value_str_trimmed)
point = view.line(value_region.end() - ws_length).end()
if value_str_trimmed.endswith(","):
# already have a comma after last entry
bol, eol = "\n", ","
else:
# add a comma after last entry
bol, eol = ",\n", ""
# format and insert the snippet
snippet = self._key_snippet(key, self.defaults[key], bol, eol)
view.sel().clear()
view.sel().add(point)
view.run_command('insert_snippet', {'contents': snippet})
def key_completions(self, view, prefix, point):
"""Create a list with completions for all known settings.
Arguments:
view (sublime.View):
the view to provide completions for
prefix (string):
the line content before cursor
point (int):
the text positions of all characters in prefix
Returns:
tuple ([ (trigger, content), (trigger, content) ], flags):
the tuple with content ST needs to display completions
"""
if view.match_selector(point - 1, "string"):
# we are within quotations, return words only
completions = [
sublime.CompletionItem(
trigger=key,
completion=key,
kind=KIND_SETTING,
# TODO link to show full description
# details=,
)
for key in self.defaults
]
else:
line = view.substr(view.line(point)).strip()
# don't add newline after snippet if user starts on empty line
eol = "," if len(line) == len(prefix) else ",\n"
# no quotations -> return full snippet
completions = [
sublime.CompletionItem(
trigger=key,
completion=self._key_snippet(key, value, eol=eol),
completion_format=sublime.COMPLETION_FORMAT_SNIPPET,
kind=KIND_SETTING,
# TODO link to show full description
# details=,
)
for key, value in self.defaults.items()
]
return completions
@staticmethod
def _encode_snippet_field_default_content(content: str) -> str:
"""Escape string for snippet's default content context."""
return content \
.replace("\\", "\\\\") \
.replace("$", "\\$") \
.replace("}", "\\}")
@staticmethod
def _key_snippet(key, value, bol="", eol=",\n"):
"""Create snippet with default value depending on type.
Arguments:
key (string):
the settings key name
value (any):
the default value of the setting read from base file
bol (string):
the prefix to add to the beginning of line
eol (string):
the suffix to add to the end of line
Returns:
string: the contents field to insert into completions entry
"""
encoded = sublime.encode_value(value, pretty=True).strip()
if isinstance(value, str):
# create the snippet for json strings and exclude quotation marks
# from the input field {1:}
#
# "key": "value"
#
fmt = '{bol}"{key}": "${{1:{content}}}"{eol}'
encoded = encoded[1:-1]
elif isinstance(value, list):
# create the snippet for json lists and exclude brackets
# from the input field {1:}
#
# "key":
# [
# value
# ]
#
fmt = '{bol}"{key}":\n[\n\t${{1:{content}}}\n]{eol}'
encoded = encoded[1:-1].strip()
elif isinstance(value, dict):
# create the snippet for json dictionaries braces
# from the input field {1:}
#
# "key":
# {
# value
# }
#
fmt = '{bol}"{key}":\n{{\n\t${{1:{content}}}\n}}{eol}'
encoded = encoded[1:-1].strip()
else:
fmt = '{bol}"{key}": ${{1:{content}}}{eol}'
# `encoded` is like a JSON string, but with quotes/braces/brackets stripped.
content = KnownSettings._encode_snippet_field_default_content(encoded)
return fmt.format(key=key, content=content, bol=bol, eol=eol)
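    # Illustrative output of _key_snippet (derived from the format strings above),
    # e.g. for a string setting:
    #   _key_snippet("theme", "Default.sublime-theme")
    #   -> '"theme": "${1:Default.sublime-theme}",\n'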
def value_completions(self, view, prefix, point):
"""Create a list with completions for all known settings values.
Arguments:
view (sublime.View):
the view to provide completions for
prefix (string):
the line content before cursor.
point (int):
the text positions of all characters in prefix
Returns:
tuple ([ (trigger, content), (trigger, content) ], flags):
the tuple with content ST needs to display completions
"""
value_region = get_value_region_at(view, point)
if not value_region:
logger.debug("unable to find current key region")
return None
key = get_last_key_name_from(view, value_region.begin())
if not key:
logger.debug("unable to find current key")
return None
# Use a map to deduplicate completions by trigger; latter overrides
completions_map = {c.trigger: c for c in self._value_completions_for(key)}
completions = list(completions_map.values())
if not completions:
logger.debug("no completions to offer")
return None
is_str = any(
bool(isinstance(c.completion, str)
or (isinstance(c.completion, list)
and c.completion
and isinstance(c.completion[0], str)))
for c in completions
)
in_str = view.match_selector(point, "string")
logger.debug("completing a string (%s) within a string (%s)", is_str, in_str)
is_list = isinstance(self.defaults.get(key), list)
in_list = view.match_selector(point, "meta.sequence")
logger.debug("completing a list item (%s) within a list (%s)", is_list, in_list)
if in_str and not is_str:
# We're within a string but don't have a string value to complete.
# Complain about this in the status bar, I guess.
msg = "Cannot complete value set within a string"
view.window().status_message(msg)
logger.warning(msg)
return None
if in_str and is_str:
# Strip completions of non-strings. Don't need quotation marks.
completions = [
c for c in completions
if isinstance(c.completion, str)
]
else:
# JSON-ify completion values with special handling for floats.
#
# the value typed so far, which may differ from prefix for floats
typed_region = sublime.Region(value_region.begin(), point)
typed = view.substr(typed_region).lstrip()
for c in completions:
value = c.completion
# unroll dicts
if isinstance(value, frozenset):
value = dict(value)
if isinstance(value, float):
# strip already typed text from float completions
# because ST cannot complete past word boundaries
# (e.g. strip `1.` of `1.234`)
value_str = str(value)
if value_str.startswith(typed):
offset = len(typed) - len(prefix)
value_str = value_str[offset:]
elif typed:
# don't offer as completion if 'typed' didn't match
continue
else:
value_str = sublime.encode_value(value)
if is_list and not in_list:
# wrap each item in a brackets to insert a 'list'
value_str = "[{}]".format(value_str)
# escape snippet markers
value_str = value_str.replace("$", "\\$")
c.completion = value_str
# disable word completion to prevent stupid suggestions
return completions
def _value_completions_for(self, key):
"""Collect and return value completions from matching source.
Arguments:
key (string):
the settings key name to read comments from
Returns:
{(trigger, contents), ...}
A set of all completions.
"""
logger.debug("building completions for key %r", key)
default = self.defaults.get(key)
logger.debug("default value: %r", default)
if key in ('color_scheme', 'dark_color_scheme', 'light_color_scheme'):
yield from self._color_scheme_completions(key, default)
elif key in ('default_encoding', 'fallback_encoding'):
yield from self._encoding_completions(default)
elif key in ('theme', 'dark_theme', 'light_theme'):
yield from self._theme_completions(key, default)
else:
yield from self._completions_from_comment(key, default)
yield from self._completions_from_default(key, default)
def _completions_from_comment(self, key, default):
"""Parse settings comments and return all possible values.
Many settings are commented with a list of quoted words representing
the possible / allowed values. This method generates a list of these
quoted words which are suggested in auto-completions.
Arguments:
key (string):
the settings key name to read comments from
default (any):
the default value of the setting used to mark completion items
as "default".
Returns:
{(trigger, contents), ...}
A set of all completions.
"""
comment = self.comments.get(key)
if not comment:
return
for match in re.finditer(r"`([^`\n]+)`", comment):
# backticks should wrap the value in JSON representation,
# so we try to decode it
value = match.group(1)
try:
value = sublime.decode_value(value)
except ValueError:
pass
if isinstance(value, list):
# Suggest list items as completions instead of a string
# representation of the list.
# Unless it's a dict.
for v in value:
if not isinstance(v, dict):
yield format_completion_item(v, default)
elif isinstance(value, dict):
# TODO what should we do with dicts?
pass
else:
yield format_completion_item(value, default)
for match in re.finditer(r'"([\.\w]+)"', comment):
# quotation marks wrap a string, a number or a boolean;
# fall back to the raw string if decoding fails
value, = match.groups()
try:
value = decode_value(value)
except ValueError:
pass
yield format_completion_item(value, default)
@staticmethod
def _completions_from_default(key, default):
"""Built completions from default value.
Arguments:
key (string):
the settings key name to read comments from
Returns:
{(trigger, contents), ...}
A set of all completions.
"""
if default is None or default == "":
return
elif isinstance(default, bool):
for value in [True, False]:
yield format_completion_item(value, default=default)
elif isinstance(default, list):
for value in default:
yield format_completion_item(value, is_default=True)
elif isinstance(default, dict):
return # TODO can't complete these yet
else:
yield format_completion_item(default, is_default=True)
@staticmethod
def _color_scheme_completions(key, default):
"""Create completions of all visible color schemes.
The set will not include color schemes matching at least one entry of
`"settings.exclude_color_scheme_patterns": []`.
default (string):
The default `color_scheme` value.
Returns:
{(trigger, contents), ...}
A set of all completions.
- trigger (string): base file name of the color scheme
- contents (string): the value to commit to the settings
"""
if int(sublime.version()) >= 4095 and key == 'color_scheme':
yield format_completion_item(value="auto", annotation="dark-/light switching")
hidden = get_setting('settings.exclude_color_scheme_patterns') or []
for scheme_path in sublime.find_resources("*.sublime-color-scheme"):
if not any(hide in scheme_path for hide in hidden):
try:
root, package, *_, name = scheme_path.split("/")
except ValueError:
continue
if root == 'Cache':
continue
yield format_completion_item(value=name, default=default, annotation=package)
for scheme_path in sublime.find_resources("*.tmTheme"):
if not any(hide in scheme_path for hide in hidden):
try:
root, package, *_, name = scheme_path.split("/")
except ValueError:
continue
if root == 'Cache':
continue
yield format_completion_item(
value=scheme_path, default=default, label=name, annotation=package
)
@staticmethod
def _encoding_completions(default):
"""Create completions of all available encoding values.
default (string):
The default `encoding` value.
Returns:
{(trigger, contents), ...}
A set of all completions.
- trigger (string): the encoding in sublime format
- contents (string): the encoding in sublime format
"""
for enc in encodings.SUBLIME_TO_STANDARD.keys():
yield format_completion_item(value=enc, default=default, annotation="encoding")
@staticmethod
def _theme_completions(key, default):
"""Create completions of all visible themes.
default (string):
The default `theme` value.
The set will not include themes matching at least one entry of the
`"settings.exclude_theme_patterns": []` setting.
Returns:
{(trigger, contents), ...}
A set of all completions.
- trigger (string): base file name of the theme
- contents (string): the file name to commit to the settings
"""
hidden = get_setting('settings.exclude_theme_patterns') or []
if int(sublime.version()) >= 4095 and key == 'theme':
yield format_completion_item(value="auto", annotation="dark-/light switching")
for theme_path in ResourcePath.glob_resources("*.sublime-theme"):
if not any(hide in theme_path.name for hide in hidden):
yield format_completion_item(
value=theme_path.name, default=default, annotation="theme"
)
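
# Illustrative sketch (not part of the plugin): how the float handling in
# `value_completions` above trims already typed text. Sublime Text cannot
# complete past word boundaries such as ".", so for a float default like 1.234
# the characters typed so far must be stripped before offering the remainder.
# The helper below is hypothetical and only mirrors the matching case.
def _demo_strip_typed_float(typed, prefix, value):
    value_str = str(value)
    if value_str.startswith(typed):
        # drop what was already typed, but keep the completion prefix
        return value_str[len(typed) - len(prefix):]
    return value_str

# _demo_strip_typed_float("1.", "", 1.234) -> "234"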
|
the-stack_0_27344
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for using the poisson bootstrap API."""
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis import types
from tensorflow_model_analysis.evaluators import legacy_poisson_bootstrap as poisson_bootstrap
class PoissonBootstrapTest(tf.test.TestCase):
def testCalculateConfidenceInterval(self):
sampling_data_list = [
np.array([
[0, 0, 2, 7, 0.77777779, 1],
[1, 0, 2, 6, 0.75, 0.85714287],
[4, 0, 2, 3, 0.60000002, 0.42857143],
[4, 2, 0, 3, 1, 0.42857143],
[7, 2, 0, 0, float('nan'), 0],
]),
np.array([
[7, 2, 0, 0, float('nan'), 0],
[0, 0, 2, 7, 0.77777779, 1],
[1, 0, 2, 6, 0.75, 0.85714287],
[4, 0, 2, 3, 0.60000002, 0.42857143],
[4, 2, 0, 3, 1, 0.42857143],
]),
]
unsampled_data = np.array([
[4, 2, 0, 3, 1, 0.42857143],
[7, 2, 0, 0, float('nan'), 0],
[0, 0, 2, 7, 0.77777779, 1],
[1, 0, 2, 6, 0.75, 0.85714287],
[4, 0, 2, 3, 0.60000002, 0.42857143],
])
result = poisson_bootstrap._calculate_t_distribution(
sampling_data_list, unsampled_data)
self.assertIsInstance(result, np.ndarray)
self.assertEqual(result.shape, (5, 6))
self.assertAlmostEqual(result[0][0].sample_mean, 3.5, delta=0.1)
self.assertAlmostEqual(
result[0][0].sample_standard_deviation, 4.94, delta=0.1)
self.assertEqual(result[0][0].sample_degrees_of_freedom, 1)
self.assertEqual(result[0][0].unsampled_value, 4.0)
self.assertAlmostEqual(result[0][4].sample_mean, 0.77, delta=0.1)
self.assertTrue(np.isnan(result[0][4].sample_standard_deviation))
self.assertEqual(result[0][4].sample_degrees_of_freedom, 0)
self.assertEqual(result[0][4].unsampled_value, 1.0)
sampling_data_list = [
np.array([1, 2]),
np.array([1, 2]),
np.array([1, float('nan')])
]
unsampled_data = np.array([1, 2])
result = poisson_bootstrap._calculate_t_distribution(
sampling_data_list, unsampled_data)
self.assertIsInstance(result, np.ndarray)
self.assertEqual(result.tolist(), [
types.ValueWithTDistribution(
sample_mean=1.0,
sample_standard_deviation=0.0,
sample_degrees_of_freedom=2,
unsampled_value=1),
types.ValueWithTDistribution(
sample_mean=2.0,
sample_standard_deviation=0.0,
sample_degrees_of_freedom=1,
unsampled_value=2)
])
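
# Illustrative sketch (not part of the test): the assertions on result[0][0]
# above follow from plain sample statistics over the first cell of the two
# sampled arrays, whose values are 0 and 7, while unsampled_value is simply
# unsampled_data[0][0]. The helper is hypothetical and only spells out that
# arithmetic.
def _example_first_cell_statistics():
    samples = np.array([0.0, 7.0])
    sample_mean = samples.mean()            # 3.5
    sample_std = samples.std(ddof=1)        # ~4.95
    degrees_of_freedom = samples.size - 1   # 1
    return sample_mean, sample_std, degrees_of_freedom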
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_27345
|
"""
homeassistant.components.switch.wemo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for WeMo switches.
"""
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import STATE_ON, STATE_OFF, STATE_STANDBY
REQUIREMENTS = ['pywemo==0.3']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Find and return WeMo switches. """
import pywemo
import pywemo.discovery as discovery
if discovery_info is not None:
device = discovery.device_from_description(discovery_info[2])
if device:
add_devices_callback([WemoSwitch(device)])
return
logging.getLogger(__name__).info("Scanning for WeMo devices")
switches = pywemo.discover_devices()
# Filter out the switches and wrap in WemoSwitch object
add_devices_callback(
[WemoSwitch(switch) for switch in switches
if isinstance(switch, pywemo.Switch)])
class WemoSwitch(SwitchDevice):
""" Represents a WeMo switch. """
def __init__(self, wemo):
self.wemo = wemo
self.insight_params = None
self.maker_params = None
@property
def unique_id(self):
""" Returns the id of this WeMo switch """
return "{}.{}".format(self.__class__, self.wemo.serialnumber)
@property
def name(self):
""" Returns the name of the switch if any. """
return self.wemo.name
@property
def state(self):
""" Returns the state. """
is_on = self.is_on
if not is_on:
return STATE_OFF
elif self.is_standby:
return STATE_STANDBY
return STATE_ON
@property
def current_power_mwh(self):
""" Current power usage in mwh. """
if self.insight_params:
return self.insight_params['currentpower']
@property
def today_power_mw(self):
""" Today total power usage in mw. """
if self.insight_params:
return self.insight_params['todaymw']
@property
def is_standby(self):
""" Is the device on - or in standby. """
if self.insight_params:
standby_state = self.insight_params['state']
# Standby is actually '8' but seems more defensive
# to check for the On and Off states
if standby_state == '1' or standby_state == '0':
return False
else:
return True
@property
def sensor_state(self):
""" Is the sensor on or off. """
if self.maker_params and self.has_sensor:
# Note a state of 1 matches the WeMo app 'not triggered'!
if self.maker_params['sensorstate']:
return STATE_OFF
else:
return STATE_ON
@property
def switch_mode(self):
""" Is the switch configured as toggle(0) or momentary (1). """
if self.maker_params:
return self.maker_params['switchmode']
@property
def has_sensor(self):
""" Is the sensor present? """
if self.maker_params:
return self.maker_params['hassensor']
@property
def is_on(self):
""" True if switch is on. """
return self.wemo.get_state()
def turn_on(self, **kwargs):
""" Turns the switch on. """
self.wemo.on()
def turn_off(self):
""" Turns the switch off. """
self.wemo.off()
def update(self):
""" Update WeMo state. """
self.wemo.get_state(True)
if self.wemo.model_name == 'Insight':
self.insight_params = self.wemo.insight_params
self.insight_params['standby_state'] = self.wemo.get_standby_state
elif self.wemo.model_name == 'Maker':
self.maker_params = self.wemo.maker_params
|
the-stack_0_27346
|
# Copyright Contributors to the Testing Farm project.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Optional
from mock import MagicMock
import tft.artemis.guest
from tft.artemis.tasks import DoerType, TaskLogger
def assert_task_core_call(
task_core: MagicMock,
taskname: str,
doer: DoerType,
*doer_args: Any,
test_guest_logger: Optional[str] = None
) -> None:
"""
Test properties of a mock representing :py:func:`tft.artemis.task.task_core`.
The helper verifies several properties of the mock, assuming it has been used to dispatch a task doer.
"""
# Right, should have been called already.
task_core.assert_called_once()
# We can't use `assert_called_once_with()` because we have no access to objects passed to the `task_core()` call.
# Therefore we unpack the stored call information and test the call properties "manually".
_, args, kwargs = task_core.mock_calls[0]
# There's one positional argument only, and that's the given doer.
assert args == (doer,)
# Its arguments are given as a keyword argument...
assert kwargs['doer_args'] == doer_args
# ... and then there's a task logger object.
assert isinstance(kwargs['logger'], TaskLogger)
assert kwargs['logger'].taskname == taskname
# Some tasks go even beyond task logger by creating a guest logger. If we were given a task name,
# let's verify guest logger has been created correctly.
if test_guest_logger:
assert isinstance(kwargs['logger']._logger, tft.artemis.guest.GuestLogger)
assert kwargs['logger']._logger.guestname == test_guest_logger
|
the-stack_0_27348
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ... import TestUnitBase
class TestSerpent(TestUnitBase):
def test_reversible(self):
data = bytes(range(0x100))
for mode in ('CBC', 'CFB', 'OFB', 'PCBC'):
encrypter = -self.load(range(0x20), iv=range(0x10), mode=mode)
decrypter = self.load(range(0x20), iv=range(0x10), mode=mode)
self.assertEqual(data, bytes(data | encrypter | decrypter), F'error for {mode}')
|
the-stack_0_27350
|
#!/usr/bin/env python3
import argparse
import openpyscad as ops
from MetricBolt import MetricBolt
from semicylinder import semicylinder
from Config import ReadConfig
config = ReadConfig('belt-tensioner.config')
AP = argparse.ArgumentParser(description="Create Belt Grip Parts")
AP.add_argument('--part', help='Which part to make', required=True)
AP.add_argument('--out', help='Output Filename')
args = AP.parse_args()
if not args.out:
args.out = 'belt_tensioner_' + str(args.part) + '.scad'
with open(args.out, 'w') as fh:
fh.write('$fn=180;\n')
d = ops.Difference()
u = ops.Union()
u.append(semicylinder(config.griplength, config.gripradius, config.gripdepth))
d.append(u)
b = MetricBolt(3, 12, negative=True)
c = b.cap
s = b.shaft
n = b.nut
o = config.beltwidth/2 + b.shaft.r
if args.part == 'top':
d.append(b.bolt.rotate([0,-90,0]).translate([b.cap.h+config.boltgrip,o,config.griplength/2]))
d.append(b.bolt.rotate([0,-90,0]).translate([b.cap.h+config.boltgrip,-o,config.griplength/2]))
elif args.part == 'bottom':
u.append(ops.Cube([config.gripdepth+config.boltgrip, config.beltwidth, config.griplength]).translate([0,-config.beltwidth/2,0]))
both = b.bolt.rotate([0,0,30]).rotate([0,90,0])
left = both.translate([-b.cap.h-b.shaft.h+config.boltgrip,o,config.griplength/2])
right = both.translate([-b.cap.h-b.shaft.h+config.boltgrip,-o,config.griplength/2])
d.append(left)
d.append(right)
for z in range(1, int(config.griplength), 2):
d.append(ops.Cube([1,config.beltwidth,1]).translate([0,-config.beltwidth/2,z]))
bolt = MetricBolt(3, config.griplength+2, negative=True)
bolt.faces = 4
d.append(bolt.bolt.rotate([0,0,45]).translate([config.gripdepth/2,0,-bolt.cap.h-3]))
d.rotate([0,-90,0]).dump(fh)
|
the-stack_0_27351
|
# Copyright 2013-2014 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import struct
def _make_packer(format_string):
packer = struct.Struct(format_string)
pack = packer.pack
unpack = lambda s: packer.unpack(s)[0]
return pack, unpack
int64_pack, int64_unpack = _make_packer('>q')
int32_pack, int32_unpack = _make_packer('>i')
int16_pack, int16_unpack = _make_packer('>h')
int8_pack, int8_unpack = _make_packer('>b')
uint64_pack, uint64_unpack = _make_packer('>Q')
uint32_pack, uint32_unpack = _make_packer('>I')
uint16_pack, uint16_unpack = _make_packer('>H')
uint8_pack, uint8_unpack = _make_packer('>B')
float_pack, float_unpack = _make_packer('>f')
double_pack, double_unpack = _make_packer('>d')
# Special case for cassandra header
header_struct = struct.Struct('>BBbB')
header_pack = header_struct.pack
header_unpack = header_struct.unpack
# in protocol version 3 and higher, the stream ID is two bytes
v3_header_struct = struct.Struct('>BBhB')
v3_header_pack = v3_header_struct.pack
v3_header_unpack = v3_header_struct.unpack
if six.PY3:
def varint_unpack(term):
val = int(''.join("%02x" % i for i in term), 16)
if (term[0] & 128) != 0:
val -= 1 << (len(term) * 8)
return val
else:
def varint_unpack(term): # noqa
val = int(term.encode('hex'), 16)
if (ord(term[0]) & 128) != 0:
val = val - (1 << (len(term) * 8))
return val
def bitlength(n):
bitlen = 0
while n > 0:
n >>= 1
bitlen += 1
return bitlen
def varint_pack(big):
pos = True
if big == 0:
return b'\x00'
if big < 0:
bytelength = bitlength(abs(big) - 1) // 8 + 1
big = (1 << bytelength * 8) + big
pos = False
revbytes = bytearray()
while big > 0:
revbytes.append(big & 0xff)
big >>= 8
if pos and revbytes[-1] & 0x80:
revbytes.append(0)
revbytes.reverse()
return six.binary_type(revbytes)
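
# Round-trip sketch (not part of the driver): exercising the packers defined
# above. The varint encoding is big-endian two's complement, with a leading
# zero byte added so that positive values keep their sign.
if __name__ == '__main__':
    assert int32_unpack(int32_pack(-42)) == -42
    assert uint16_unpack(uint16_pack(65535)) == 65535
    assert varint_unpack(varint_pack(0)) == 0
    assert varint_unpack(varint_pack(128)) == 128    # packs to b'\x00\x80'
    assert varint_unpack(varint_pack(-129)) == -129  # packs to b'\xff\x7f'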
|
the-stack_0_27353
|
class Carro():
def arrancar(self):
print(f"arrancar con carro marca: {self.marca}, color: {self.color}")
#constructor
def __init__(self, marca, modelo, color, puertas, combustible):
self.marca = marca
self.modelo = modelo
self.color = color
self.puertas = puertas
self.combustible = combustible
carro_uno = Carro("Mazda","XXRR","Blanco", 4, "gasolina")
print(carro_uno.puertas)
print(carro_uno.color)
carro_uno.arrancar()
carro_dos = Carro("Ford","F-150","Rojo","6", "gasolina")
print(carro_dos.modelo)
print(carro_dos.puertas)
print(carro_dos.color)
carro_dos.arrancar()
print(carro_uno.puertas)
print(carro_uno.color)
carro_uno.arrancar()
|
the-stack_0_27354
|
""" Final Assisgnment """
import os
from ibm_watson import LanguageTranslatorV3, ApiException
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
def translator_instance():
""" Init translator Instance """
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url(url)
language_translator.set_disable_ssl_verification(True)
return language_translator
def english_to_french(english_text):
""" This method tranlate Text from English to French """
french_text = None
if not english_text:
return None
try:
language_translator = translator_instance()
translation = language_translator.translate(
text=english_text,
model_id='en-fr').get_result()
french_text = translation['translations'][0]['translation'] if translation else None
except ApiException as ex:
print("Method failed with status code " +
str(ex.code) + ": " + ex.message)
return french_text
def french_to_english(french_text):
""" This method tranlate Text from French to English """
english_text = None
if not french_text:
return None
try:
language_translator = translator_instance()
translation = language_translator.translate(
text=french_text,
model_id='fr-en').get_result()
english_text = translation['translations'][0]['translation'] if translation else None
except ApiException as ex:
print("Method failed with status code " +
str(ex.code) + ": " + ex.message)
return english_text
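
# Minimal usage sketch (assumes the .env file loaded above provides valid
# `apikey` and `url` credentials for the Language Translator service).
if __name__ == '__main__':
    print(english_to_french("Hello"))    # e.g. "Bonjour"
    print(french_to_english("Bonjour"))  # e.g. "Hello"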
|
the-stack_0_27355
|
# -*- coding: utf-8 -*-
import os
from pkg_resources import iter_entry_points
from .base import TileSource, FileTileSource, TileOutputMimeTypes, \
TILE_FORMAT_IMAGE, TILE_FORMAT_PIL, TILE_FORMAT_NUMPY, nearPowerOfTwo, \
etreeToDict, dictToEtree
from ..exceptions import TileGeneralException, TileSourceException, TileSourceAssetstoreException
from .. import config
from ..constants import SourcePriority
AvailableTileSources = {}
def loadTileSources(entryPointName='large_image.source', sourceDict=AvailableTileSources):
"""
Load all tilesources from entrypoints and add them to the
AvailableTileSources dictionary.
:param entryPointName: the name of the entry points to load.
:param sourceDict: a dictionary to populate with the loaded sources.
"""
for entryPoint in iter_entry_points(entryPointName):
try:
sourceClass = entryPoint.load()
if sourceClass.name and None in sourceClass.extensions:
sourceDict[entryPoint.name] = sourceClass
config.getConfig('logprint').debug('Loaded tile source %s' % entryPoint.name)
except Exception:
config.getConfig('logprint').exception(
'Failed to load tile source %s' % entryPoint.name)
pass
def getTileSourceFromDict(availableSources, pathOrUri, *args, **kwargs):
"""
Get a tile source based on an ordered dictionary of known sources and a path
name or URI. Additional parameters are passed to the tile source and can
be used for properties such as encoding.
:param availableSources: an ordered dictionary of sources to try.
:param pathOrUri: either a file path or a fixed source via
large_image://<source>.
:returns: a tile source instance, or raises an error if no source can read the input.
"""
sourceObj = pathOrUri
uriWithoutProtocol = pathOrUri.split('://', 1)[-1]
isLargeImageUri = pathOrUri.startswith('large_image://')
extensions = [ext.lower() for ext in os.path.basename(uriWithoutProtocol).split('.')[1:]]
sourceList = []
for sourceName in availableSources:
sourceExtensions = availableSources[sourceName].extensions
priority = sourceExtensions.get(None, SourcePriority.MANUAL)
for ext in extensions:
if ext in sourceExtensions:
priority = min(priority, sourceExtensions[ext])
if isLargeImageUri and sourceName == uriWithoutProtocol:
priority = SourcePriority.NAMED
if priority >= SourcePriority.MANUAL:
continue
sourceList.append((priority, sourceName))
for _priority, sourceName in sorted(sourceList):
if availableSources[sourceName].canRead(sourceObj, *args, **kwargs):
return availableSources[sourceName](sourceObj, *args, **kwargs)
raise TileSourceException('No available tilesource for %s' % pathOrUri)
def getTileSource(*args, **kwargs):
"""
Get a tilesource using the known sources. If tile sources have not yet
been loaded, load them.
:returns: A tilesource for the passed arguments.
"""
if not len(AvailableTileSources):
loadTileSources()
return getTileSourceFromDict(AvailableTileSources, *args, **kwargs)
__all__ = [
'TileSource', 'FileTileSource',
'exceptions', 'TileGeneralException', 'TileSourceException', 'TileSourceAssetstoreException',
'TileOutputMimeTypes', 'TILE_FORMAT_IMAGE', 'TILE_FORMAT_PIL', 'TILE_FORMAT_NUMPY',
'AvailableTileSources', 'getTileSource', 'nearPowerOfTwo',
'etreeToDict', 'dictToEtree',
]
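
# Usage sketch (hypothetical path; assumes at least one tile source plugin is
# installed and registered under the 'large_image.source' entry point):
#
#   source = getTileSource('/path/to/image.tiff', encoding='PNG')
#
# getTileSource lazily populates AvailableTileSources on first use and then
# delegates to getTileSourceFromDict, which picks the best match by extension
# priority or by an explicit large_image://<source> URI.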
|