repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
FedericoCeratto/debian-py3status | py3status/modules/dpms.py | 1 | 1188 | # -*- coding: utf-8 -*-
"""
This module allows activation and deactivation
of DPMS (Display Power Management Signaling)
by clicking on 'DPMS' in the status bar.
Written and contributed by @tasse:
Andre Doser <dosera AT tf.uni-freiburg.de>
"""
from os import system
class Py3status:
def __init__(self):
"""
Detect current state on start.
"""
self.run = system('xset -q | grep -iq "DPMS is enabled"') == 0
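# os.system() returns the shell exit status, so self.run is True exactly when
# `xset -q` reports "DPMS is enabled" (grep -q exits with 0 on a match).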
def dpms(self, i3s_output_list, i3s_config):
"""
Display a colorful state of DPMS.
"""
response = {
'full_text': 'DPMS'
}
if self.run:
response['color'] = i3s_config['color_good']
else:
response['color'] = i3s_config['color_bad']
return response
def on_click(self, i3s_output_list, i3s_config, event):
"""
Enable/Disable DPMS on left click.
"""
if event['button'] == 1:
if self.run:
self.run = False
system("xset -dpms")
else:
self.run = True
system("xset +dpms")
system("killall -USR1 py3status")
| bsd-2-clause | -3,580,776,968,557,446,700 | 25.4 | 70 | 0.526936 | false |
MrKepzie/Natron-Tests | TestPY/test___01.py | 1 | 2726 | from __future__ import print_function
import NatronEngine
f1 = open('test___01-output.txt','w+')
reader = app.createNode("fr.inria.openfx.ReadOIIO")
writer = app.createNode("fr.inria.openfx.WriteOIIO")
blur = app.createNode("net.sf.cimg.CImgBlur")
reader = app.Read1
writer = app.Write1
blur = app.BlurCImg1
print(blur.getScriptName(), file=f1)
reader.filename.set("input1.png")
writer.filename.set("output#.jpg")
writer.connectInput(0,blur)
writer.formatType.set(0)
writer.frameRange.set(2)
writer.firstFrame.set(1)
writer.lastFrame.set(1)
writer.quality.set(10)
if blur.canConnectInput(0,reader):
print("can connect", file=f1)
blur.connectInput(0,reader)
blur.size.setValueAtTime(0,1)
blur.size.setValueAtTime(10,2)
blur.size.setValueAtTime(20,3)
blur.size.set(30,30,4)
print(blur.size.getDefaultValue(), file=f1)
print(blur.size.get().x, file=f1)
print(blur.size.get().y, file=f1)
print(blur.size.getValue(), file=f1)
print(blur.size.get(2).x, file=f1)
print(blur.size.get(2).y, file=f1)
print(blur.size.get(3).x, file=f1)
print(blur.size.get(3).y, file=f1)
print(blur.size.get(4).x, file=f1)
print(blur.size.get(4).y, file=f1)
print(blur.size.get(5).x, file=f1)
print(blur.size.get(5).y, file=f1)
print(blur.size.getValueAtTime(1), file=f1)
print(blur.size.getValueAtTime(2), file=f1)
print(blur.size.getValueAtTime(3), file=f1)
print(blur.size.getValueAtTime(4), file=f1)
print(blur.size.getValueAtTime(5), file=f1)
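# The getAvailableLayers() signature differs across Natron versions: 2.3 and
# later expect an extra argument (here -1), while older builds take none,
# hence the version check below.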
if (NatronEngine.natron.getNatronVersionMajor() == 2 and NatronEngine.natron.getNatronVersionMinor() >= 3) or NatronEngine.natron.getNatronVersionMajor() >= 3:
availLay = blur.getAvailableLayers(-1)
else:
availLay = blur.getAvailableLayers()
if len(availLay) > 0:
print ("getAvailableLayers", file=f1)
if blur.addUserPlane("MyLayer",["R", "G", "B", "A"]):
print("added user plane", file=f1)
print(str(blur.getBitDepth()), file=f1)
getCol=str(blur.getColor())
if getCol:
print("getColor", file=f1)
print(str(blur.getCurrentTime()), file=f1)
print(str(blur.getFrameRate()), file=f1)
getIn = blur.getInput(0)
print(str(getIn.getLabel()), file=f1)
print(str(blur.getInputLabel(0)), file=f1)
print(str(blur.getMaxInputCount()), file=f1)
sizeParam = blur.getParam("size")
print(str(sizeParam.getCanAnimate()), file=f1)
print(str(sizeParam.getEvaluateOnChange()), file=f1)
print(str(sizeParam.getHelp()), file=f1)
print(str(sizeParam.getIsAnimationEnabled()), file=f1)
print(str(sizeParam.getIsEnabled(0.0)), file=f1)
print(str(sizeParam.getIsPersistent()), file=f1)
print(str(sizeParam.getIsVisible()), file=f1)
print(str(sizeParam.getNumDimensions()), file=f1)
print(str(sizeParam.getScriptName()), file=f1)
print(str(sizeParam.getTypeName()), file=f1)
| gpl-2.0 | -3,707,290,140,379,969,500 | 29.629213 | 159 | 0.731842 | false |
YutingZhang/lmdis-rep | net_modules/auto_struct/generic_decoder.py | 1 | 14641 | from abc import ABCMeta, abstractmethod
import tensorflow as tf
import prettytensor as pt
import zutils.tf_math_funcs as tmf
import net_modules.auto_struct.utils as asu
from net_modules import keypoints_2d
from net_modules.distribution_utils import reshape_extended_features
from zutils.py_utils import *
import zutils.pt_utils as ptu
import numpy as np
class Factory:
__metaclass__ = ABCMeta
structure_param_num = None
feature_extended_num = 1
def __init__(self, recon_dist_param_num=1, options=None):
self.recon_dist_param_num = recon_dist_param_num
self.options = options
self.allow_overall = True
# patch feature dim (utilities for inheriting classes)
if hasattr(self, "default_patch_feature_dim"):
self.patch_feature_dim = self.default_patch_feature_dim
else:
self.patch_feature_dim = None
if "patch_feature_dim" in self.options:
self.patch_feature_dim = self.options["patch_feature_dim"]
# overall feature dim (utilities for inheriting classes)
if hasattr(self, "default_overall_feature_dim"):
self.overall_feature_dim = self.default_overall_feature_dim
else:
self.overall_feature_dim = None
if "overall_feature_dim" in self.options:
self.patch_feature_dim = self.options["overall_feature_dim"]
def __call__(self, input_tensor, condition_tensor=None, extra_inputs=None):
im, _, mos = self.decode(input_tensor, condition_tensor=condition_tensor, extra_inputs=extra_inputs)
return im, mos.extra_outputs
def decode(self, input_tensor, condition_tensor=None, extra_inputs=None):
"""Create encoder network.
"""
if extra_inputs is None:
extra_inputs = dict()
assert self.structure_param_num is not None, "structure_param_num is not defined"
# module output strip
mos = asu.ModuleOutputStrip()
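# ModuleOutputStrip gathers auxiliary outputs: values routed through mos(...)
# are stripped down to their main result while any extras are accumulated in
# mos.extra_outputs (filled below under "save" and "for_discriminator").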
with tf.variable_scope("variational"):
# latent to structure, patch, and overall
structure_param_x, patch_features_x, overall_features_x = mos(
self._latent2structure_patch_overall(latent_tensor=input_tensor))
structure_param_x = mos(self.structure_postprocess(structure_param_x))
actual_structure_param_num = tmf.get_shape(structure_param_x)[-1]
the_param_factor = (actual_structure_param_num // self.structure_param_num)
assert the_param_factor == self.feature_extended_num, "wrong dim for feature extension"
structure_param_x, structure_param = reshape_extended_features(structure_param_x, the_param_factor)
patch_features_x, patch_features = reshape_extended_features(patch_features_x, the_param_factor)
overall_features_x, overall_features = reshape_extended_features(overall_features_x, the_param_factor)
# store structure_param
mos.extra_outputs["save"]["structure_param"] = structure_param
# feed to discriminator (if needed)
mos.extra_outputs["for_discriminator"]["structure_param"] = structure_param
input_structure_param = structure_param
input_patch_features = patch_features
input_overall_features = overall_features
if "override_structure_param" in extra_inputs and extra_inputs["override_structure_param"] is not None:
input_structure_param = extra_inputs["override_structure_param"]
if "override_patch_features" in extra_inputs and extra_inputs["override_patch_features"] is not None:
input_patch_features = extra_inputs["override_patch_features"]
if "override_overall_features" in extra_inputs and extra_inputs["override_overall_features"] is not None:
input_overall_features = extra_inputs["override_overall_features"]
input_structure_param0 = input_structure_param
input_structure_param, input_overall_features, input_patch_features = \
self.rotate_dominating_features_if_necessary(
condition_tensor, input_structure_param, input_overall_features, input_patch_features
)
if input_structure_param is not input_structure_param0:
# save actual used structure param
mos.extra_outputs["save"]["structure_param0"] = mos.extra_outputs["save"]["structure_param"]
mos.extra_outputs["save"]["structure_param"] = structure_param
if self.input_feature_dim() is not None:
input_patch_features = pt.wrap(input_patch_features).group_connected(
self.input_feature_dim(),
tie_groups=self.options["tie_patch_feature_spaces"]
if "tie_patch_feature_spaces" in self.options else False
)
im, _ = self.decode_deterministic(
input_structure_param,
input_patch_features,
input_overall_features,
extra_inputs=extra_inputs,
mos=mos
)
detailed_outputs = dict(
structure_param_x=structure_param_x,
structure_param=structure_param,
patch_features_x=patch_features_x,
patch_features=patch_features,
overall_features_x=overall_features_x,
overall_features=overall_features
)
return im, detailed_outputs, mos
def decode_deterministic(
self, structure_param, patch_features, overall_features, extra_inputs=None, mos=None,
default_reuse=None
):
if not self.allow_overall:
assert overall_features is None, "Do not support overall_features"
if mos is None:
mos = asu.ModuleOutputStrip()
with tf.variable_scope("deterministic", reuse=default_reuse):
# build heatmap
raw_heatmap_list = mos(self.structure2heatmap(structure_param, extra_inputs=extra_inputs))
if not isinstance(raw_heatmap_list, (list, tuple)):
raw_heatmap_list = [raw_heatmap_list]
heatmap_list = list()
for the_heatmap in raw_heatmap_list:
heatmap_list.append(the_heatmap)
mos.extra_outputs["save"]["heatmap"] = heatmap_list[0]
heatmap = tf.concat(heatmap_list, axis=3)
# build feature map (if needed)
if patch_features is not None:
# patch_features: [batch_size, struct_num, channels]
# heatmap: [batch_size, h, w, struct_num]
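# The loop below spreads each structure's feature vector over the image by
# weighting it with that structure's heatmap and summing over structures:
#   feature_map[b, h, w, c] = sum_k heatmap[b, h, w, k] * patch_features[b, k, c]
# which yields one [batch_size, h, w, channels] map per heatmap in heatmap_list.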
patch_features_e = tmf.expand_dims(patch_features, axis=1, ndims=2)
feature_map_list = list()
for the_heatmap in heatmap_list:
the_heatmap_e = tf.expand_dims(the_heatmap, axis=-1)
the_feature_map = tf.reduce_sum(patch_features_e * the_heatmap_e, axis=3)
feature_map_list.append(the_feature_map)
feature_map = tf.concat(feature_map_list, axis=3)
feature_map = tf.concat([heatmap, feature_map], axis=3)
else:
feature_map = heatmap
im = mos(self.feature2image_with_overall(feature_map, overall_features))
im = call_func_with_ignored_args(
self.post_image_reconstruction, im, extra_inputs=extra_inputs
)
return im, mos
def structure_postprocess(self, structure_param):
return structure_param, None
def _latent2structure_patch_overall(self, latent_tensor):
a = self.__latent2structure_patch_overall(latent_tensor)
assert isinstance(a, tuple), "wrong output type"
assert a[0] is not None, "it seems the latent to structure mapping is not defined"
if len(a) == 4:
return a
elif len(a) == 3:
return a + (None,)
else:
raise ValueError("wrong number of outputs")
# exactly one of the following four methods should be overridden -------------
def __latent2structure_patch_overall(self, latent_tensor):
struct_patch_overall = self.latent2structure_patch_overall(latent_tensor)
struct_patch = self.latent2structure_patch(latent_tensor)
struct_overall = self.latent2structure_overall(latent_tensor)
struct_only = self.latent2structure(latent_tensor)
user_def_embedding = list(
filter(lambda x: x is not None, [struct_patch_overall, struct_patch, struct_overall, struct_only]))
if not user_def_embedding:
struct_patch_overall = self.latent2structure_patch_overall_generic(latent_tensor)
if struct_patch_overall is not None:
user_def_embedding.append(struct_patch_overall)
assert len(user_def_embedding) == 1, \
"exactly one of latent2structure_* should be override"
def wrap_from_1(a):
if isinstance(a, tuple):
if len(a) == 1:
return a[0], None, None, None
elif len(a) == 2:
return a[0], None, None, a[1]
else:
raise ValueError("wrong number of outputs")
else:
return a, None, None, None
def wrap_from_2(a, at_third=False):
assert isinstance(a, tuple), "wrong output type"
if len(a) == 2:
return (a[0],) + ((None, a[1]) if at_third else (a[1], None)) + (None,)
elif len(a) == 3:
return (a[0],) + ((None, a[1]) if at_third else (a[1], None)) + (a[2],)
else:
raise ValueError("wrong number of outputs")
def wrap_from_3(a):
assert isinstance(a, tuple), "wrong output type"
if len(a) == 3:
return a + (None,)
else:
return a
if struct_patch_overall is not None:
return wrap_from_3(struct_patch_overall)
if struct_patch is not None:
return wrap_from_2(struct_patch, at_third=False)
elif struct_overall is not None:
return wrap_from_2(struct_overall, at_third=True)
elif struct_only is not None:
return wrap_from_1(struct_only)
else:
raise ValueError("internal errors: did not find any actual definition")
def rotate_dominating_features_if_necessary(self, condition_tensor, structure_param, *args):
if "keypoint_diff_factor" in self.options:
self.options["structure_diff_factor"] = self.options["keypoint_diff_factor"]
if "keypoint_rotating_indexes" in self.options:
self.options["structure_rotating_indexes"] = self.options["keypoint_rotating_indexes"]
sample_id_for_dominating_feature = Factory._sample_id_for_dominating_feature_from_condition(
condition_tensor
)
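# At test time, when the condition tensor singles out a "dominating" batch
# entry, every entry receives tiled copies of that sample's overall/patch
# features, while the structure parameters are pulled towards that reference
# sample according to structure_diff_factor; if structure_rotating_indexes is
# set, only those dims keep per-sample values and the rest are pinned to the
# reference. In all other cases the inputs are returned unchanged.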
outputs = list(args)
if ptu.default_phase() != pt.Phase.test or sample_id_for_dominating_feature is None:
outputs = (structure_param,) + tuple(outputs)
return outputs
for i in range(len(outputs)):
if outputs[i] is not None:
outputs[i] = tf.tile(
tf.expand_dims(outputs[i][sample_id_for_dominating_feature], axis=0),
[tmf.get_shape(outputs[i])[0]] + [1] * (len(tmf.get_shape(outputs[i]))-1)
)
batch_size = tmf.get_shape(structure_param)[0]
num_structure = tmf.get_shape(structure_param)[1]
use_selected_index = "structure_rotating_indexes" in self.options and self.options["structure_rotating_indexes"]
if use_selected_index:
chosen_dim_idxb = np.zeros(num_structure, np.bool_)
chosen_dim_idxb[self.options["structure_rotating_indexes"]] = np.True_
chosen_ndim = np.sum(chosen_dim_idxb)
else:
chosen_dim_idxb = np.ones(num_structure, np.bool_)
chosen_ndim = num_structure
structure_tile_vec = [batch_size] + [1]*(len(tmf.get_shape(structure_param))-1)
chosen_dim_idxb = np.expand_dims(chosen_dim_idxb, axis=0)
for _ in range(len(tmf.get_shape(structure_param))-2):
chosen_dim_idxb = np.expand_dims(chosen_dim_idxb, axis=-1)
chosen_dim_idxbi = chosen_dim_idxb.astype(np.int32)
chosen_dim_idxb_x = np.tile(chosen_dim_idxb, structure_tile_vec)
ref_structure_param = tf.expand_dims(structure_param[sample_id_for_dominating_feature], axis=0)
if "structure_diff_factor" in self.options:
structure_diff_factor = self.options["structure_diff_factor"] \
if self.options["structure_diff_factor"] is not None else 1
if structure_diff_factor != 1:
structure_param = \
ref_structure_param * (1-structure_diff_factor) + structure_param * structure_diff_factor
if use_selected_index:
structure_param = chosen_dim_idxbi * structure_param + (1-chosen_dim_idxbi) * ref_structure_param
outputs = (structure_param,) + tuple(outputs)
return outputs
@staticmethod
def _sample_id_for_dominating_feature_from_condition(condition_tensor):
if condition_tensor is None:
return None
for c in condition_tensor:
if c["type"] == "boolmask_for_dominating_sample_in_batch_rotating":
the_id = tf.argmax(tf.to_int32(c["value"]), 0)
the_id = tf.reshape(the_id, [])
return the_id
return None
def latent2structure_patch_overall_generic(self, latent_tensor):
# automatically overridden if one of the following four functions is overridden
return None
def latent2structure_patch_overall(self, latent_tensor):
return None
def latent2structure_overall(self, latent_tensor):
return None
def latent2structure_patch(self, latent_tensor):
return None
def latent2structure(self, latent_tensor):
return None
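# Illustrative sketch (not part of this module): a minimal subclass overriding
# exactly one latent2structure_* hook. The dense-layer mapping and the names
# used here are assumptions for illustration only, not this repository's
# actual models.
#
#   class ToyDecoder(Factory):
#       structure_param_num = 10  # e.g. 5 keypoints x (x, y)
#
#       def latent2structure(self, latent_tensor):
#           # map the latent code straight to structure parameters; the raw
#           # width must be structure_param_num * feature_extended_num
#           return tf.layers.dense(
#               latent_tensor,
#               self.structure_param_num * self.feature_extended_num)
#
#       def structure2heatmap(self, structure_param, extra_inputs=None):
#           raise NotImplementedError  # would render the parameters to heatmaps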
# ------------------------------------------------------------------------------
@abstractmethod
def structure2heatmap(self, structure_param, extra_inputs=None):
pass
# override one of the following -----------------------------------------------
def feature2image_with_overall(self, feature_map, overall_feature):
return self.feature2image(feature_map)
def feature2image(self, feature_map):
raise ValueError("feature2image is not defined")
return None
def input_feature_dim(self):
return None
def post_image_reconstruction(self, im, extra_inputs=None):
return im
| apache-2.0 | -3,684,738,132,048,602,000 | 42.966967 | 120 | 0.612868 | false |
jptomo/rpython-lang-scheme | rpython/jit/metainterp/test/test_ajit.py | 1 | 142027 | import sys
import py
import weakref
from rpython.rlib import rgc
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.jit.metainterp import history
from rpython.jit.metainterp.test.support import LLJitMixin, noConst
from rpython.jit.metainterp.warmspot import get_stats
from rpython.rlib import rerased
from rpython.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside,
loop_invariant, elidable, promote, jit_debug, assert_green,
AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff,
isconstant, isvirtual, set_param, record_exact_class)
from rpython.rlib.longlong2float import float2longlong, longlong2float
from rpython.rlib.rarithmetic import ovfcheck, is_valid_int, int_force_ge_zero
from rpython.rtyper.lltypesystem import lltype, rffi
class BasicTests:
def test_basic(self):
def f(x, y):
return x + y
res = self.interp_operations(f, [40, 2])
assert res == 42
def test_basic_inst(self):
class A:
pass
def f(n):
a = A()
a.x = n
return a.x
res = self.interp_operations(f, [42])
assert res == 42
def test_uint_floordiv(self):
from rpython.rlib.rarithmetic import r_uint
def f(a, b):
a = r_uint(a)
b = r_uint(b)
return a/b
res = self.interp_operations(f, [-4, 3])
assert res == long(r_uint(-4)) // 3
def test_direct_call(self):
def g(n):
return n + 2
def f(a, b):
return g(a) + g(b)
res = self.interp_operations(f, [8, 98])
assert res == 110
def test_direct_call_with_guard(self):
def g(n):
if n < 0:
return 0
return n + 2
def f(a, b):
return g(a) + g(b)
res = self.interp_operations(f, [8, 98])
assert res == 110
def test_loop_1(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
res += x
y -= 1
return res
res = self.meta_interp(f, [6, 7])
assert res == 42
self.check_trace_count(1)
self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2,
'guard_true': 2, 'int_sub': 2})
if self.basic:
found = 0
for op in get_stats().get_all_loops()[0]._all_operations():
if op.getopname() == 'guard_true':
liveboxes = op.getfailargs()
assert len(liveboxes) == 3
for box in liveboxes:
assert box.type == 'i'
found += 1
assert found == 2
def test_loop_variant_mul1(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
res += x * x
x += 1
res += x * x
y -= 1
return res
res = self.meta_interp(f, [6, 7])
assert res == 1323
self.check_trace_count(1)
self.check_simple_loop(int_mul=1)
def test_loop_variant_mul_ovf(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
try:
res += ovfcheck(x * x)
x += 1
res += ovfcheck(x * x)
y -= 1
except OverflowError:
assert 0
return res
res = self.meta_interp(f, [6, 7])
assert res == 1323
self.check_trace_count(1)
self.check_simple_loop(int_mul_ovf=1)
def test_loop_invariant_mul1(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
res += x * x
y -= 1
return res
res = self.meta_interp(f, [6, 7])
assert res == 252
self.check_trace_count(1)
self.check_simple_loop(int_mul=0)
self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2,
'int_mul': 1, 'guard_true': 2, 'int_sub': 2})
def test_loop_invariant_mul_ovf1(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
b = y * 2
try:
res += ovfcheck(x * x) + b
except OverflowError:
assert 0
y -= 1
return res
res = self.meta_interp(f, [6, 7])
assert res == 308
self.check_trace_count(1)
self.check_simple_loop(int_mul_ovf=0)
self.check_resops({'jump': 1, 'int_lshift': 2, 'int_gt': 2,
'int_mul_ovf': 1, 'int_add': 4,
'guard_true': 2, 'guard_no_overflow': 1,
'int_sub': 2})
def test_loop_invariant_mul_bridge1(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'n'])
def f(x, y, n):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, n=n, res=res)
myjitdriver.jit_merge_point(x=x, y=y, n=n, res=res)
res += x * x
if y<n:
x += 1
y -= 1
return res
res = self.meta_interp(f, [6, 32, 16])
assert res == 3427
self.check_trace_count(3)
def test_loop_invariant_mul_bridge_maintaining1(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'n'])
def f(x, y, n):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res, n=n)
myjitdriver.jit_merge_point(x=x, y=y, res=res, n=n)
res += x * x
if y<n:
res += 1
y -= 1
return res
res = self.meta_interp(f, [6, 32, 16])
assert res == 1167
self.check_trace_count(3)
self.check_resops(int_mul=3)
def test_loop_invariant_mul_bridge_maintaining2(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'n'])
def f(x, y, n):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res, n=n)
myjitdriver.jit_merge_point(x=x, y=y, res=res, n=n)
z = x * x
res += z
if y<n:
res += z
y -= 1
return res
res = self.meta_interp(f, [6, 32, 16])
assert res == 1692
self.check_trace_count(3)
self.check_resops(int_mul=3)
def test_loop_invariant_mul_bridge_maintaining3(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x', 'm'])
def f(x, y, m):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res, m=m)
myjitdriver.jit_merge_point(x=x, y=y, res=res, m=m)
z = x * x
res += z
if y<m:
res += z
y -= 1
return res
res = self.meta_interp(f, [6, 32, 16])
assert res == 1692
self.check_trace_count(3)
self.check_resops({'int_lt': 4, 'int_gt': 4, 'guard_false': 2,
'guard_true': 6, 'int_sub': 4, 'jump': 3,
'int_mul': 3, 'int_add': 4})
def test_loop_invariant_mul_ovf2(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
b = y * 2
try:
res += ovfcheck(x * x) + b
except OverflowError:
res += 1
y -= 1
return res
res = self.meta_interp(f, [sys.maxint, 7])
assert res == f(sys.maxint, 7)
self.check_trace_count(1)
res = self.meta_interp(f, [6, 7])
assert res == 308
def test_loop_invariant_mul_bridge_ovf1(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2'])
def f(x1, x2, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res)
myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res)
try:
res += ovfcheck(x1 * x1)
except OverflowError:
res += 1
if y<32 and (y>>2)&1==0:
x1, x2 = x2, x1
y -= 1
return res
res = self.meta_interp(f, [6, sys.maxint, 48])
self.check_trace_count(6)
assert res == f(6, sys.maxint, 48)
def test_loop_invariant_mul_bridge_ovf2(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n'])
def f(x1, x2, n, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res, n=n)
myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n)
try:
res += ovfcheck(x1 * x1)
except OverflowError:
res += 1
y -= 1
if y&4 == 0:
x1, x2 = x2, x1
return res
res = self.meta_interp(f, [6, sys.maxint, 32, 48])
assert res == f(6, sys.maxint, 32, 48)
res = self.meta_interp(f, [sys.maxint, 6, 32, 48])
assert res == f(sys.maxint, 6, 32, 48)
def test_loop_invariant_intbox(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
class I:
__slots__ = 'intval'
_immutable_ = True
def __init__(self, intval):
self.intval = intval
def f(i, y):
res = 0
x = I(i)
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
res += x.intval * x.intval
y -= 1
return res
res = self.meta_interp(f, [6, 7])
assert res == 252
self.check_trace_count(1)
self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2,
'getfield_gc_pure_i': 1, 'int_mul': 1,
'guard_true': 2, 'int_sub': 2})
def test_loops_are_transient(self):
import gc, weakref
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
res += x
if y%2:
res *= 2
y -= 1
return res
wr_loops = []
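# Temporarily monkey-patch TreeLoop.__init__ so a weakref to every loop built
# during meta_interp gets recorded; after the final gc.collect() all of them
# must be dead, i.e. nothing keeps the compiled TreeLoop objects alive
# (hence the no_stats=True in the meta_interp call below).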
old_init = history.TreeLoop.__init__.im_func
try:
def track_init(self, name):
old_init(self, name)
wr_loops.append(weakref.ref(self))
history.TreeLoop.__init__ = track_init
res = self.meta_interp(f, [6, 15], no_stats=True)
finally:
history.TreeLoop.__init__ = old_init
assert res == f(6, 15)
gc.collect()
assert not [wr for wr in wr_loops if wr()]
def test_string(self):
def f(n):
bytecode = 'adlfkj' + chr(n)
if n < len(bytecode):
return bytecode[n]
else:
return "?"
res = self.interp_operations(f, [1])
assert res == ord("d") # XXX should be "d"
res = self.interp_operations(f, [6])
assert res == 6
res = self.interp_operations(f, [42])
assert res == ord("?")
def test_chr2str(self):
def f(n):
s = chr(n)
return s[0]
res = self.interp_operations(f, [3])
assert res == 3
def test_unicode(self):
def f(n):
bytecode = u'adlfkj' + unichr(n)
if n < len(bytecode):
return bytecode[n]
else:
return u"?"
res = self.interp_operations(f, [1])
assert res == ord(u"d") # XXX should be "d"
res = self.interp_operations(f, [6])
assert res == 6
res = self.interp_operations(f, [42])
assert res == ord(u"?")
def test_char_in_constant_string(self):
def g(string):
return '\x00' in string
def f():
if g('abcdef'): return -60
if not g('abc\x00ef'): return -61
return 42
res = self.interp_operations(f, [])
assert res == 42
self.check_operations_history({'finish': 1}) # nothing else
def test_residual_call(self):
@dont_look_inside
def externfn(x, y):
return x * y
def f(n):
return externfn(n, n+1)
res = self.interp_operations(f, [6])
assert res == 42
self.check_operations_history(int_add=1, int_mul=0, call_i=1, guard_no_exception=0)
def test_residual_call_elidable(self):
def externfn(x, y):
return x * y
externfn._elidable_function_ = True
def f(n):
promote(n)
return externfn(n, n+1)
res = self.interp_operations(f, [6])
assert res == 42
# CALL_PURE is not recorded in the history if all-constant args
self.check_operations_history(int_add=0, int_mul=0,
call_i=0, call_pure_i=0)
def test_residual_call_elidable_1(self):
@elidable
def externfn(x, y):
return x * y
def f(n):
return externfn(n, n+1)
res = self.interp_operations(f, [6])
assert res == 42
# CALL_PURE is recorded in the history if not-all-constant args
self.check_operations_history(int_add=1, int_mul=0,
call_i=0, call_pure_i=1)
def test_residual_call_elidable_2(self):
myjitdriver = JitDriver(greens = [], reds = ['n'])
@elidable
def externfn(x):
return x - 1
def f(n):
while n > 0:
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
n = externfn(n)
return n
res = self.meta_interp(f, [7])
assert res == 0
# CALL_PURE is recorded in the history, but turned into a CALL
# by optimizeopt.py
self.check_resops(call_pure_i=0, call_i=2, int_sub=0)
def test_constfold_call_elidable(self):
myjitdriver = JitDriver(greens = ['m'], reds = ['n'])
@elidable
def externfn(x):
return x - 3
def f(n, m):
while n > 0:
myjitdriver.can_enter_jit(n=n, m=m)
myjitdriver.jit_merge_point(n=n, m=m)
n -= externfn(m)
return n
res = self.meta_interp(f, [21, 5])
assert res == -1
# the CALL_PURE is constant-folded away by optimizeopt.py
self.check_resops(call_pure_i=0, call_i=0, int_sub=2)
def test_constfold_call_elidable_2(self):
myjitdriver = JitDriver(greens = ['m'], reds = ['n'])
@elidable
def externfn(x):
return x - 3
class V:
def __init__(self, value):
self.value = value
def f(n, m):
while n > 0:
myjitdriver.can_enter_jit(n=n, m=m)
myjitdriver.jit_merge_point(n=n, m=m)
v = V(m)
n -= externfn(v.value)
return n
res = self.meta_interp(f, [21, 5])
assert res == -1
# the CALL_PURE is constant-folded away by optimizeopt.py
self.check_resops(call_pure_i=0, call_i=0, int_sub=2)
def test_elidable_function_returning_object(self):
myjitdriver = JitDriver(greens = ['m'], reds = ['n'])
class V:
def __init__(self, x):
self.x = x
v1 = V(1)
v2 = V(2)
@elidable
def externfn(x):
if x:
return v1
else:
return v2
def f(n, m):
while n > 0:
myjitdriver.can_enter_jit(n=n, m=m)
myjitdriver.jit_merge_point(n=n, m=m)
m = V(m).x
n -= externfn(m).x + externfn(m + m - m).x
return n
res = self.meta_interp(f, [21, 5])
assert res == -1
# the CALL_PURE is constant-folded away by optimizeopt.py
self.check_resops(call_pure_r=0, call_r=0, getfield_gc_i=1, int_sub=2,
call_pure_i=0, call_i=0)
def test_elidable_raising(self):
myjitdriver = JitDriver(greens = ['m'], reds = ['n'])
@elidable
def externfn(x):
if x <= 0:
raise ValueError
return x - 1
def f(n, m):
while n > 0:
myjitdriver.can_enter_jit(n=n, m=m)
myjitdriver.jit_merge_point(n=n, m=m)
try:
n -= externfn(m)
except ValueError:
n -= 1
return n
res = self.meta_interp(f, [22, 6])
assert res == -3
# the CALL_PURE is constant-folded away during tracing
self.check_resops(call_pure_i=0, call_i=0, int_sub=2)
#
res = self.meta_interp(f, [22, -5])
assert res == 0
# raises: becomes CALL and is not constant-folded away
self.check_resops(call_pure_i=0, call_i=2, int_sub=2)
def test_elidable_raising_2(self):
myjitdriver = JitDriver(greens = ['m'], reds = ['n'])
@elidable
def externfn(x):
if x <= 0:
raise ValueError
return x - 1
def f(n, m):
while n > 0:
myjitdriver.can_enter_jit(n=n, m=m)
myjitdriver.jit_merge_point(n=n, m=m)
try:
n -= externfn(noConst(m))
except ValueError:
n -= 1
return n
res = self.meta_interp(f, [22, 6])
assert res == -3
# the CALL_PURE is constant-folded away by optimizeopt.py
self.check_resops(call_pure_i=0, call_i=0, int_sub=2)
#
res = self.meta_interp(f, [22, -5])
assert res == 0
# raises: becomes CALL and is not constant-folded away
self.check_resops(call_pure_i=0, call_i=2, int_sub=2)
def test_constant_across_mp(self):
myjitdriver = JitDriver(greens = [], reds = ['n'])
class X(object):
pass
def f(n):
while n > -100:
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
x = X()
x.arg = 5
if n <= 0: break
n -= x.arg
x.arg = 6 # prevents 'x.arg' from being annotated as constant
return n
res = self.meta_interp(f, [31])
assert res == -4
def test_stopatxpolicy(self):
myjitdriver = JitDriver(greens = [], reds = ['y'])
def internfn(y):
return y * 3
def externfn(y):
return y % 4
def f(y):
while y >= 0:
myjitdriver.can_enter_jit(y=y)
myjitdriver.jit_merge_point(y=y)
if y & 7:
f = internfn
else:
f = externfn
f(y)
y -= 1
return 42
policy = StopAtXPolicy(externfn)
res = self.meta_interp(f, [31], policy=policy)
assert res == 42
self.check_resops(int_mul=2, int_mod=0)
def test_we_are_jitted(self):
myjitdriver = JitDriver(greens = [], reds = ['y'])
def f(y):
while y >= 0:
myjitdriver.can_enter_jit(y=y)
myjitdriver.jit_merge_point(y=y)
if we_are_jitted():
x = 1
else:
x = 10
y -= x
return y
assert f(55) == -5
res = self.meta_interp(f, [55])
assert res == -1
def test_confirm_enter_jit(self):
def confirm_enter_jit(x, y):
return x <= 5
myjitdriver = JitDriver(greens = ['x'], reds = ['y'],
confirm_enter_jit = confirm_enter_jit)
def f(x, y):
while y >= 0:
myjitdriver.can_enter_jit(x=x, y=y)
myjitdriver.jit_merge_point(x=x, y=y)
y -= x
return y
#
res = self.meta_interp(f, [10, 84])
assert res == -6
self.check_trace_count(0)
#
res = self.meta_interp(f, [3, 19])
assert res == -2
self.check_trace_count(1)
def test_can_never_inline(self):
def can_never_inline(x):
return x > 50
myjitdriver = JitDriver(greens = ['x'], reds = ['y'],
can_never_inline = can_never_inline)
@dont_look_inside
def marker():
pass
def f(x, y):
while y >= 0:
myjitdriver.can_enter_jit(x=x, y=y)
myjitdriver.jit_merge_point(x=x, y=y)
x += 1
if x == 4 or x == 61:
marker()
y -= x
return y
#
res = self.meta_interp(f, [3, 6], repeat=7, function_threshold=0)
assert res == 6 - 4 - 5
self.check_history(call_n=0) # because the trace starts in the middle
#
res = self.meta_interp(f, [60, 84], repeat=7)
assert res == 84 - 61 - 62
self.check_history(call_n=1) # because the trace starts immediately
def test_unroll_one_loop_iteration(self):
def unroll(code):
return code == 0
myjitdriver = JitDriver(greens = ['code'],
reds = ['loops', 'inner_loops', 's'],
should_unroll_one_iteration=unroll)
def f(code, loops, inner_loops):
s = 0
while loops > 0:
myjitdriver.jit_merge_point(code=code, loops=loops,
inner_loops=inner_loops, s=s)
if code == 1:
s += f(0, inner_loops, 0)
loops -= 1
s += 1
return s
res = self.meta_interp(f, [1, 4, 1], enable_opts="", inline=True)
assert res == f(1, 4, 1)
self.check_history(call_assembler_i=0)
res = self.meta_interp(f, [1, 4, 2], enable_opts="", inline=True)
assert res == f(1, 4, 2)
self.check_history(call_assembler_i=1)
def test_format(self):
def f(n):
return len("<%d>" % n)
res = self.interp_operations(f, [421])
assert res == 5
def test_switch(self):
def f(n):
if n == -5: return 12
elif n == 2: return 51
elif n == 7: return 1212
else: return 42
res = self.interp_operations(f, [7])
assert res == 1212
res = self.interp_operations(f, [12311])
assert res == 42
def test_switch_bridges(self):
from rpython.rlib.rarithmetic import intmask
myjitdriver = JitDriver(greens = [], reds = 'auto')
lsts = [[-5, 2, 20] * 6,
[7, 123, 2] * 6,
[12311, -5, 7] * 6,
[7, 123, 2] * 4 + [-5, -5, -5] * 2,
[7, 123, 2] * 4 + [-5, -5, -5] * 2 + [12311, 12311, 12311],
]
def f(case):
x = 0
i = 0
lst = lsts[case]
while i < len(lst):
myjitdriver.jit_merge_point()
n = lst[i]
if n == -5: a = 5
elif n == 2: a = 4
elif n == 7: a = 3
else: a = 2
x = intmask(x * 10 + a)
#print "XXXXXXXXXXXXXXXX", x
i += 1
return x
res = self.meta_interp(f, [0], backendopt=True)
assert res == intmask(542 * 1001001001001001)
res = self.meta_interp(f, [1], backendopt=True)
assert res == intmask(324 * 1001001001001001)
res = self.meta_interp(f, [2], backendopt=True)
assert res == intmask(253 * 1001001001001001)
res = self.meta_interp(f, [3], backendopt=True)
assert res == intmask(324324324324555555)
res = self.meta_interp(f, [4], backendopt=True)
assert res == intmask(324324324324555555222)
def test_r_uint(self):
from rpython.rlib.rarithmetic import r_uint
myjitdriver = JitDriver(greens = [], reds = ['y'])
def f(y):
y = r_uint(y)
while y > 0:
myjitdriver.can_enter_jit(y=y)
myjitdriver.jit_merge_point(y=y)
y -= 1
return y
res = self.meta_interp(f, [10])
assert res == 0
def test_uint_operations(self):
from rpython.rlib.rarithmetic import r_uint
def f(n):
return ((r_uint(n) - 123) >> 1) <= r_uint(456)
res = self.interp_operations(f, [50])
assert res == False
self.check_operations_history(int_rshift=0, uint_rshift=1,
int_le=0, uint_le=1,
int_sub=1)
def test_uint_condition(self):
from rpython.rlib.rarithmetic import r_uint
def f(n):
if ((r_uint(n) - 123) >> 1) <= r_uint(456):
return 24
else:
return 12
res = self.interp_operations(f, [50])
assert res == 12
self.check_operations_history(int_rshift=0, uint_rshift=1,
int_le=0, uint_le=1,
int_sub=1)
def test_int_between(self):
#
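# check() builds a tiny function on the fly: arg1/arg2/arg3 are source
# snippets ('n', 'm', 'p' or integer literals) substituted into the exec'd
# template, so llop.int_between is exercised with different mixes of variables
# and constants, and expect_operations asserts which lowered operations
# (int_sub / uint_lt / int_eq) show up in the recorded history.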
def check(arg1, arg2, arg3, expect_result, **expect_operations):
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
loc = locals().copy()
exec py.code.Source("""
def f(n, m, p):
arg1 = %(arg1)s
arg2 = %(arg2)s
arg3 = %(arg3)s
return llop.int_between(lltype.Bool, arg1, arg2, arg3)
""" % locals()).compile() in loc
res = self.interp_operations(loc['f'], [5, 6, 7])
assert res == expect_result
self.check_operations_history(expect_operations)
#
check('n', 'm', 'p', True, int_sub=2, uint_lt=1)
check('n', 'p', 'm', False, int_sub=2, uint_lt=1)
#
check('n', 'm', 6, False, int_sub=2, uint_lt=1)
#
check('n', 4, 'p', False, int_sub=2, uint_lt=1)
check('n', 5, 'p', True, int_sub=2, uint_lt=1)
check('n', 8, 'p', False, int_sub=2, uint_lt=1)
#
check('n', 6, 7, True, int_sub=2, uint_lt=1)
#
check(-2, 'n', 'p', True, int_sub=2, uint_lt=1)
check(-2, 'm', 'p', True, int_sub=2, uint_lt=1)
check(-2, 'p', 'm', False, int_sub=2, uint_lt=1)
#check(0, 'n', 'p', True, uint_lt=1) xxx implement me
#check(0, 'm', 'p', True, uint_lt=1)
#check(0, 'p', 'm', False, uint_lt=1)
#
check(2, 'n', 6, True, int_sub=1, uint_lt=1)
check(2, 'm', 6, False, int_sub=1, uint_lt=1)
check(2, 'p', 6, False, int_sub=1, uint_lt=1)
check(5, 'n', 6, True, int_eq=1) # 6 == 5+1
check(5, 'm', 6, False, int_eq=1) # 6 == 5+1
#
check(2, 6, 'm', False, int_sub=1, uint_lt=1)
check(2, 6, 'p', True, int_sub=1, uint_lt=1)
#
check(2, 40, 6, False)
check(2, 40, 60, True)
def test_getfield(self):
class A:
pass
a1 = A()
a1.foo = 5
a2 = A()
a2.foo = 8
def f(x):
if x > 5:
a = a1
else:
a = a2
return a.foo * x
res = self.interp_operations(f, [42])
assert res == 210
self.check_operations_history(getfield_gc_i=1)
def test_getfield_immutable(self):
class A:
_immutable_ = True
a1 = A()
a1.foo = 5
a2 = A()
a2.foo = 8
def f(x):
if x > 5:
a = a1
else:
a = a2
return a.foo * x
res = self.interp_operations(f, [42])
assert res == 210
self.check_operations_history(getfield_gc_i=0)
def test_setfield_bool(self):
class A:
def __init__(self):
self.flag = True
myjitdriver = JitDriver(greens = [], reds = ['n', 'obj'])
def f(n):
obj = A()
res = False
while n > 0:
myjitdriver.can_enter_jit(n=n, obj=obj)
myjitdriver.jit_merge_point(n=n, obj=obj)
obj.flag = False
n -= 1
return res
res = self.meta_interp(f, [7])
assert type(res) == bool
assert not res
def test_int_add_ovf(self):
def f(x, y):
try:
return ovfcheck(x + y)
except OverflowError:
return -42
res = self.interp_operations(f, [-100, 2])
assert res == -98
res = self.interp_operations(f, [1, sys.maxint])
assert res == -42
def test_ovf_raise(self):
def g(x, y):
try:
return ovfcheck(x * y)
except OverflowError:
raise
def f(x, y):
try:
return g(x, y)
except OverflowError:
return 3
res = self.interp_operations(f, [sys.maxint, 2])
assert res == 3
res = self.interp_operations(f, [3, 2])
assert res == 6
def test_int_sub_ovf(self):
def f(x, y):
try:
return ovfcheck(x - y)
except OverflowError:
return -42
res = self.interp_operations(f, [-100, 2])
assert res == -102
res = self.interp_operations(f, [1, -sys.maxint])
assert res == -42
def test_int_mul_ovf(self):
def f(x, y):
try:
return ovfcheck(x * y)
except OverflowError:
return -42
res = self.interp_operations(f, [-100, 2])
assert res == -200
res = self.interp_operations(f, [-3, sys.maxint//2])
assert res == -42
def test_mod_ovf(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'y'])
def f(n, x, y):
while n > 0:
myjitdriver.can_enter_jit(x=x, y=y, n=n)
myjitdriver.jit_merge_point(x=x, y=y, n=n)
n -= ovfcheck(x % y)
return n
res = self.meta_interp(f, [20, 1, 2])
assert res == 0
self.check_resops(call_i=0, call_r=0)
def test_abs(self):
myjitdriver = JitDriver(greens = [], reds = ['i', 't'])
def f(i):
t = 0
while i < 10:
myjitdriver.can_enter_jit(i=i, t=t)
myjitdriver.jit_merge_point(i=i, t=t)
t += abs(i)
i += 1
return t
res = self.meta_interp(f, [-5])
assert res == 5+4+3+2+1+0+1+2+3+4+5+6+7+8+9
def test_float(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
def f(x, y):
x = float(x)
y = float(y)
res = 0.0
while y > 0.0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
res += x
y -= 1.0
return res
res = self.meta_interp(f, [6, 7])
assert res == 42.0
self.check_trace_count(1)
self.check_resops({'jump': 1, 'float_gt': 2, 'float_add': 2,
'float_sub': 2, 'guard_true': 2})
def test_print(self):
myjitdriver = JitDriver(greens = [], reds = ['n'])
def f(n):
while n > 0:
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
print n
n -= 1
return n
res = self.meta_interp(f, [7])
assert res == 0
def test_bridge_from_interpreter_1(self):
mydriver = JitDriver(reds = ['n'], greens = [])
def f(n):
while n > 0:
mydriver.can_enter_jit(n=n)
mydriver.jit_merge_point(n=n)
n -= 1
self.meta_interp(f, [20], repeat=7)
# the loop and the entry path as a single trace
self.check_jitcell_token_count(1)
# we get:
# ENTER - compile the new loop and the entry bridge
# ENTER - compile the leaving path
self.check_enter_count(2)
def test_bridge_from_interpreter_2(self):
# one case for backend - computing of framesize on guard failure
mydriver = JitDriver(reds = ['n'], greens = [])
glob = [1]
def f(n):
while n > 0:
mydriver.can_enter_jit(n=n)
mydriver.jit_merge_point(n=n)
if n == 17 and glob[0]:
glob[0] = 0
x = n + 1
y = n + 2
z = n + 3
k = n + 4
n -= 1
n += x + y + z + k
n -= x + y + z + k
n -= 1
self.meta_interp(f, [20], repeat=7)
def test_bridge_from_interpreter_3(self):
# one case for backend - computing of framesize on guard failure
mydriver = JitDriver(reds = ['n', 'x', 'y', 'z', 'k'], greens = [])
class Global:
pass
glob = Global()
def f(n):
glob.x = 1
x = 0
y = 0
z = 0
k = 0
while n > 0:
mydriver.can_enter_jit(n=n, x=x, y=y, z=z, k=k)
mydriver.jit_merge_point(n=n, x=x, y=y, z=z, k=k)
x += 10
y += 3
z -= 15
k += 4
if n == 17 and glob.x:
glob.x = 0
x += n + 1
y += n + 2
z += n + 3
k += n + 4
n -= 1
n -= 1
return x + 2*y + 3*z + 5*k + 13*n
res = self.meta_interp(f, [20], repeat=7)
assert res == f(20)
def test_bridge_from_interpreter_4(self):
jitdriver = JitDriver(reds = ['n', 'k'], greens = [])
def f(n, k):
while n > 0:
jitdriver.can_enter_jit(n=n, k=k)
jitdriver.jit_merge_point(n=n, k=k)
if k:
n -= 2
else:
n -= 1
return n + k
from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache
from rpython.jit.metainterp.warmspot import WarmRunnerDesc
interp, graph = get_interpreter(f, [0, 0], backendopt=False,
inline_threshold=0)
clear_tcache()
translator = interp.typer.annotator.translator
translator.config.translation.gc = "boehm"
warmrunnerdesc = WarmRunnerDesc(translator,
CPUClass=self.CPUClass)
state = warmrunnerdesc.jitdrivers_sd[0].warmstate
state.set_param_threshold(3) # for tests
state.set_param_trace_eagerness(0) # for tests
warmrunnerdesc.finish()
for n, k in [(20, 0), (20, 1)]:
interp.eval_graph(graph, [n, k])
def test_bridge_leaving_interpreter_5(self):
mydriver = JitDriver(reds = ['n', 'x'], greens = [])
class Global:
pass
glob = Global()
def f(n):
x = 0
glob.x = 1
while n > 0:
mydriver.can_enter_jit(n=n, x=x)
mydriver.jit_merge_point(n=n, x=x)
glob.x += 1
x += 3
n -= 1
glob.x += 100
return glob.x + x
res = self.meta_interp(f, [20], repeat=7)
assert res == f(20)
def test_instantiate_classes(self):
class Base: pass
class A(Base): foo = 72
class B(Base): foo = 8
def f(n):
if n > 5:
cls = A
else:
cls = B
return cls().foo
res = self.interp_operations(f, [3])
assert res == 8
res = self.interp_operations(f, [13])
assert res == 72
def test_instantiate_does_not_call(self):
mydriver = JitDriver(reds = ['n', 'x'], greens = [])
class Base: pass
class A(Base): foo = 72
class B(Base): foo = 8
def f(n):
x = 0
while n > 0:
mydriver.can_enter_jit(n=n, x=x)
mydriver.jit_merge_point(n=n, x=x)
if n % 2 == 0:
cls = A
else:
cls = B
inst = cls()
x += inst.foo
n -= 1
return x
res = self.meta_interp(f, [20], enable_opts='')
assert res == f(20)
self.check_resops(call_i=0, call_r=0)
def test_zerodivisionerror(self):
# test the case of exception-raising operation that is not delegated
# to the backend at all: ZeroDivisionError
#
def f(n):
assert n >= 0
try:
return ovfcheck(5 % n)
except ZeroDivisionError:
return -666
except OverflowError:
return -777
res = self.interp_operations(f, [0])
assert res == -666
#
def f(n):
assert n >= 0
try:
return ovfcheck(6 // n)
except ZeroDivisionError:
return -667
except OverflowError:
return -778
res = self.interp_operations(f, [0])
assert res == -667
def test_div_overflow(self):
import sys
from rpython.rtyper.lltypesystem.lloperation import llop
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
def f(x, y):
res = 0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
try:
res += llop.int_floordiv_ovf(lltype.Signed,
-sys.maxint-1, x)
x += 5
except OverflowError:
res += 100
y -= 1
return res
res = self.meta_interp(f, [-41, 16])
assert res == ((-sys.maxint-1) // (-41) +
(-sys.maxint-1) // (-36) +
(-sys.maxint-1) // (-31) +
(-sys.maxint-1) // (-26) +
(-sys.maxint-1) // (-21) +
(-sys.maxint-1) // (-16) +
(-sys.maxint-1) // (-11) +
(-sys.maxint-1) // (-6) +
100 * 8)
def test_isinstance(self):
class A:
pass
class B(A):
pass
@dont_look_inside
def extern(n):
if n:
return A()
else:
return B()
def fn(n):
obj = extern(n)
return isinstance(obj, B)
res = self.interp_operations(fn, [0])
assert res
self.check_operations_history(guard_class=1)
res = self.interp_operations(fn, [1])
assert not res
def test_isinstance_2(self):
driver = JitDriver(greens = [], reds = ['n', 'sum', 'x'])
class A:
pass
class B(A):
pass
class C(B):
pass
def main():
return f(5, B()) * 10 + f(5, C()) + f(5, A()) * 100
def f(n, x):
sum = 0
while n > 0:
driver.can_enter_jit(x=x, n=n, sum=sum)
driver.jit_merge_point(x=x, n=n, sum=sum)
if isinstance(x, B):
sum += 1
n -= 1
return sum
res = self.meta_interp(main, [])
assert res == 55
def test_assert_isinstance(self):
class A:
pass
class B(A):
pass
def fn(n):
# this should only be called with n != 0
if n:
obj = B()
obj.a = n
else:
obj = A()
obj.a = 17
assert isinstance(obj, B)
return obj.a
res = self.interp_operations(fn, [1])
assert res == 1
self.check_operations_history(guard_class=0)
def test_r_dict(self):
from rpython.rlib.objectmodel import r_dict
class FooError(Exception):
pass
def myeq(n, m):
return n == m
def myhash(n):
if n < 0:
raise FooError
return -n
def f(n):
d = r_dict(myeq, myhash)
for i in range(10):
d[i] = i*i
try:
return d[n]
except FooError:
return 99
res = self.interp_operations(f, [5])
assert res == f(5)
def test_free_object(self):
import weakref
from rpython.rtyper.lltypesystem.lloperation import llop
myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
class X(object):
pass
def main(n, x):
while n > 0:
myjitdriver.can_enter_jit(n=n, x=x)
myjitdriver.jit_merge_point(n=n, x=x)
n -= x.foo
def g(n):
x = X()
x.foo = 2
main(n, x)
x.foo = 5
return weakref.ref(x)
def f(n):
r = g(n)
rgc.collect(); rgc.collect(); rgc.collect()
return r() is None
#
assert f(30) == 1
res = self.meta_interp(f, [30], no_stats=True)
assert res == 1
def test_pass_around(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
def call():
pass
def f(n, x):
while n > 0:
myjitdriver.can_enter_jit(n=n, x=x)
myjitdriver.jit_merge_point(n=n, x=x)
if n % 2:
call()
if n == 8:
return x
x = 3
else:
x = 5
n -= 1
return 0
self.meta_interp(f, [40, 0])
def test_const_inputargs(self):
myjitdriver = JitDriver(greens = ['m'], reds = ['n', 'x'])
def f(n, x):
m = 0x7FFFFFFF
while n > 0:
myjitdriver.can_enter_jit(m=m, n=n, x=x)
myjitdriver.jit_merge_point(m=m, n=n, x=x)
x = 42
n -= 1
m = m >> 1
return x
res = self.meta_interp(f, [50, 1], enable_opts='')
assert res == 42
def test_set_param(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
def g(n):
x = 0
while n > 0:
myjitdriver.can_enter_jit(n=n, x=x)
myjitdriver.jit_merge_point(n=n, x=x)
n -= 1
x += n
return x
def f(n, threshold, arg):
if arg:
set_param(myjitdriver, 'threshold', threshold)
else:
set_param(None, 'threshold', threshold)
return g(n)
res = self.meta_interp(f, [10, 3, 1])
assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0
self.check_jitcell_token_count(1)
res = self.meta_interp(f, [10, 13, 0])
assert res == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0
self.check_jitcell_token_count(0)
def test_dont_look_inside(self):
@dont_look_inside
def g(a, b):
return a + b
def f(a, b):
return g(a, b)
res = self.interp_operations(f, [3, 5])
assert res == 8
self.check_operations_history(int_add=0, call_i=1)
def test_listcomp(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'lst'])
def f(x, y):
lst = [0, 0, 0]
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, lst=lst)
myjitdriver.jit_merge_point(x=x, y=y, lst=lst)
lst = [i+x for i in lst if i >=0]
y -= 1
return lst[0]
res = self.meta_interp(f, [6, 7], listcomp=True, backendopt=True, listops=True)
# XXX: the loop looks inefficient
assert res == 42
def test_tuple_immutable(self):
def new(a, b):
return a, b
def f(a, b):
tup = new(a, b)
return tup[1]
res = self.interp_operations(f, [3, 5])
assert res == 5
self.check_operations_history(setfield_gc=2, getfield_gc_pure_i=0)
def test_oosend_look_inside_only_one(self):
class A:
pass
class B(A):
def g(self):
return 123
class C(A):
@dont_look_inside
def g(self):
return 456
def f(n):
if n > 3:
x = B()
else:
x = C()
return x.g() + x.g()
res = self.interp_operations(f, [10])
assert res == 123 * 2
res = self.interp_operations(f, [-10])
assert res == 456 * 2
def test_residual_external_call(self):
import math
myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res'])
# When this test was written, ll_math couldn't be inlined; now it can.
# Instead of rewriting this test, just ensure that an external call is
# still generated by wrapping the function.
@dont_look_inside
def modf(x):
return math.modf(x)
def f(x, y):
x = float(x)
res = 0.0
while y > 0:
myjitdriver.can_enter_jit(x=x, y=y, res=res)
myjitdriver.jit_merge_point(x=x, y=y, res=res)
# this is an external call that the default policy ignores
rpart, ipart = modf(x)
res += ipart
y -= 1
return res
res = self.meta_interp(f, [6, 7])
assert res == 42
self.check_trace_count(1)
self.check_resops(call_r=2)
def test_merge_guardclass_guardvalue(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'l'])
class A(object):
def g(self, x):
return x - 5
class B(A):
def g(self, y):
return y - 3
a1 = A()
a2 = A()
b = B()
def f(x):
l = [a1] * 100 + [a2] * 100 + [b] * 100
while x > 0:
myjitdriver.can_enter_jit(x=x, l=l)
myjitdriver.jit_merge_point(x=x, l=l)
a = l[x]
x = a.g(x)
promote(a)
return x
res = self.meta_interp(f, [299], listops=True)
assert res == f(299)
self.check_resops(guard_class=0, guard_value=6)
def test_merge_guardnonnull_guardclass(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'l'])
class A(object):
def g(self, x):
return x - 3
class B(A):
def g(self, y):
return y - 5
a1 = A()
b1 = B()
def f(x):
l = [None] * 100 + [b1] * 100 + [a1] * 100
while x > 0:
myjitdriver.can_enter_jit(x=x, l=l)
myjitdriver.jit_merge_point(x=x, l=l)
a = l[x]
if a:
x = a.g(x)
else:
x -= 7
return x
res = self.meta_interp(f, [299], listops=True)
assert res == f(299)
self.check_resops(guard_class=0, guard_nonnull=4,
guard_nonnull_class=4, guard_isnull=2)
def test_merge_guardnonnull_guardvalue(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'l'])
class A(object):
pass
class B(A):
pass
a1 = A()
b1 = B()
def f(x):
l = [b1] * 100 + [None] * 100 + [a1] * 100
while x > 0:
myjitdriver.can_enter_jit(x=x, l=l)
myjitdriver.jit_merge_point(x=x, l=l)
a = l[x]
if a:
x -= 5
else:
x -= 7
promote(a)
return x
res = self.meta_interp(f, [299], listops=True)
assert res == f(299)
self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4,
guard_nonnull_class=0, guard_isnull=2)
def test_merge_guardnonnull_guardvalue_2(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'l'])
class A(object):
pass
class B(A):
pass
a1 = A()
b1 = B()
def f(x):
l = [None] * 100 + [b1] * 100 + [a1] * 100
while x > 0:
myjitdriver.can_enter_jit(x=x, l=l)
myjitdriver.jit_merge_point(x=x, l=l)
a = l[x]
if a:
x -= 5
else:
x -= 7
promote(a)
return x
res = self.meta_interp(f, [299], listops=True)
assert res == f(299)
self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4,
guard_nonnull_class=0, guard_isnull=2)
def test_merge_guardnonnull_guardclass_guardvalue(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'l'])
class A(object):
def g(self, x):
return x - 3
class B(A):
def g(self, y):
return y - 5
a1 = A()
a2 = A()
b1 = B()
def f(x):
l = [a2] * 100 + [None] * 100 + [b1] * 100 + [a1] * 100
while x > 0:
myjitdriver.can_enter_jit(x=x, l=l)
myjitdriver.jit_merge_point(x=x, l=l)
a = l[x]
if a:
x = a.g(x)
else:
x -= 7
promote(a)
return x
res = self.meta_interp(f, [399], listops=True)
assert res == f(399)
self.check_resops(guard_class=0, guard_nonnull=6, guard_value=6,
guard_nonnull_class=0, guard_isnull=2)
def test_residual_call_doesnt_lose_info(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'l'])
class A(object):
pass
globall = [""]
@dont_look_inside
def g(x):
globall[0] = str(x)
return x
def f(x):
y = A()
y.v = x
l = [0]
while y.v > 0:
myjitdriver.can_enter_jit(x=x, y=y, l=l)
myjitdriver.jit_merge_point(x=x, y=y, l=l)
l[0] = y.v
lc = l[0]
y.v = g(y.v) - y.v/y.v + lc/l[0] - 1
return y.v
res = self.meta_interp(f, [20], listops=True)
self.check_resops(getarrayitem_gc_i=0, getfield_gc_i=1)
def test_guard_isnull_nonnull(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'res'])
class A(object):
pass
@dont_look_inside
def create(x):
if x >= -40:
return A()
return None
def f(x):
res = 0
while x > 0:
myjitdriver.can_enter_jit(x=x, res=res)
myjitdriver.jit_merge_point(x=x, res=res)
obj = create(x-1)
if obj is not None:
res += 1
obj2 = create(x-1000)
if obj2 is None:
res += 1
x -= 1
return res
res = self.meta_interp(f, [21])
assert res == 42
self.check_resops(guard_nonnull=2, guard_isnull=2)
def test_loop_invariant1(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'res'])
class A(object):
pass
a = A()
a.current_a = A()
a.current_a.x = 1
@loop_invariant
def f():
return a.current_a
def g(x):
res = 0
while x > 0:
myjitdriver.can_enter_jit(x=x, res=res)
myjitdriver.jit_merge_point(x=x, res=res)
res += f().x
res += f().x
res += f().x
x -= 1
a.current_a = A()
a.current_a.x = 2
return res
res = self.meta_interp(g, [21])
assert res == 3 * 21
self.check_resops(call_r=1)
def test_bug_optimizeopt_mutates_ops(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'res', 'const', 'a'])
class A(object):
pass
class B(A):
pass
glob = A()
glob.a = None
def f(x):
res = 0
a = A()
a.x = 0
glob.a = A()
const = 2
while x > 0:
myjitdriver.can_enter_jit(x=x, res=res, a=a, const=const)
myjitdriver.jit_merge_point(x=x, res=res, a=a, const=const)
if type(glob.a) is B:
res += 1
if a is None:
a = A()
a.x = x
glob.a = B()
const = 2
else:
promote(const)
x -= const
res += a.x
a = None
glob.a = A()
const = 1
return res
res = self.meta_interp(f, [21])
assert res == f(21)
def test_getitem_indexerror(self):
lst = [10, 4, 9, 16]
def f(n):
try:
return lst[n]
except IndexError:
return -2
res = self.interp_operations(f, [2])
assert res == 9
res = self.interp_operations(f, [4])
assert res == -2
res = self.interp_operations(f, [-4])
assert res == 10
res = self.interp_operations(f, [-5])
assert res == -2
def test_guard_always_changing_value(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'a'])
def f(x):
a = 0
while x > 0:
myjitdriver.can_enter_jit(x=x, a=a)
myjitdriver.jit_merge_point(x=x, a=a)
a += 1
promote(a)
x -= 1
self.meta_interp(f, [50])
self.check_trace_count(1)
# this checks that the logic triggered by make_a_counter_per_value()
# works and prevents generating tons of bridges
def test_swap_values(self):
def f(x, y):
if x > 5:
x, y = y, x
return x - y
res = self.interp_operations(f, [10, 2])
assert res == -8
res = self.interp_operations(f, [3, 2])
assert res == 1
def test_raw_malloc_and_access(self):
TP = rffi.CArray(lltype.Signed)
def f(n):
a = lltype.malloc(TP, n, flavor='raw')
a[0] = n
res = a[0]
lltype.free(a, flavor='raw')
return res
res = self.interp_operations(f, [10])
assert res == 10
def test_raw_malloc_and_access_float(self):
TP = rffi.CArray(lltype.Float)
def f(n, f):
a = lltype.malloc(TP, n, flavor='raw')
a[0] = f
res = a[0]
lltype.free(a, flavor='raw')
return res
res = self.interp_operations(f, [10, 3.5])
assert res == 3.5
def test_jit_debug(self):
myjitdriver = JitDriver(greens = [], reds = ['x'])
class A:
pass
def f(x):
while x > 0:
myjitdriver.can_enter_jit(x=x)
myjitdriver.jit_merge_point(x=x)
jit_debug("hi there:", x)
jit_debug("foobar")
x -= 1
return x
res = self.meta_interp(f, [8])
assert res == 0
self.check_resops(jit_debug=4)
def test_assert_green(self):
def f(x, promote_flag):
if promote_flag:
promote(x)
assert_green(x)
return x
res = self.interp_operations(f, [8, 1])
assert res == 8
py.test.raises(AssertGreenFailed, self.interp_operations, f, [8, 0])
def test_multiple_specialied_versions1(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res'])
class Base:
def __init__(self, val):
self.val = val
class A(Base):
def binop(self, other):
return A(self.val + other.val)
class B(Base):
def binop(self, other):
return B(self.val * other.val)
def f(x, y):
res = x
while y > 0:
myjitdriver.can_enter_jit(y=y, x=x, res=res)
myjitdriver.jit_merge_point(y=y, x=x, res=res)
res = res.binop(x)
y -= 1
return res
def g(x, y):
a1 = f(A(x), y)
a2 = f(A(x), y)
b1 = f(B(x), y)
b2 = f(B(x), y)
assert a1.val == a2.val
assert b1.val == b2.val
return a1.val + b1.val
res = self.meta_interp(g, [6, 7])
assert res == 6*8 + 6**8
self.check_trace_count(4)
self.check_resops({'guard_class': 2, 'int_gt': 4,
'getfield_gc_i': 4, 'guard_true': 4,
'int_sub': 4, 'jump': 2, 'int_mul': 2,
'int_add': 2})
def test_multiple_specialied_versions_array(self):
myjitdriver = JitDriver(greens = [], reds = ['idx', 'y', 'x', 'res',
'array'])
class Base:
def __init__(self, val):
self.val = val
class A(Base):
def binop(self, other):
return A(self.val + other.val)
class B(Base):
def binop(self, other):
return B(self.val - other.val)
def f(x, y):
res = x
array = [1, 2, 3]
array[1] = 7
idx = 0
while y > 0:
myjitdriver.can_enter_jit(idx=idx, y=y, x=x, res=res,
array=array)
myjitdriver.jit_merge_point(idx=idx, y=y, x=x, res=res,
array=array)
res = res.binop(x)
res.val += array[idx] + array[1]
if y < 10:
idx = 2
y -= 1
return res
def g(x, y):
a1 = f(A(x), y)
a2 = f(A(x), y)
b1 = f(B(x), y)
b2 = f(B(x), y)
assert a1.val == a2.val
assert b1.val == b2.val
return a1.val + b1.val
res = self.meta_interp(g, [6, 20])
assert res == g(6, 20)
self.check_trace_count(8)
# 6 extra from sharing guard data
self.check_resops(getarrayitem_gc_i=10 + 6)
def test_multiple_specialied_versions_bridge(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res'])
class Base:
def __init__(self, val):
self.val = val
def getval(self):
return self.val
class A(Base):
def binop(self, other):
return A(self.getval() + other.getval())
class B(Base):
def binop(self, other):
return B(self.getval() * other.getval())
def f(x, y, z):
res = x
while y > 0:
myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res)
myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res)
res = res.binop(x)
y -= 1
if y < 7:
x = z
return res
def g(x, y):
a1 = f(A(x), y, A(x))
a2 = f(A(x), y, A(x))
assert a1.val == a2.val
b1 = f(B(x), y, B(x))
b2 = f(B(x), y, B(x))
assert b1.val == b2.val
c1 = f(B(x), y, A(x))
c2 = f(B(x), y, A(x))
assert c1.val == c2.val
d1 = f(A(x), y, B(x))
d2 = f(A(x), y, B(x))
assert d1.val == d2.val
return a1.val + b1.val + c1.val + d1.val
res = self.meta_interp(g, [3, 14])
assert res == g(3, 14)
def test_failing_inlined_guard(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res'])
class Base:
def __init__(self, val):
self.val = val
def getval(self):
return self.val
class A(Base):
def binop(self, other):
return A(self.getval() + other.getval())
class B(Base):
def binop(self, other):
return B(self.getval() * other.getval())
def f(x, y, z):
res = x
while y > 0:
myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res)
myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res)
res = res.binop(x)
y -= 1
if y < 8:
x = z
return res
def g(x, y):
c1 = f(A(x), y, B(x))
c2 = f(A(x), y, B(x))
assert c1.val == c2.val
return c1.val
res = self.meta_interp(g, [3, 16])
assert res == g(3, 16)
def test_inlined_guard_in_short_preamble(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res'])
class A:
def __init__(self, val):
self.val = val
def getval(self):
return self.val
def binop(self, other):
return A(self.getval() + other.getval())
def f(x, y, z):
res = x
while y > 0:
myjitdriver.can_enter_jit(y=y, x=x, z=z, res=res)
myjitdriver.jit_merge_point(y=y, x=x, z=z, res=res)
res = res.binop(x)
y -= 1
if y < 7:
x = z
return res
def g(x, y):
a1 = f(A(x), y, A(x))
a2 = f(A(x), y, A(x))
assert a1.val == a2.val
return a1.val
res = self.meta_interp(g, [3, 14])
assert res == g(3, 14)
def test_specialied_bridge(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res'])
class A:
def __init__(self, val):
self.val = val
def binop(self, other):
return A(self.val + other.val)
def f(x, y):
res = A(0)
while y > 0:
myjitdriver.can_enter_jit(y=y, x=x, res=res)
myjitdriver.jit_merge_point(y=y, x=x, res=res)
res = res.binop(A(y))
if y<7:
res = x
y -= 1
return res
def g(x, y):
a1 = f(A(x), y)
a2 = f(A(x), y)
assert a1.val == a2.val
return a1.val
res = self.meta_interp(g, [6, 14])
assert res == g(6, 14)
def test_specialied_bridge_const(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'const', 'x', 'res'])
class A:
def __init__(self, val):
self.val = val
def binop(self, other):
return A(self.val + other.val)
def f(x, y):
res = A(0)
const = 7
while y > 0:
myjitdriver.can_enter_jit(y=y, x=x, res=res, const=const)
myjitdriver.jit_merge_point(y=y, x=x, res=res, const=const)
const = promote(const)
res = res.binop(A(const))
if y<7:
res = x
y -= 1
return res
def g(x, y):
a1 = f(A(x), y)
a2 = f(A(x), y)
assert a1.val == a2.val
return a1.val
res = self.meta_interp(g, [6, 14])
assert res == g(6, 14)
def test_multiple_specialied_zigzag(self):
myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res'])
class Base:
def __init__(self, val):
self.val = val
class A(Base):
def binop(self, other):
return A(self.val + other.val)
def switch(self):
return B(self.val)
class B(Base):
def binop(self, other):
return B(self.val * other.val)
def switch(self):
return A(self.val)
def f(x, y):
res = x
while y > 0:
myjitdriver.can_enter_jit(y=y, x=x, res=res)
myjitdriver.jit_merge_point(y=y, x=x, res=res)
if y % 4 == 0:
res = res.switch()
res = res.binop(x)
y -= 1
return res
def g(x, y):
set_param(myjitdriver, 'max_unroll_loops', 5)
a1 = f(A(x), y)
a2 = f(A(x), y)
b1 = f(B(x), y)
b2 = f(B(x), y)
assert a1.val == a2.val
assert b1.val == b2.val
return a1.val + b1.val
res = self.meta_interp(g, [3, 23])
assert res == 7068153
self.check_trace_count(6)
self.check_resops(guard_true=8, guard_class=2, int_mul=3,
int_add=3, guard_false=4)
def test_dont_trace_every_iteration(self):
myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa'])
def main(a, b):
i = sa = 0
#while i < 200:
while i < 200:
myjitdriver.can_enter_jit(a=a, b=b, i=i, sa=sa)
myjitdriver.jit_merge_point(a=a, b=b, i=i, sa=sa)
if a > 0: pass
if b < 2: pass
sa += a % b
i += 1
return sa
def g():
return main(10, 20) + main(-10, -20)
res = self.meta_interp(g, [])
assert res == g()
self.check_enter_count(2)
def test_current_trace_length(self):
myjitdriver = JitDriver(greens = ['g'], reds = ['x', 'l'])
@dont_look_inside
def residual():
print "hi there"
@unroll_safe
def loop(g):
y = 0
while y < g:
residual()
y += 1
def f(x, g):
l = []
n = 0
while x > 0:
myjitdriver.can_enter_jit(x=x, g=g, l=l)
myjitdriver.jit_merge_point(x=x, g=g, l=l)
loop(g)
x -= 1
l.append(current_trace_length())
return l[-2] # not the blackholed version
res = self.meta_interp(f, [5, 8])
assert 14 < res < 42
res = self.meta_interp(f, [5, 2])
assert 4 < res < 14
def test_compute_identity_hash(self):
from rpython.rlib.objectmodel import compute_identity_hash
class A(object):
pass
def f():
a = A()
return compute_identity_hash(a) == compute_identity_hash(a)
res = self.interp_operations(f, [])
assert res
# a "did not crash" kind of test
def test_compute_unique_id(self):
from rpython.rlib.objectmodel import compute_unique_id
class A(object):
pass
def f():
a1 = A()
a2 = A()
return (compute_unique_id(a1) == compute_unique_id(a1) and
compute_unique_id(a1) != compute_unique_id(a2))
res = self.interp_operations(f, [])
assert res
def test_wrap_around_add(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'n'])
class A:
pass
def f(x):
n = 0
while x > 0:
myjitdriver.can_enter_jit(x=x, n=n)
myjitdriver.jit_merge_point(x=x, n=n)
x += 1
n += 1
return n
res = self.meta_interp(f, [sys.maxint-10])
assert res == 11
self.check_jitcell_token_count(1)
def test_wrap_around_mul(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'n'])
class A:
pass
def f(x):
n = 0
while x > 0:
myjitdriver.can_enter_jit(x=x, n=n)
myjitdriver.jit_merge_point(x=x, n=n)
x *= 2
n += 1
return n
res = self.meta_interp(f, [sys.maxint>>10])
assert res == 11
self.check_jitcell_token_count(1)
def test_wrap_around_sub(self):
myjitdriver = JitDriver(greens = [], reds = ['x', 'n'])
class A:
pass
def f(x):
n = 0
while x < 0:
myjitdriver.can_enter_jit(x=x, n=n)
myjitdriver.jit_merge_point(x=x, n=n)
x -= 1
n += 1
return n
res = self.meta_interp(f, [10-sys.maxint])
assert res == 12
self.check_jitcell_token_count(1)
def test_caching_setfield(self):
myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node'])
class A:
pass
def f(n, a):
i = sa = 0
node = A()
node.val1 = node.val2 = 0
while i < n:
myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, node=node)
myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, node=node)
sa += node.val1 + node.val2
if i < n/2:
node.val1 = a
node.val2 = a
else:
node.val1 = a
node.val2 = a + 1
i += 1
return sa
res = self.meta_interp(f, [32, 7])
assert res == f(32, 7)
def test_caching_setarrayitem_fixed(self):
myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node'])
def f(n, a):
i = sa = 0
node = [1, 2, 3]
while i < n:
myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, node=node)
myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, node=node)
sa += node[0] + node[1]
if i < n/2:
node[0] = a
node[1] = a
else:
node[0] = a
node[1] = a + 1
i += 1
return sa
res = self.meta_interp(f, [32, 7])
assert res == f(32, 7)
def test_caching_setarrayitem_var(self):
myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'b', 'node'])
def f(n, a, b):
i = sa = 0
node = [1, 2, 3]
while i < n:
myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, b=b, node=node)
myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, b=b, node=node)
sa += node[0] + node[b]
if i < n/2:
node[0] = a
node[b] = a
else:
node[0] = a
node[b] = a + 1
i += 1
return sa
res = self.meta_interp(f, [32, 7, 2])
assert res == f(32, 7, 2)
def test_getfield_result_with_intbound(self):
myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node'])
class A:
pass
def f(n, a):
i = sa = 0
node = A()
node.val1 = a
while i < n:
myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, node=node)
myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, node=node)
if node.val1 > 0:
sa += 1
if i > n/2:
node.val1 = -a
i += 1
return sa
res = self.meta_interp(f, [32, 7])
assert res == f(32, 7)
def test_getfield_result_constant(self):
myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node'])
class A:
pass
def f(n, a):
i = sa = 0
node = A()
node.val1 = 7
while i < n:
myjitdriver.can_enter_jit(sa=sa, i=i, n=n, a=a, node=node)
myjitdriver.jit_merge_point(sa=sa, i=i, n=n, a=a, node=node)
if node.val1 == 7:
sa += 1
if i > n/2:
node.val1 = -7
i += 1
return sa
res = self.meta_interp(f, [32, 7])
assert res == f(32, 7)
def test_overflowing_shift_pos(self):
myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa'])
def f1(a, b):
n = sa = 0
while n < 10:
myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa)
if 0 < a <= 5: pass
if 0 < b <= 5: pass
sa += (((((a << b) << b) << b) >> b) >> b) >> b
n += 1
return sa
def f2(a, b):
n = sa = 0
while n < 10:
myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa)
if 0 < a < promote(sys.maxint/2): pass
if 0 < b < 100: pass
sa += (((((a << b) << b) << b) >> b) >> b) >> b
n += 1
return sa
assert self.meta_interp(f1, [5, 5]) == 50
self.check_resops(int_rshift=0)
for f in (f1, f2):
assert self.meta_interp(f, [5, 6]) == 50
self.check_resops(int_rshift=3)
assert self.meta_interp(f, [10, 5]) == 100
self.check_resops(int_rshift=3)
assert self.meta_interp(f, [10, 6]) == 100
self.check_resops(int_rshift=3)
assert self.meta_interp(f, [5, 31]) == 0
self.check_resops(int_rshift=3)
bigval = 1
while is_valid_int(bigval << 3):
bigval = bigval << 1
assert self.meta_interp(f, [bigval, 5]) == 0
self.check_resops(int_rshift=3)
def test_overflowing_shift_neg(self):
myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'n', 'sa'])
def f1(a, b):
n = sa = 0
while n < 10:
myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa)
if -5 <= a < 0: pass
if 0 < b <= 5: pass
sa += (((((a << b) << b) << b) >> b) >> b) >> b
n += 1
return sa
def f2(a, b):
n = sa = 0
while n < 10:
myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa)
if -promote(sys.maxint/2) < a < 0: pass
if 0 < b < 100: pass
sa += (((((a << b) << b) << b) >> b) >> b) >> b
n += 1
return sa
assert self.meta_interp(f1, [-5, 5]) == -50
self.check_resops(int_rshift=0)
for f in (f1, f2):
assert self.meta_interp(f, [-5, 6]) == -50
self.check_resops(int_rshift=3)
assert self.meta_interp(f, [-10, 5]) == -100
self.check_resops(int_rshift=3)
assert self.meta_interp(f, [-10, 6]) == -100
self.check_resops(int_rshift=3)
assert self.meta_interp(f, [-5, 31]) == 0
self.check_resops(int_rshift=3)
bigval = 1
while is_valid_int(bigval << 3):
bigval = bigval << 1
assert self.meta_interp(f, [bigval, 5]) == 0
self.check_resops(int_rshift=3)
def test_pure_op_not_to_be_propagated(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'sa'])
def f(n):
sa = 0
while n > 0:
myjitdriver.jit_merge_point(n=n, sa=sa)
sa += n + 1
n -= 1
return sa
assert self.meta_interp(f, [10]) == f(10)
def test_inputarg_reset_bug(self):
## j = 0
## while j < 100:
## j += 1
## c = 0
## j = 0
## while j < 2:
## j += 1
## if c == 0:
## c = 1
## else:
## c = 0
## j = 0
## while j < 100:
## j += 1
def get_printable_location(i):
return str(i)
myjitdriver = JitDriver(greens = ['i'], reds = ['j', 'c', 'a'],
get_printable_location=get_printable_location)
bytecode = "0j10jc20a3"
def f():
set_param(myjitdriver, 'threshold', 7)
set_param(myjitdriver, 'trace_eagerness', 1)
i = j = c = a = 1
while True:
myjitdriver.jit_merge_point(i=i, j=j, c=c, a=a)
if i >= len(bytecode):
break
op = bytecode[i]
i += 1
if op == 'j':
j += 1
elif op == 'c':
promote(c)
c = 1 - c
elif op == '2':
if j < 3:
i -= 3
myjitdriver.can_enter_jit(i=i, j=j, c=c, a=a)
elif op == '1':
k = j*a
if j < 100:
i -= 2
a += k
myjitdriver.can_enter_jit(i=i, j=j, c=c, a=a)
else:
a += k*2
elif op == '0':
j = c = a = 0
elif op == 'a':
j += 1
a += 1
elif op == '3':
if a < 100:
i -= 2
myjitdriver.can_enter_jit(i=i, j=j, c=c, a=a)
else:
return ord(op)
return 42
assert f() == 42
def g():
res = 1
for i in range(10):
res = f()
return res
res = self.meta_interp(g, [])
assert res == 42
def test_read_timestamp(self):
import time
from rpython.rlib.rtimer import read_timestamp
def busy_loop():
start = time.time()
while time.time() - start < 0.1:
# busy wait
pass
def f():
t1 = read_timestamp()
busy_loop()
t2 = read_timestamp()
return t2 - t1 > 1000
res = self.interp_operations(f, [])
assert res
def test_bug688_multiple_immutable_fields(self):
myjitdriver = JitDriver(greens=[], reds=['counter','context'])
class Tag:
pass
class InnerContext():
_immutable_fields_ = ['variables','local_names']
def __init__(self, variables):
self.variables = variables
self.local_names = [0]
def store(self):
self.local_names[0] = 1
def retrieve(self):
variables = self.variables
promote(variables)
result = self.local_names[0]
if result == 0:
return -1
else:
return -1
def build():
context = InnerContext(Tag())
context.store()
counter = 0
while True:
myjitdriver.jit_merge_point(context=context, counter = counter)
context.retrieve()
context.retrieve()
counter += 1
if counter > 10:
return 7
assert self.meta_interp(build, []) == 7
self.check_resops(getfield_gc_pure_r=2)
def test_args_becomming_equal(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b'])
def f(n, a, b):
sa = i = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a, b=b)
sa += a
sa *= b
if i > n/2:
a = b
i += 1
return sa
assert self.meta_interp(f, [20, 1, 2]) == f(20, 1, 2)
def test_args_becomming_equal_boxed1(self):
class A(object):
def __init__(self, a, b):
self.a = a
self.b = b
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b', 'node'])
def f(n, a, b):
sa = i = 0
node = A(a,b)
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a, b=b, node=node)
sa += node.a
sa *= node.b
if i > n/2:
node = A(b, b)
else:
node = A(a, b)
i += 1
return sa
assert self.meta_interp(f, [20, 1, 2]) == f(20, 1, 2)
def test_args_becomming_not_equal_boxed1(self):
class A(object):
def __init__(self, a, b):
self.a = a
self.b = b
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b', 'node'])
def f(n, a, b):
sa = i = 0
node = A(b, b)
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a, b=b, node=node)
sa += node.a
sa *= node.b
if i > n/2:
node = A(a, b)
else:
node = A(b, b)
i += 1
return sa
assert self.meta_interp(f, [20, 1, 2]) == f(20, 1, 2)
def test_args_becomming_equal_boxed2(self):
class A(object):
def __init__(self, a, b):
self.a = a
self.b = b
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node'])
def f(n, a, b):
sa = i = 0
node = A(a, b)
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, node=node)
sa += node.a
sa *= node.b
if i > n/2:
node = A(node.b, node.b)
else:
node = A(node.b, node.a)
i += 1
return sa
assert self.meta_interp(f, [20, 1, 2]) == f(20, 1, 2)
def test_inlined_short_preamble_guard_needed_in_loop1(self):
class A(object):
def __init__(self, a):
self.a = a
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa',
'a', 'b'])
def f(n, a, b):
sa = i = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a, b=b)
if a.a < 10:
sa += a.a
b.a = i
i += 1
return sa
def g(n):
return f(n, A(5), A(10))
assert self.meta_interp(g, [20]) == g(20)
def test_ovf_guard_in_short_preamble2(self):
class A(object):
def __init__(self, val):
self.val = val
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'node1', 'node2'])
def f(n, a):
node1 = node2 = A(0)
sa = i = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a, node1=node1, node2=node2)
node2.val = 7
if a >= 100:
sa += 1
try:
sa += ovfcheck(i + i)
except OverflowError:
assert 0
node1 = A(i)
i += 1
assert self.meta_interp(f, [20, 7]) == f(20, 7)
def test_intbounds_generalized(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa'])
def f(n):
sa = i = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa)
if i > n/2:
sa += 1
else:
sa += 2
i += 1
return sa
assert self.meta_interp(f, [20]) == f(20)
self.check_resops(int_lt=4, int_le=0, int_ge=0, int_gt=4)
def test_intbounds_not_generalized1(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa'])
def f(n):
sa = i = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa)
if i > n/2:
sa += 1
else:
sa += 2
assert -100 < i < 100
i += 1
return sa
assert self.meta_interp(f, [20]) == f(20)
self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=5)
def test_intbounds_not_generalized2(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node'])
class A(object):
def __init__(self, val):
self.val = val
def f(n):
sa = i = 0
node = A(n)
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, node=node)
if i > n/2:
sa += 1
else:
sa += 2
assert -100 <= node.val <= 100
i += 1
return sa
assert self.meta_interp(f, [20]) == f(20)
self.check_resops(int_lt=4, int_le=3, int_ge=3, int_gt=4)
def test_retrace_limit1(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a'])
def f(n, limit):
set_param(myjitdriver, 'retrace_limit', limit)
sa = i = a = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a)
a = i/4
a = hint(a, promote=True)
sa += a
i += 1
return sa
assert self.meta_interp(f, [20, 2]) == f(20, 2)
self.check_jitcell_token_count(1)
self.check_target_token_count(4)
assert self.meta_interp(f, [20, 3]) == f(20, 3)
self.check_jitcell_token_count(1)
self.check_target_token_count(5)
def test_max_retrace_guards(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a'])
def f(n, limit):
set_param(myjitdriver, 'retrace_limit', 3)
set_param(myjitdriver, 'max_retrace_guards', limit)
sa = i = a = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a)
a = i/4
a = hint(a, promote=True)
sa += a
i += 1
return sa
assert self.meta_interp(f, [20, 1]) == f(20, 1)
self.check_jitcell_token_count(1)
self.check_target_token_count(2)
assert self.meta_interp(f, [20, 10]) == f(20, 10)
self.check_jitcell_token_count(1)
self.check_target_token_count(5)
def test_max_unroll_loops(self):
from rpython.jit.metainterp.optimize import InvalidLoop
from rpython.jit.metainterp import optimizeopt
myjitdriver = JitDriver(greens = [], reds = ['n', 'i'])
#
def f(n, limit):
set_param(myjitdriver, 'threshold', 5)
set_param(myjitdriver, 'max_unroll_loops', limit)
i = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i)
print i
i += 1
return i
#
def my_optimize_trace(*args, **kwds):
raise InvalidLoop
old_optimize_trace = optimizeopt.optimize_trace
optimizeopt.optimize_trace = my_optimize_trace
try:
res = self.meta_interp(f, [23, 4])
assert res == 23
self.check_trace_count(0)
self.check_aborted_count(3)
#
res = self.meta_interp(f, [23, 20])
assert res == 23
self.check_trace_count(0)
self.check_aborted_count(2)
finally:
optimizeopt.optimize_trace = old_optimize_trace
def test_max_unroll_loops_retry_without_unroll(self):
from rpython.jit.metainterp.optimize import InvalidLoop
from rpython.jit.metainterp import optimizeopt
myjitdriver = JitDriver(greens = [], reds = ['n', 'i'])
#
def f(n, limit):
set_param(myjitdriver, 'threshold', 5)
set_param(myjitdriver, 'max_unroll_loops', limit)
i = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i)
print i
i += 1
return i
#
seen = []
def my_optimize_trace(metainterp_sd, jitdriver_sd, data, memo=None):
seen.append('unroll' in data.enable_opts)
raise InvalidLoop
old_optimize_trace = optimizeopt.optimize_trace
optimizeopt.optimize_trace = my_optimize_trace
try:
if not self.basic:
py.test.skip("unrolling")
res = self.meta_interp(f, [23, 4])
assert res == 23
assert False in seen
assert True in seen
finally:
optimizeopt.optimize_trace = old_optimize_trace
def test_retrace_limit_with_extra_guards(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a',
'node'])
def f(n, limit):
set_param(myjitdriver, 'retrace_limit', limit)
sa = i = a = 0
node = [1, 2, 3]
node[1] = n
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa, a=a, node=node)
a = i/4
a = hint(a, promote=True)
if i&1 == 0:
sa += node[i%3]
sa += a
i += 1
return sa
assert self.meta_interp(f, [20, 2]) == f(20, 2)
self.check_jitcell_token_count(1)
self.check_target_token_count(4)
assert self.meta_interp(f, [20, 3]) == f(20, 3)
self.check_jitcell_token_count(1)
self.check_target_token_count(5)
def test_retrace_ending_up_retracing_another_loop(self):
myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'i', 'sa'])
bytecode = "0+sI0+SI"
def f(n):
set_param(None, 'threshold', 3)
set_param(None, 'trace_eagerness', 1)
set_param(None, 'retrace_limit', 5)
set_param(None, 'function_threshold', -1)
pc = sa = i = 0
while pc < len(bytecode):
myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i)
n = hint(n, promote=True)
op = bytecode[pc]
if op == '0':
i = 0
elif op == '+':
i += 1
elif op == 's':
sa += i
elif op == 'S':
sa += 2
elif op == 'I':
if i < n:
pc -= 2
myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i)
continue
pc += 1
return sa
def g(n1, n2):
for i in range(10):
f(n1)
for i in range(10):
f(n2)
nn = [10, 3]
assert self.meta_interp(g, nn) == g(*nn)
        # The attempts at retracing the first loop end up retracing the
        # second and thus fail 5 times, saturating the retrace_count. Instead a
        # bridge back to the preamble of the first loop is produced. A guard in
        # this bridge is later traced, resulting in a failed attempt at retracing
        # the second loop.
self.check_trace_count(8)
        # FIXME: Add a global retrace counter and test that we are not trying more than 5 times.
def g(n):
for i in range(n):
for j in range(10):
f(n-i)
res = self.meta_interp(g, [10])
assert res == g(10)
self.check_jitcell_token_count(2)
if 0:
for cell in get_stats().get_all_jitcell_tokens():
                # Initial trace with two labels and 5 retraces
assert len(cell.target_tokens) <= 7
def test_nested_retrace(self):
myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa'])
bytecode = "ij+Jj+JI"
def f(n, a):
set_param(None, 'threshold', 5)
set_param(None, 'trace_eagerness', 1)
set_param(None, 'retrace_limit', 2)
pc = sa = i = j = 0
while pc < len(bytecode):
myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i, j=j, a=a)
a = hint(a, promote=True)
op = bytecode[pc]
if op == 'i':
i = 0
elif op == 'j':
j = 0
elif op == '+':
sa += a
elif op == 'J':
j += 1
if j < 3:
pc -= 1
myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j, a=a)
continue
elif op == 'I':
i += 1
if i < n:
pc -= 6
myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i, j=j, a=a)
continue
pc += 1
return sa
res = self.meta_interp(f, [10, 7])
assert res == f(10, 7)
self.check_jitcell_token_count(2)
if self.basic:
for cell in get_stats().get_all_jitcell_tokens():
assert len(cell.target_tokens) == 2
def g(n):
return f(n, 2) + f(n, 3)
res = self.meta_interp(g, [10])
assert res == g(10)
self.check_jitcell_token_count(2)
if self.basic:
for cell in get_stats().get_all_jitcell_tokens():
assert len(cell.target_tokens) <= 3
def g(n):
return f(n, 2) + f(n, 3) + f(n, 4) + f(n, 5) + f(n, 6) + f(n, 7)
res = self.meta_interp(g, [10])
assert res == g(10)
# 2 loops and one function
self.check_jitcell_token_count(3)
cnt = 0
if self.basic:
for cell in get_stats().get_all_jitcell_tokens():
if cell.target_tokens is None:
cnt += 1
else:
assert len(cell.target_tokens) <= 4
assert cnt == 1
def test_frame_finished_during_retrace(self):
class Base(object):
pass
class A(Base):
def __init__(self, a):
self.val = a
self.num = 1
def inc(self):
return A(self.val + 1)
class B(Base):
def __init__(self, a):
self.val = a
self.num = 1000
def inc(self):
return B(self.val + 1)
myjitdriver = JitDriver(greens = [], reds = ['sa', 'a'])
def f():
set_param(None, 'threshold', 3)
set_param(None, 'trace_eagerness', 2)
a = A(0)
sa = 0
while a.val < 8:
myjitdriver.jit_merge_point(a=a, sa=sa)
a = a.inc()
if a.val > 4:
a = B(a.val)
sa += a.num
return sa
res = self.meta_interp(f, [])
assert res == f()
def test_frame_finished_during_continued_retrace(self):
class Base(object):
pass
class A(Base):
def __init__(self, a):
self.val = a
self.num = 100
def inc(self):
return A(self.val + 1)
class B(Base):
def __init__(self, a):
self.val = a
self.num = 10000
def inc(self):
return B(self.val + 1)
myjitdriver = JitDriver(greens = [], reds = ['sa', 'b', 'a'])
def f(b):
set_param(None, 'threshold', 6)
set_param(None, 'trace_eagerness', 4)
a = A(0)
sa = 0
while a.val < 15:
myjitdriver.jit_merge_point(a=a, b=b, sa=sa)
a = a.inc()
if a.val > 8:
a = B(a.val)
if b == 1:
b = 2
else:
b = 1
sa += a.num + b
return sa
res = self.meta_interp(f, [1])
assert res == f(1)
def test_remove_array_operations(self):
myjitdriver = JitDriver(greens = [], reds = ['a'])
class W_Int:
def __init__(self, intvalue):
self.intvalue = intvalue
def f(x):
a = [W_Int(x)]
while a[0].intvalue > 0:
myjitdriver.jit_merge_point(a=a)
a[0] = W_Int(a[0].intvalue - 3)
return a[0].intvalue
res = self.meta_interp(f, [100])
assert res == -2
self.check_resops(setarrayitem_gc=2, getarrayitem_gc_r=1)
def test_continue_tracing_with_boxes_in_start_snapshot_replaced_by_optimizer(self):
myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'a', 'b'])
def f(n):
sa = a = 0
b = 10
while n:
myjitdriver.jit_merge_point(sa=sa, n=n, a=a, b=b)
sa += b
b += 1
if b > 7:
pass
if a == 0:
a = 1
elif a == 1:
a = 2
elif a == 2:
a = 0
sa += a
sa += 0
n -= 1
return sa
res = self.meta_interp(f, [16])
assert res == f(16)
def test_loopinvariant_array_shrinking1(self):
myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'i', 'a'])
def f(n):
sa = i = 0
a = [0, 1, 2, 3, 4]
while i < n:
myjitdriver.jit_merge_point(sa=sa, n=n, a=a, i=i)
if i < n / 2:
sa += a[4]
elif i == n / 2:
a.pop()
i += 1
res = self.meta_interp(f, [32])
assert res == f(32)
self.check_resops(arraylen_gc=3)
def test_ulonglong_mod(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'a'])
class A:
pass
def f(n):
sa = i = rffi.cast(rffi.ULONGLONG, 1)
a = A()
while i < rffi.cast(rffi.ULONGLONG, n):
a.sa = sa
a.i = i
myjitdriver.jit_merge_point(n=n, a=a)
sa = a.sa
i = a.i
sa += sa % i
i += 1
res = self.meta_interp(f, [32])
assert res == f(32)
def test_int_signext(self):
def f(n):
return rffi.cast(rffi.SIGNEDCHAR, n)
def f1(n):
return rffi.cast(rffi.SIGNEDCHAR, n + 1)
res = self.interp_operations(f, [128])
assert res == -128
res = self.interp_operations(f1, [127])
assert res == -128
res = self.interp_operations(f, [-35 + 256 * 29])
assert res == -35
res = self.interp_operations(f, [127 - 256 * 29])
assert res == 127
class BaseLLtypeTests(BasicTests):
def test_identityhash(self):
A = lltype.GcStruct("A")
def f():
obj1 = lltype.malloc(A)
obj2 = lltype.malloc(A)
return lltype.identityhash(obj1) == lltype.identityhash(obj2)
assert not f()
res = self.interp_operations(f, [])
assert not res
def test_oops_on_nongc(self):
from rpython.rtyper.lltypesystem import lltype
TP = lltype.Struct('x')
def f(i1, i2):
p1 = prebuilt[i1]
p2 = prebuilt[i2]
a = p1 is p2
b = p1 is not p2
c = bool(p1)
d = not bool(p2)
return 1000*a + 100*b + 10*c + d
prebuilt = [lltype.malloc(TP, flavor='raw', immortal=True)] * 2
expected = f(0, 1)
assert self.interp_operations(f, [0, 1]) == expected
def test_casts(self):
py.test.skip("xxx fix or kill")
if not self.basic:
py.test.skip("test written in a style that "
"means it's frontend only")
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
TP = lltype.GcStruct('S1')
def f(p):
n = lltype.cast_ptr_to_int(p)
return n
x = lltype.malloc(TP)
xref = lltype.cast_opaque_ptr(llmemory.GCREF, x)
res = self.interp_operations(f, [xref])
y = llmemory.cast_ptr_to_adr(x)
y = llmemory.cast_adr_to_int(y)
assert rffi.get_real_int(res) == rffi.get_real_int(y)
#
TP = lltype.Struct('S2')
prebuilt = [lltype.malloc(TP, immortal=True),
lltype.malloc(TP, immortal=True)]
def f(x):
p = prebuilt[x]
n = lltype.cast_ptr_to_int(p)
return n
res = self.interp_operations(f, [1])
y = llmemory.cast_ptr_to_adr(prebuilt[1])
y = llmemory.cast_adr_to_int(y)
assert rffi.get_real_int(res) == rffi.get_real_int(y)
def test_collapsing_ptr_eq(self):
S = lltype.GcStruct('S')
p = lltype.malloc(S)
driver = JitDriver(greens = [], reds = ['n', 'x'])
def f(n, x):
while n > 0:
driver.can_enter_jit(n=n, x=x)
driver.jit_merge_point(n=n, x=x)
if x:
n -= 1
n -= 1
def main():
f(10, p)
f(10, lltype.nullptr(S))
self.meta_interp(main, [])
def test_enable_opts(self):
jitdriver = JitDriver(greens = [], reds = ['a'])
class A(object):
def __init__(self, i):
self.i = i
def f():
a = A(0)
while a.i < 10:
jitdriver.jit_merge_point(a=a)
jitdriver.can_enter_jit(a=a)
a = A(a.i + 1)
self.meta_interp(f, [])
self.check_resops(new_with_vtable=0)
self.meta_interp(f, [], enable_opts='')
self.check_resops(new_with_vtable=1)
def test_two_loopinvariant_arrays1(self):
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'i', 'a'])
TP = lltype.GcArray(lltype.Signed)
def f(n):
sa = i = 0
a = lltype.malloc(TP, 5)
a[4] = 7
while i < n:
myjitdriver.jit_merge_point(sa=sa, n=n, a=a, i=i)
if i < n/2:
sa += a[4]
if i == n/2:
a = lltype.malloc(TP, 3)
i += 1
return sa
res = self.meta_interp(f, [32])
assert res == f(32)
self.check_trace_count(2)
def test_two_loopinvariant_arrays2(self):
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'i', 'a'])
TP = lltype.GcArray(lltype.Signed)
def f(n):
sa = i = 0
a = lltype.malloc(TP, 5)
a[4] = 7
while i < n:
myjitdriver.jit_merge_point(sa=sa, n=n, a=a, i=i)
if i < n/2:
sa += a[4]
elif i > n/2:
sa += a[2]
if i == n/2:
a = lltype.malloc(TP, 3)
a[2] = 42
i += 1
return sa
res = self.meta_interp(f, [32])
assert res == f(32)
self.check_trace_count(2)
def test_two_loopinvariant_arrays3(self):
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'i', 'a'])
TP = lltype.GcArray(lltype.Signed)
def f(n):
sa = i = 0
a = lltype.malloc(TP, 5)
a[2] = 7
while i < n:
myjitdriver.jit_merge_point(sa=sa, n=n, a=a, i=i)
if i < n/2:
sa += a[2]
elif i > n/2:
sa += a[3]
if i == n/2:
a = lltype.malloc(TP, 7)
a[3] = 10
a[2] = 42
i += 1
return sa
res = self.meta_interp(f, [32])
assert res == f(32)
self.check_trace_count(3)
def test_two_loopinvariant_arrays_boxed(self):
class A(object):
def __init__(self, a):
self.a = a
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'i', 'a'])
TP = lltype.GcArray(lltype.Signed)
a1 = A(lltype.malloc(TP, 5))
a2 = A(lltype.malloc(TP, 3))
def f(n):
sa = i = 0
a = a1
a.a[4] = 7
while i < n:
myjitdriver.jit_merge_point(sa=sa, n=n, a=a, i=i)
if i < n/2:
sa += a.a[4]
if i == n/2:
a = a2
i += 1
return sa
res = self.meta_interp(f, [32])
assert res == f(32)
self.check_resops(arraylen_gc=2)
def test_release_gil_flush_heap_cache(self):
T = rffi.CArrayPtr(rffi.TIME_T)
external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True)
# Not a real lock, has all the same properties with respect to GIL
# release though, so good for this test.
class Lock(object):
@dont_look_inside
def acquire(self):
external(lltype.nullptr(T.TO))
@dont_look_inside
def release(self):
external(lltype.nullptr(T.TO))
class X(object):
def __init__(self, idx):
self.field = idx
@dont_look_inside
def get_obj(z):
return X(z)
myjitdriver = JitDriver(greens=[], reds=["n", "l", "z", "lock"])
def f(n, z):
lock = Lock()
l = 0
while n > 0:
myjitdriver.jit_merge_point(lock=lock, l=l, n=n, z=z)
x = get_obj(z)
l += x.field
lock.acquire()
# This must not reuse the previous one.
n -= x.field
lock.release()
return n
res = self.meta_interp(f, [10, 1])
self.check_resops(getfield_gc_i=4)
assert res == f(10, 1)
def test_jit_merge_point_with_raw_pointer(self):
driver = JitDriver(greens = [], reds = ['n', 'x'])
TP = lltype.Array(lltype.Signed, hints={'nolength': True})
def f(n):
x = lltype.malloc(TP, 10, flavor='raw')
x[0] = 1
while n > 0:
driver.jit_merge_point(n=n, x=x)
n -= x[0]
lltype.free(x, flavor='raw')
return n
self.meta_interp(f, [10], repeat=3)
def test_jit_merge_point_with_pbc(self):
driver = JitDriver(greens = [], reds = ['x'])
class A(object):
def __init__(self, x):
self.x = x
def _freeze_(self):
return True
pbc = A(1)
def main(x):
return f(x, pbc)
def f(x, pbc):
while x > 0:
driver.jit_merge_point(x = x)
x -= pbc.x
return x
self.meta_interp(main, [10])
def test_look_inside_iff_const(self):
@look_inside_iff(lambda arg: isconstant(arg))
def f(arg):
s = 0
while arg > 0:
s += arg
arg -= 1
return s
driver = JitDriver(greens = ['code'], reds = ['n', 'arg', 's'])
def main(code, n, arg):
s = 0
while n > 0:
driver.jit_merge_point(code=code, n=n, arg=arg, s=s)
if code == 0:
s += f(arg)
else:
s += f(1)
n -= 1
return s
res = self.meta_interp(main, [0, 10, 2], enable_opts='')
assert res == main(0, 10, 2)
self.check_resops(call_i=1)
res = self.meta_interp(main, [1, 10, 2], enable_opts='')
assert res == main(1, 10, 2)
self.check_resops(call_i=0)
def test_look_inside_iff_const_float(self):
@look_inside_iff(lambda arg: isconstant(arg))
def f(arg):
return arg + 0.5
driver = JitDriver(greens = [], reds = ['n', 'total'])
def main(n):
total = 0.0
while n > 0:
driver.jit_merge_point(n=n, total=total)
total = f(total)
n -= 1
return total
res = self.meta_interp(main, [10], enable_opts='')
assert res == 5.0
self.check_resops(call_f=1)
def test_look_inside_iff_virtual(self):
# There's no good reason for this to be look_inside_iff, but it's a test!
@look_inside_iff(lambda arg, n: isvirtual(arg))
def f(arg, n):
if n == 100:
for i in xrange(n):
n += i
return arg.x
class A(object):
def __init__(self, x):
self.x = x
driver = JitDriver(greens=['n'], reds=['i', 'a'])
def main(n):
i = 0
a = A(3)
while i < 20:
driver.jit_merge_point(i=i, n=n, a=a)
if n == 0:
i += f(a, n)
else:
i += f(A(2), n)
res = self.meta_interp(main, [0], enable_opts='')
assert res == main(0)
self.check_resops(call_i=1, getfield_gc_i=0)
res = self.meta_interp(main, [1], enable_opts='')
assert res == main(1)
self.check_resops(call_i=0, getfield_gc_i=0)
def test_isvirtual_call_assembler(self):
driver = JitDriver(greens = ['code'], reds = ['n', 's'])
@look_inside_iff(lambda t1, t2: isvirtual(t1))
def g(t1, t2):
return t1[0] == t2[0]
def create(n):
return (1, 2, n)
create._dont_inline_ = True
def f(code, n):
s = 0
while n > 0:
driver.can_enter_jit(code=code, n=n, s=s)
driver.jit_merge_point(code=code, n=n, s=s)
t = create(n)
if code:
f(0, 3)
s += t[2]
g(t, (1, 2, n))
n -= 1
return s
self.meta_interp(f, [1, 10], inline=True)
self.check_resops(call_i=0, call_may_force_i=0, call_assembler_i=2)
def test_reuse_elidable_result(self):
driver = JitDriver(reds=['n', 's'], greens = [])
def main(n):
s = 0
while n > 0:
driver.jit_merge_point(s=s, n=n)
s += len(str(n)) + len(str(n))
n -= 1
return s
res = self.meta_interp(main, [10])
assert res == main(10)
self.check_resops({'int_gt': 2, 'strlen': 2, 'guard_true': 2,
'int_sub': 2, 'jump': 1, 'call_r': 2,
'guard_no_exception': 2, 'int_add': 4})
def test_elidable_method(self):
py.test.skip("not supported so far: @elidable methods")
class A(object):
@elidable
def meth(self):
return 41
class B(A):
@elidable
def meth(self):
return 42
x = B()
def callme(x):
return x.meth()
def f():
callme(A())
return callme(x)
res = self.interp_operations(f, [])
assert res == 42
self.check_operations_history({'finish': 1})
def test_look_inside_iff_const_getarrayitem_gc_pure(self):
driver = JitDriver(greens=['unroll'], reds=['s', 'n'])
class A(object):
_immutable_fields_ = ["x[*]"]
def __init__(self, x):
self.x = [x]
@look_inside_iff(lambda x: isconstant(x))
def f(x):
i = 0
for c in x:
i += 1
return i
def main(unroll, n):
s = 0
while n > 0:
driver.jit_merge_point(s=s, n=n, unroll=unroll)
if unroll:
x = A("xx")
else:
x = A("x" * n)
s += f(x.x[0])
n -= 1
return s
res = self.meta_interp(main, [0, 10])
assert res == main(0, 10)
# 2 calls, one for f() and one for char_mul
self.check_resops(call_i=2, call_r=2)
res = self.meta_interp(main, [1, 10])
assert res == main(1, 10)
self.check_resops(call_i=0, call_r=0)
def test_setarrayitem_followed_by_arraycopy(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'x', 'y'])
def f(n):
sa = 0
x = [1,2,n]
y = [1,2,3]
while n > 0:
myjitdriver.jit_merge_point(sa=sa, n=n, x=x, y=y)
y[0] = n
x[0:3] = y
sa += x[0]
n -= 1
return sa
res = self.meta_interp(f, [16])
assert res == f(16)
def test_ptr_eq(self):
myjitdriver = JitDriver(greens = [], reds = ["n", "x"])
class A(object):
def __init__(self, v):
self.v = v
def f(n, x):
while n > 0:
myjitdriver.jit_merge_point(n=n, x=x)
z = 0 / x
a1 = A("key")
a2 = A("\x00")
n -= [a1, a2][z].v is not a2.v
return n
res = self.meta_interp(f, [10, 1])
assert res == 0
def test_instance_ptr_eq(self):
myjitdriver = JitDriver(greens = [], reds = ["n", "i", "a1", "a2"])
class A(object):
pass
def f(n):
a1 = A()
a2 = A()
i = 0
while n > 0:
myjitdriver.jit_merge_point(n=n, i=i, a1=a1, a2=a2)
if n % 2:
a = a2
else:
a = a1
i += a is a1
n -= 1
return i
res = self.meta_interp(f, [10])
assert res == f(10)
def f(n):
a1 = A()
a2 = A()
i = 0
while n > 0:
myjitdriver.jit_merge_point(n=n, i=i, a1=a1, a2=a2)
if n % 2:
a = a2
else:
a = a1
if a is a2:
i += 1
n -= 1
return i
res = self.meta_interp(f, [10])
assert res == f(10)
def test_virtual_array_of_structs(self):
myjitdriver = JitDriver(greens = [], reds=["n", "d"])
def f(n):
d = None
while n > 0:
myjitdriver.jit_merge_point(n=n, d=d)
d = {"q": 1}
if n % 2:
d["k"] = n
else:
d["z"] = n
n -= len(d) - d["q"]
return n
res = self.meta_interp(f, [10])
assert res == 0
def test_virtual_dict_constant_keys(self):
myjitdriver = JitDriver(greens = [], reds = ["n"])
def g(d):
return d["key"] - 1
def f(n):
while n > 0:
myjitdriver.jit_merge_point(n=n)
x = {"key": n}
n = g(x)
del x["key"]
return n
res = self.meta_interp(f, [10])
assert res == 0
self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2,
'int_sub': 2})
def test_virtual_opaque_ptr(self):
myjitdriver = JitDriver(greens = [], reds = ["n"])
erase, unerase = rerased.new_erasing_pair("x")
@look_inside_iff(lambda x: isvirtual(x))
def g(x):
return x[0]
def f(n):
while n > 0:
myjitdriver.jit_merge_point(n=n)
x = []
y = erase(x)
z = unerase(y)
z.append(1)
n -= g(z)
return n
res = self.meta_interp(f, [10])
assert res == 0
self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2,
'int_sub': 2})
def test_virtual_opaque_dict(self):
myjitdriver = JitDriver(greens = [], reds = ["n"])
erase, unerase = rerased.new_erasing_pair("x")
@look_inside_iff(lambda x: isvirtual(x))
def g(x):
return x[0]["key"] - 1
def f(n):
while n > 0:
myjitdriver.jit_merge_point(n=n)
x = [{}]
x[0]["key"] = n
x[0]["other key"] = n
y = erase(x)
z = unerase(y)
n = g(x)
return n
res = self.meta_interp(f, [10])
assert res == 0
self.check_resops({'int_gt': 2, 'getfield_gc_i': 1, 'int_eq': 1,
'guard_true': 2, 'int_sub': 2, 'jump': 1,
'guard_false': 1})
def test_virtual_after_bridge(self):
myjitdriver = JitDriver(greens = [], reds = ["n"])
@look_inside_iff(lambda x: isvirtual(x))
def g(x):
return x[0]
def f(n):
while n > 0:
myjitdriver.jit_merge_point(n=n)
x = [1]
if n & 1: # bridge
n -= g(x)
else:
n -= g(x)
return n
res = self.meta_interp(f, [10])
assert res == 0
self.check_resops(call_i=0, call_may_force_i=0, new_array=0)
def test_convert_from_SmallFunctionSetPBCRepr_to_FunctionsPBCRepr(self):
f1 = lambda n: n+1
f2 = lambda n: n+2
f3 = lambda n: n+3
f4 = lambda n: n+4
f5 = lambda n: n+5
f6 = lambda n: n+6
f7 = lambda n: n+7
f8 = lambda n: n+8
def h(n, x):
return x(n)
h._dont_inline = True
def g(n, x):
return h(n, x)
g._dont_inline = True
def f(n):
n = g(n, f1)
n = g(n, f2)
n = h(n, f3)
n = h(n, f4)
n = h(n, f5)
n = h(n, f6)
n = h(n, f7)
n = h(n, f8)
return n
assert f(5) == 41
translationoptions = {'withsmallfuncsets': 3}
self.interp_operations(f, [5], translationoptions=translationoptions)
def test_annotation_gives_class_knowledge_to_tracer(self):
py.test.skip("disabled")
class Base(object):
pass
class A(Base):
def f(self):
return self.a
def g(self):
return self.a + 1
class B(Base):
def f(self):
return self.b
def g(self):
return self.b + 1
class C(B):
def f(self):
self.c += 1
return self.c
def g(self):
return self.c + 1
@dont_look_inside
def make(x):
if x > 0:
a = A()
a.a = x + 1
elif x < 0:
a = B()
a.b = -x
else:
a = C()
a.c = 10
return a
def f(x):
a = make(x)
if x > 0:
assert isinstance(a, A)
z = a.f()
elif x < 0:
assert isinstance(a, B)
z = a.f()
else:
assert isinstance(a, C)
z = a.f()
return z + a.g()
res1 = f(6)
res2 = self.interp_operations(f, [6])
assert res1 == res2
self.check_operations_history(guard_class=0, record_exact_class=1)
res1 = f(-6)
res2 = self.interp_operations(f, [-6])
assert res1 == res2
# cannot use record_exact_class here, because B has a subclass
self.check_operations_history(guard_class=1)
res1 = f(0)
res2 = self.interp_operations(f, [0])
assert res1 == res2
# here it works again
self.check_operations_history(guard_class=0, record_exact_class=1)
def test_give_class_knowledge_to_tracer_explicitly(self):
from rpython.rtyper.lltypesystem.lloperation import llop
class Base(object):
def f(self):
raise NotImplementedError
def g(self):
raise NotImplementedError
class A(Base):
def f(self):
return self.a
def g(self):
return self.a + 1
class B(Base):
def f(self):
return self.b
def g(self):
return self.b + 1
class C(B):
def f(self):
self.c += 1
return self.c
def g(self):
return self.c + 1
@dont_look_inside
def make(x):
if x > 0:
a = A()
a.a = x + 1
elif x < 0:
a = B()
a.b = -x
else:
a = C()
a.c = 10
return a
def f(x):
a = make(x)
if x > 0:
record_exact_class(a, A)
z = a.f()
elif x < 0:
record_exact_class(a, B)
z = a.f()
else:
record_exact_class(a, C)
z = a.f()
return z + a.g()
res1 = f(6)
res2 = self.interp_operations(f, [6])
assert res1 == res2
self.check_operations_history(guard_class=0, record_exact_class=1)
res1 = f(-6)
res2 = self.interp_operations(f, [-6])
assert res1 == res2
self.check_operations_history(guard_class=0, record_exact_class=1)
res1 = f(0)
res2 = self.interp_operations(f, [0])
assert res1 == res2
# here it works again
self.check_operations_history(guard_class=0, record_exact_class=1)
def test_generator(self):
def g(n):
yield n+1
yield n+2
yield n+3
def f(n):
gen = g(n)
return gen.next() * gen.next() * gen.next()
res = self.interp_operations(f, [10])
assert res == 11 * 12 * 13
self.check_operations_history(int_add=3, int_mul=2)
def test_setinteriorfield(self):
A = lltype.GcArray(lltype.Struct('S', ('x', lltype.Signed)))
a = lltype.malloc(A, 5, immortal=True)
def g(n):
a[n].x = n + 2
return a[n].x
res = self.interp_operations(g, [1])
assert res == 3
def test_float_bytes(self):
from rpython.rlib.rfloat import isnan
def f(n):
ll = float2longlong(n)
return longlong2float(ll)
for x in [2.5, float("nan"), -2.5, float("inf")]:
# There are tests elsewhere to verify the correctness of this.
res = self.interp_operations(f, [x])
assert res == x or isnan(x) and isnan(res)
class TestLLtype(BaseLLtypeTests, LLJitMixin):
def test_tagged(self):
py.test.skip("tagged unsupported")
from rpython.rlib.objectmodel import UnboxedValue
class Base(object):
__slots__ = ()
class Int(UnboxedValue, Base):
__slots__ = ["a"]
def is_pos(self):
return self.a > 0
def dec(self):
try:
return Int(self.a - 1)
except OverflowError:
raise
class Float(Base):
def __init__(self, a):
self.a = a
def is_pos(self):
return self.a > 0
def dec(self):
return Float(self.a - 1)
driver = JitDriver(greens=['pc', 's'], reds=['o'])
def main(fl, n, s):
if s:
s = "--j"
else:
s = "---j"
if fl:
o = Float(float(n))
else:
o = Int(n)
pc = 0
while True:
driver.jit_merge_point(s=s, pc=pc, o=o)
c = s[pc]
if c == "j":
driver.can_enter_jit(s=s, pc=pc, o=o)
if o.is_pos():
pc = 0
continue
else:
break
elif c == "-":
o = o.dec()
pc += 1
return pc
topt = {'taggedpointers': True}
res = self.meta_interp(main, [False, 100, True],
translationoptions=topt)
def test_rerased(self):
eraseX, uneraseX = rerased.new_erasing_pair("X")
#
class X:
def __init__(self, a, b):
self.a = a
self.b = b
#
def f(i, j):
# 'j' should be 0 or 1, not other values
if j > 0:
e = eraseX(X(i, j))
else:
try:
e = rerased.erase_int(i)
except OverflowError:
return -42
if j & 1:
x = uneraseX(e)
return x.a - x.b
else:
return rerased.unerase_int(e)
#
topt = {'taggedpointers': True}
x = self.interp_operations(f, [-128, 0], translationoptions=topt)
assert x == -128
bigint = sys.maxint//2 + 1
x = self.interp_operations(f, [bigint, 0], translationoptions=topt)
assert x == -42
x = self.interp_operations(f, [1000, 1], translationoptions=topt)
assert x == 999
def test_retracing_bridge_from_interpreter_to_finnish(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa'])
def f(n):
sa = i = 0
while i < n:
myjitdriver.jit_merge_point(n=n, i=i, sa=sa)
n = hint(n, promote=True)
sa += 2*n
i += 1
return sa
def g(n):
return f(n) + f(n) + f(n) + f(n) + f(10*n) + f(11*n)
res = self.meta_interp(g, [1], repeat=3)
assert res == g(1)
#self.check_jitcell_token_count(1)
self.check_jitcell_token_count(2)
# XXX A bridge from the interpreter to a finish is first
# constructed for n=1. It is later replaced with a trace for
# the case n=10 which is extended with a retrace for n=11 and
        # finally a new bridge to finish is again traced and created
        # for the case n=1. We were not able to reuse the original n=1
# bridge as a preamble since it does not start with a
# label. The alternative would be to have all such bridges
        # start with labels. I don't know which is better...
def test_ll_arraycopy(self):
A = lltype.GcArray(lltype.Char)
a = lltype.malloc(A, 10)
for i in range(10): a[i] = chr(i)
b = lltype.malloc(A, 10)
#
def f(c, d, e):
rgc.ll_arraycopy(a, b, c, d, e)
return 42
self.interp_operations(f, [1, 2, 3])
self.check_operations_history(call_n=1, guard_no_exception=0)
def test_weakref(self):
import weakref
class A(object):
def __init__(self, x):
self.x = x
def f(i):
a = A(i)
w = weakref.ref(a)
return w().x + a.x
assert self.interp_operations(f, [3]) == 6
def test_gc_add_memory_pressure(self):
def f():
rgc.add_memory_pressure(1234)
return 3
self.interp_operations(f, [])
def test_external_call(self):
from rpython.rlib.objectmodel import invoke_around_extcall
TIME_T = lltype.Signed
# ^^^ some 32-bit platforms have a 64-bit rffi.TIME_T, but we
        # don't want that here; we just always want a Signed value
T = rffi.CArrayPtr(TIME_T)
external = rffi.llexternal("time", [T], TIME_T)
class Oups(Exception):
pass
class State:
pass
state = State()
def before():
if we_are_jitted():
raise Oups
state.l.append("before")
def after():
if we_are_jitted():
raise Oups
state.l.append("after")
def f():
state.l = []
invoke_around_extcall(before, after)
external(lltype.nullptr(T.TO))
return len(state.l)
res = self.interp_operations(f, [])
assert res == 2
res = self.interp_operations(f, [])
assert res == 2
self.check_operations_history(call_release_gil_i=1, call_may_force_i=0)
def test_unescaped_write_zero(self):
class A:
pass
def g():
return A()
@dont_look_inside
def escape():
print "hi!"
def f(n):
a = g()
a.x = n
escape()
a.x = 0
escape()
return a.x
res = self.interp_operations(f, [42])
assert res == 0
def test_conditions_without_guards(self):
def f(n):
if (n == 1) | (n == 3) | (n == 17):
return 42
return 5
res = self.interp_operations(f, [17])
assert res == 42
self.check_operations_history(guard_true=1, guard_false=0)
def test_not_in_trace(self):
class X:
n = 0
def g(x):
if we_are_jitted():
raise NotImplementedError
x.n += 1
g.oopspec = 'jit.not_in_trace()'
jitdriver = JitDriver(greens=[], reds=['n', 'token', 'x'])
def f(n):
token = 0
x = X()
while n >= 0:
jitdriver.jit_merge_point(n=n, x=x, token=token)
if not we_are_jitted():
token += 1
g(x)
n -= 1
return x.n + token * 1000
res = self.meta_interp(f, [10])
assert res == 2003 # two runs before jitting; then one tracing run
self.check_resops(int_add=0, call_i=0, call_may_force_i=0,
call_r=0, call_may_force_r=0, call_f=0,
call_may_force_f=0)
def test_not_in_trace_exception(self):
def g():
if we_are_jitted():
raise NotImplementedError
raise ValueError
g.oopspec = 'jit.not_in_trace()'
jitdriver = JitDriver(greens=[], reds=['n'])
def f(n):
while n >= 0:
jitdriver.jit_merge_point(n=n)
try:
g()
except ValueError:
n -= 1
return 42
res = self.meta_interp(f, [10])
assert res == 42
self.check_aborted_count(3)
def test_not_in_trace_blackhole(self):
class X:
seen = 0
def g(x):
if we_are_jitted():
raise NotImplementedError
x.seen = 42
g.oopspec = 'jit.not_in_trace()'
jitdriver = JitDriver(greens=[], reds=['n'])
def f(n):
while n >= 0:
jitdriver.jit_merge_point(n=n)
n -= 1
x = X()
g(x)
return x.seen
res = self.meta_interp(f, [10])
assert res == 42
def test_int_force_ge_zero(self):
def f(n):
return int_force_ge_zero(n)
res = self.interp_operations(f, [42])
assert res == 42
res = self.interp_operations(f, [-42])
assert res == 0
def test_cmp_fastpaths(self):
class Z: pass
def make_int(cmp):
def f(x):
if cmp == 'eq':
return x == x and x == x
if cmp == 'ne':
return x != x or x != x
if cmp == 'lt':
return x < x or x != x
if cmp == 'le':
return x <= x and x <= x
if cmp == 'gt':
return x > x or x > x
if cmp == 'ge':
return x >= x and x >= x
assert 0
return f
def make_str(cmp):
def f(x):
x = str(x)
if cmp == 'eq':
return x is x or x is x
if cmp == 'ne':
return x is not x and x is not x
assert 0
return f
def make_object(cmp):
def f(x):
y = Z()
y.x = x
x = y
if cmp == 'eq':
return x is x
if cmp == 'ne':
return x is not x
assert 0
return f
for cmp in 'eq ne lt le gt ge'.split():
f = make_int(cmp)
res = self.interp_operations(f, [42])
assert res == f(42)
opname = "int_%s" % cmp
self.check_operations_history(**{opname: 0})
for cmp in 'eq ne'.split():
f = make_str(cmp)
res = self.interp_operations(f, [42])
assert res == f(42)
opname = "ptr_%s" % cmp
self.check_operations_history(**{opname: 0})
f = make_object(cmp)
res = self.interp_operations(f, [42])
assert res == f(42)
opname = "instance_ptr_%s" % cmp
self.check_operations_history(**{opname: 0})
def test_compile_framework_9(self):
class X(object):
def __init__(self, x=0):
self.x = x
next = None
class CheckError(Exception):
pass
def check(flag):
if not flag:
raise CheckError
def before(n, x):
return n, x, None, None, None, None, None, None, None, None, [X(123)], None
def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
if n < 1900:
check(l[0].x == 123)
num = 512 + (n & 7)
l = [None] * num
l[0] = X(123)
l[1] = X(n)
l[2] = X(n+10)
l[3] = X(n+20)
l[4] = X(n+30)
l[5] = X(n+40)
l[6] = X(n+50)
l[7] = X(n+60)
l[num-8] = X(n+70)
l[num-9] = X(n+80)
l[num-10] = X(n+90)
l[num-11] = X(n+100)
l[-12] = X(n+110)
l[-13] = X(n+120)
l[-14] = X(n+130)
l[-15] = X(n+140)
if n < 1800:
num = 512 + (n & 7)
check(len(l) == num)
check(l[0].x == 123)
check(l[1].x == n)
check(l[2].x == n+10)
check(l[3].x == n+20)
check(l[4].x == n+30)
check(l[5].x == n+40)
check(l[6].x == n+50)
check(l[7].x == n+60)
check(l[num-8].x == n+70)
check(l[num-9].x == n+80)
check(l[num-10].x == n+90)
check(l[num-11].x == n+100)
check(l[-12].x == n+110)
check(l[-13].x == n+120)
check(l[-14].x == n+130)
check(l[-15].x == n+140)
n -= x.foo
return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
def after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
check(len(l) >= 512)
check(l[0].x == 123)
check(l[1].x == 2)
check(l[2].x == 12)
check(l[3].x == 22)
check(l[4].x == 32)
check(l[5].x == 42)
check(l[6].x == 52)
check(l[7].x == 62)
check(l[-8].x == 72)
check(l[-9].x == 82)
check(l[-10].x == 92)
check(l[-11].x == 102)
check(l[-12].x == 112)
check(l[-13].x == 122)
check(l[-14].x == 132)
check(l[-15].x == 142)
def allfuncs(num, n):
x = X()
x.foo = 2
main_allfuncs(num, n, x)
x.foo = 5
return x
def main_allfuncs(num, n, x):
n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s = before(n, x)
while n > 0:
myjitdriver.can_enter_jit(num=num, n=n, x=x, x0=x0, x1=x1,
x2=x2, x3=x3, x4=x4, x5=x5, x6=x6, x7=x7, l=l, s=s)
myjitdriver.jit_merge_point(num=num, n=n, x=x, x0=x0, x1=x1,
x2=x2, x3=x3, x4=x4, x5=x5, x6=x6, x7=x7, l=l, s=s)
n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s = f(
n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s)
after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s)
myjitdriver = JitDriver(greens = ['num'],
reds = ['n', 'x', 'x0', 'x1', 'x2', 'x3', 'x4',
'x5', 'x6', 'x7', 'l', 's'])
self.meta_interp(allfuncs, [9, 2000])
def test_unichar_ord_is_never_signed_on_64bit(self):
import sys
if sys.maxunicode == 0xffff:
py.test.skip("test for 32-bit unicodes")
def f(x):
return ord(rffi.cast(lltype.UniChar, x))
res = self.interp_operations(f, [-1])
if sys.maxint == 2147483647:
assert res == -1
else:
assert res == 4294967295
| mit | 8,601,289,197,060,888,000 | 31.582473 | 96 | 0.428545 | false |
JoshBorke/redline | budgets/models.py | 1 | 3890 | import datetime
from decimal import Decimal
from django.db import models
from redline.categories.models import Category, StandardMetadata, ActiveManager
from redline.transactions.models import Transaction
class BudgetManager(ActiveManager):
def most_current_for_date(self, date):
return super(BudgetManager, self).get_query_set().filter(start_date__lte=date).latest('start_date')
def estimates_per_year(self):
estimate = Decimal('0.0')
for budget in super(BudgetManager, self).get_query_set():
estimate = estimate + budget.yearly_estimated_total()
return estimate
class Budget(StandardMetadata):
"""
An object representing a budget.
Only estimates are tied to a budget object, which allows different budgets
    to be applied to the same set of transactions for comparison.
"""
name = models.CharField(max_length=255)
slug = models.SlugField(unique=True)
category = models.ForeignKey(Category)
amount = models.DecimalField(max_digits=11, decimal_places=2)
start_date = models.DateTimeField(default=datetime.datetime.now, db_index=True)
objects = models.Manager()
active = BudgetManager()
def __unicode__(self):
return self.name
def monthly_estimated_total(self):
total = Decimal('0.0')
total = total + self.amount
return total
def yearly_estimated_total(self):
return self.monthly_estimated_total() * 12
def estimates_and_transactions(self, start_date, end_date):
estimates_and_transactions = []
actual_total = Decimal('0.0')
        actual_amount = self.actual_amount(start_date, end_date)
        # accumulate the spent amount so the returned total reflects actual spending
        actual_total += actual_amount
        estimates_and_transactions.append({
            'estimate': self.amount,
            'transactions': self.actual_transactions(start_date, end_date),
            'actual_amount': actual_amount,
        })
        return (estimates_and_transactions, actual_total)
def actual_total(self, start_date, end_date):
actual_total = Decimal('0.0')
actual_total += self.actual_amount(start_date, end_date)
return actual_total
def yearly_estimated_amount(self):
return self.amount * 12
def actual_transactions(self, start_date, end_date):
# Estimates should only report on expenses to prevent incomes from
# (incorrectly) artificially inflating totals.
return Transaction.expenses.filter(category=self.category, date__range=(start_date, end_date)).order_by('date')
def actual_amount(self, start_date, end_date):
total = Decimal('0.0')
for transaction in self.actual_transactions(start_date, end_date):
total += transaction.amount
return total
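# Illustrative usage of the Budget model above (dates and variable names are
# hypothetical, not defined in this module):
#   budget = Budget.active.most_current_for_date(datetime.datetime.now())
#   line_items, total = budget.estimates_and_transactions(start_date, end_date)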
class BudgetEstimate(StandardMetadata):
"""
The individual line items that make up a budget.
Some examples include possible items like "Mortgage", "Rent", "Food", "Misc"
and "Car Payment".
"""
budget = models.ForeignKey(Budget, related_name='estimates')
category = models.ForeignKey(Category, related_name='estimates')
amount = models.DecimalField(max_digits=11, decimal_places=2)
objects = models.Manager()
active = ActiveManager()
def __unicode__(self):
return u"%s - %s" % (self.category.name, self.amount)
def yearly_estimated_amount(self):
return self.amount * 12
def actual_transactions(self, start_date, end_date):
# Estimates should only report on expenses to prevent incomes from
# (incorrectly) artificially inflating totals.
return Transaction.expenses.filter(category=self.category, date__range=(start_date, end_date)).order_by('date')
def actual_amount(self, start_date, end_date):
total = Decimal('0.0')
for transaction in self.actual_transactions(start_date, end_date):
total += transaction.amount
return total
| gpl-3.0 | 9,138,809,675,651,087,000 | 35.018519 | 119 | 0.674807 | false |
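A minimal usage sketch for the Budget model defined in the budgets/models.py row above, assuming a configured Django project with the redline apps installed; the import path, slug and date values are illustrative assumptions.

import datetime
from decimal import Decimal

from redline.budgets.models import Budget  # assumed import path for the model above

def month_report(budget_slug, year, month):
    """Summarise one month of a budget: estimated vs. actual spending."""
    start = datetime.datetime(year, month, 1)
    if month == 12:
        end = datetime.datetime(year + 1, 1, 1)
    else:
        end = datetime.datetime(year, month + 1, 1)

    budget = Budget.active.get(slug=budget_slug)
    rows, _ = budget.estimates_and_transactions(start, end)
    actual = budget.actual_total(start, end)
    estimate = budget.monthly_estimated_total()
    return {
        'estimate': estimate,
        'actual': actual,
        'overspend': max(actual - estimate, Decimal('0.0')),
        'transactions': rows,
    }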
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/thirdparty/gntp/__init__.py | 1 | 13824 | import re
import hashlib
import time
import StringIO
__version__ = '0.8'
#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
GNTP_INFO_LINE = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)' +
' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?' +
'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n',
re.IGNORECASE
)
GNTP_INFO_LINE_SHORT = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',
re.IGNORECASE
)
GNTP_HEADER = re.compile('([\w-]+):(.+)')
GNTP_EOL = '\r\n'
class BaseError(Exception):
def gntp_error(self):
error = GNTPError(self.errorcode, self.errordesc)
return error.encode()
class ParseError(BaseError):
errorcode = 500
errordesc = 'Error parsing the message'
class AuthError(BaseError):
errorcode = 400
errordesc = 'Error with authorization'
class UnsupportedError(BaseError):
errorcode = 500
errordesc = 'Currently unsupported by gntp.py'
class _GNTPBuffer(StringIO.StringIO):
"""GNTP Buffer class"""
def writefmt(self, message="", *args):
"""Shortcut function for writing GNTP Headers"""
self.write((message % args).encode('utf8', 'replace'))
self.write(GNTP_EOL)
class _GNTPBase(object):
"""Base initilization
:param string messagetype: GNTP Message type
:param string version: GNTP Protocol version
:param string encription: Encryption protocol
"""
def __init__(self, messagetype=None, version='1.0', encryption=None):
self.info = {
'version': version,
'messagetype': messagetype,
'encryptionAlgorithmID': encryption
}
self.headers = {}
self.resources = {}
def __str__(self):
return self.encode()
def _parse_info(self, data):
"""Parse the first line of a GNTP message to get security and other info values
:param string data: GNTP Message
:return dict: Parsed GNTP Info line
"""
match = GNTP_INFO_LINE.match(data)
if not match:
raise ParseError('ERROR_PARSING_INFO_LINE')
info = match.groupdict()
if info['encryptionAlgorithmID'] == 'NONE':
info['encryptionAlgorithmID'] = None
return info
def set_password(self, password, encryptAlgo='MD5'):
"""Set a password for a GNTP Message
:param string password: Null to clear password
:param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
"""
hash = {
'MD5': hashlib.md5,
'SHA1': hashlib.sha1,
'SHA256': hashlib.sha256,
'SHA512': hashlib.sha512,
}
self.password = password
self.encryptAlgo = encryptAlgo.upper()
if not password:
self.info['encryptionAlgorithmID'] = None
self.info['keyHashAlgorithm'] = None
return
if not self.encryptAlgo in hash.keys():
raise UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
hashfunction = hash.get(self.encryptAlgo)
password = password.encode('utf8')
seed = time.ctime()
salt = hashfunction(seed).hexdigest()
saltHash = hashfunction(seed).digest()
keyBasis = password + saltHash
key = hashfunction(keyBasis).digest()
keyHash = hashfunction(key).hexdigest()
self.info['keyHashAlgorithmID'] = self.encryptAlgo
self.info['keyHash'] = keyHash.upper()
self.info['salt'] = salt.upper()
def _decode_hex(self, value):
"""Helper function to decode hex string to `proper` hex string
:param string value: Human readable hex string
:return string: Hex string
"""
result = ''
for i in range(0, len(value), 2):
tmp = int(value[i:i + 2], 16)
result += chr(tmp)
return result
def _decode_binary(self, rawIdentifier, identifier):
rawIdentifier += '\r\n\r\n'
dataLength = int(identifier['Length'])
pointerStart = self.raw.find(rawIdentifier) + len(rawIdentifier)
pointerEnd = pointerStart + dataLength
data = self.raw[pointerStart:pointerEnd]
if not len(data) == dataLength:
raise ParseError('INVALID_DATA_LENGTH Expected: %s Received %s' % (dataLength, len(data)))
return data
def _validate_password(self, password):
"""Validate GNTP Message against stored password"""
self.password = password
if password == None:
raise AuthError('Missing password')
keyHash = self.info.get('keyHash', None)
if keyHash is None and self.password is None:
return True
if keyHash is None:
raise AuthError('Invalid keyHash')
if self.password is None:
raise AuthError('Missing password')
password = self.password.encode('utf8')
saltHash = self._decode_hex(self.info['salt'])
keyBasis = password + saltHash
key = hashlib.md5(keyBasis).digest()
keyHash = hashlib.md5(key).hexdigest()
if not keyHash.upper() == self.info['keyHash'].upper():
raise AuthError('Invalid Hash')
return True
def validate(self):
"""Verify required headers"""
for header in self._requiredHeaders:
if not self.headers.get(header, False):
raise ParseError('Missing Notification Header: ' + header)
def _format_info(self):
"""Generate info line for GNTP Message
:return string:
"""
info = u'GNTP/%s %s' % (
self.info.get('version'),
self.info.get('messagetype'),
)
if self.info.get('encryptionAlgorithmID', None):
info += ' %s:%s' % (
self.info.get('encryptionAlgorithmID'),
self.info.get('ivValue'),
)
else:
info += ' NONE'
if self.info.get('keyHashAlgorithmID', None):
info += ' %s:%s.%s' % (
self.info.get('keyHashAlgorithmID'),
self.info.get('keyHash'),
self.info.get('salt')
)
return info
def _parse_dict(self, data):
"""Helper function to parse blocks of GNTP headers into a dictionary
:param string data:
:return dict:
"""
dict = {}
for line in data.split('\r\n'):
match = GNTP_HEADER.match(line)
if not match:
continue
key = unicode(match.group(1).strip(), 'utf8', 'replace')
val = unicode(match.group(2).strip(), 'utf8', 'replace')
dict[key] = val
return dict
def add_header(self, key, value):
if isinstance(value, unicode):
self.headers[key] = value
else:
self.headers[key] = unicode('%s' % value, 'utf8', 'replace')
def add_resource(self, data):
"""Add binary resource
:param string data: Binary Data
"""
identifier = hashlib.md5(data).hexdigest()
self.resources[identifier] = data
return 'x-growl-resource://%s' % identifier
def decode(self, data, password=None):
"""Decode GNTP Message
:param string data:
"""
self.password = password
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(data)
self.headers = self._parse_dict(parts[0])
def encode(self):
"""Encode a generic GNTP Message
:return string: GNTP Message ready to be sent
"""
buffer = _GNTPBuffer()
buffer.writefmt(self._format_info())
#Headers
for k, v in self.headers.iteritems():
buffer.writefmt('%s: %s', k, v)
buffer.writefmt()
#Resources
for resource, data in self.resources.iteritems():
buffer.writefmt('Identifier: %s', resource)
buffer.writefmt('Length: %d', len(data))
buffer.writefmt()
buffer.write(data)
buffer.writefmt()
buffer.writefmt()
return buffer.getvalue()
class GNTPRegister(_GNTPBase):
"""Represents a GNTP Registration Command
:param string data: (Optional) See decode()
:param string password: (Optional) Password to use while encoding/decoding messages
"""
_requiredHeaders = [
'Application-Name',
'Notifications-Count'
]
_requiredNotificationHeaders = ['Notification-Name']
def __init__(self, data=None, password=None):
_GNTPBase.__init__(self, 'REGISTER')
self.notifications = []
if data:
self.decode(data, password)
else:
self.set_password(password)
self.add_header('Application-Name', 'pygntp')
self.add_header('Notifications-Count', 0)
def validate(self):
'''Validate required headers and validate notification headers'''
for header in self._requiredHeaders:
if not self.headers.get(header, False):
raise ParseError('Missing Registration Header: ' + header)
for notice in self.notifications:
for header in self._requiredNotificationHeaders:
if not notice.get(header, False):
raise ParseError('Missing Notification Header: ' + header)
def decode(self, data, password):
"""Decode existing GNTP Registration message
:param string data: Message to decode
"""
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(data)
self._validate_password(password)
self.headers = self._parse_dict(parts[0])
for i, part in enumerate(parts):
if i == 0:
continue # Skip Header
if part.strip() == '':
continue
notice = self._parse_dict(part)
if notice.get('Notification-Name', False):
self.notifications.append(notice)
elif notice.get('Identifier', False):
notice['Data'] = self._decode_binary(part, notice)
#open('register.png','wblol').write(notice['Data'])
self.resources[notice.get('Identifier')] = notice
def add_notification(self, name, enabled=True):
"""Add new Notification to Registration message
:param string name: Notification Name
:param boolean enabled: Enable this notification by default
"""
notice = {}
notice['Notification-Name'] = u'%s' % name
notice['Notification-Enabled'] = u'%s' % enabled
self.notifications.append(notice)
self.add_header('Notifications-Count', len(self.notifications))
def encode(self):
"""Encode a GNTP Registration Message
:return string: Encoded GNTP Registration message
"""
buffer = _GNTPBuffer()
buffer.writefmt(self._format_info())
#Headers
for k, v in self.headers.iteritems():
buffer.writefmt('%s: %s', k, v)
buffer.writefmt()
#Notifications
if len(self.notifications) > 0:
for notice in self.notifications:
for k, v in notice.iteritems():
buffer.writefmt('%s: %s', k, v)
buffer.writefmt()
#Resources
for resource, data in self.resources.iteritems():
buffer.writefmt('Identifier: %s', resource)
buffer.writefmt('Length: %d', len(data))
buffer.writefmt()
buffer.write(data)
buffer.writefmt()
buffer.writefmt()
return buffer.getvalue()
class GNTPNotice(_GNTPBase):
"""Represents a GNTP Notification Command
:param string data: (Optional) See decode()
:param string app: (Optional) Set Application-Name
:param string name: (Optional) Set Notification-Name
:param string title: (Optional) Set Notification Title
:param string password: (Optional) Password to use while encoding/decoding messages
"""
_requiredHeaders = [
'Application-Name',
'Notification-Name',
'Notification-Title'
]
def __init__(self, data=None, app=None, name=None, title=None, password=None):
_GNTPBase.__init__(self, 'NOTIFY')
if data:
self.decode(data, password)
else:
self.set_password(password)
if app:
self.add_header('Application-Name', app)
if name:
self.add_header('Notification-Name', name)
if title:
self.add_header('Notification-Title', title)
def decode(self, data, password):
"""Decode existing GNTP Notification message
:param string data: Message to decode.
"""
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(data)
self._validate_password(password)
self.headers = self._parse_dict(parts[0])
for i, part in enumerate(parts):
if i == 0:
continue # Skip Header
if part.strip() == '':
continue
notice = self._parse_dict(part)
if notice.get('Identifier', False):
notice['Data'] = self._decode_binary(part, notice)
#open('notice.png','wblol').write(notice['Data'])
self.resources[notice.get('Identifier')] = notice
class GNTPSubscribe(_GNTPBase):
"""Represents a GNTP Subscribe Command
:param string data: (Optional) See decode()
:param string password: (Optional) Password to use while encoding/decoding messages
"""
_requiredHeaders = [
'Subscriber-ID',
'Subscriber-Name',
]
def __init__(self, data=None, password=None):
_GNTPBase.__init__(self, 'SUBSCRIBE')
if data:
self.decode(data, password)
else:
self.set_password(password)
class GNTPOK(_GNTPBase):
"""Represents a GNTP OK Response
:param string data: (Optional) See _GNTPResponse.decode()
:param string action: (Optional) Set type of action the OK Response is for
"""
_requiredHeaders = ['Response-Action']
def __init__(self, data=None, action=None):
_GNTPBase.__init__(self, '-OK')
if data:
self.decode(data)
if action:
self.add_header('Response-Action', action)
class GNTPError(_GNTPBase):
"""Represents a GNTP Error response
:param string data: (Optional) See _GNTPResponse.decode()
:param string errorcode: (Optional) Error code
:param string errordesc: (Optional) Error Description
"""
_requiredHeaders = ['Error-Code', 'Error-Description']
def __init__(self, data=None, errorcode=None, errordesc=None):
_GNTPBase.__init__(self, '-ERROR')
if data:
self.decode(data)
if errorcode:
self.add_header('Error-Code', errorcode)
self.add_header('Error-Description', errordesc)
def error(self):
return (self.headers.get('Error-Code', None),
self.headers.get('Error-Description', None))
def parse_gntp(data, password=None):
"""Attempt to parse a message as a GNTP message
:param string data: Message to be parsed
:param string password: Optional password to be used to verify the message
"""
match = GNTP_INFO_LINE_SHORT.match(data)
if not match:
raise ParseError('INVALID_GNTP_INFO')
info = match.groupdict()
if info['messagetype'] == 'REGISTER':
return GNTPRegister(data, password=password)
elif info['messagetype'] == 'NOTIFY':
return GNTPNotice(data, password=password)
elif info['messagetype'] == 'SUBSCRIBE':
return GNTPSubscribe(data, password=password)
elif info['messagetype'] == '-OK':
return GNTPOK(data)
elif info['messagetype'] == '-ERROR':
return GNTPError(data)
raise ParseError('INVALID_GNTP_MESSAGE')
| gpl-3.0 | 7,948,145,079,560,482,000 | 26.159136 | 105 | 0.68627 | false |
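A short usage sketch for the GNTP message classes in the row above (GNTPRegister, GNTPNotice, parse_gntp); the import name, application name, notification text and password are illustrative assumptions, and the socket I/O to a Growl daemon is omitted.

# Hypothetical usage of the GNTP message classes defined above (Python 2, like the module).
import gntp  # assumed import name for the module above

password = 'secret'  # assumed shared secret

# Build a registration message with one notification type.
register = gntp.GNTPRegister(password=password)
register.add_header('Application-Name', 'MyApp')
register.add_notification('Build Finished', enabled=True)
payload = register.encode()  # bytes ready to send over TCP to the Growl daemon

# Build a notification message.
notice = gntp.GNTPNotice(app='MyApp', name='Build Finished',
                         title='Done', password=password)
notice.add_header('Notification-Text', 'All tests passed')
wire = notice.encode()

# On the receiving side, a raw message can be parsed back into an object:
parsed = gntp.parse_gntp(wire, password=password)
parsed.validate()  # raises ParseError if required headers are missing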
danielru/pySDC | pySDC/implementations/problem_classes/GeneralizedFisher_1D_FD_implicit.py | 1 | 6049 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve
from pySDC.core.Problem import ptype
from pySDC.core.Errors import ParameterError, ProblemError
# noinspection PyUnusedLocal
class generalized_fisher(ptype):
"""
Example implementing the generalized Fisher's equation in 1D with finite differences
Attributes:
A: second-order FD discretization of the 1D laplace operator
dx: distance between two spatial nodes
"""
def __init__(self, problem_params, dtype_u, dtype_f):
"""
Initialization routine
Args:
problem_params (dict): custom parameters for the example
dtype_u: mesh data type (will be passed parent class)
dtype_f: mesh data type (will be passed parent class)
"""
# these parameters will be used later, so assert their existence
essential_keys = ['nvars', 'nu', 'lambda0', 'newton_maxiter', 'newton_tol', 'interval']
for key in essential_keys:
if key not in problem_params:
msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))
raise ParameterError(msg)
# we assert that nvars looks very particular here.. this will be necessary for coarsening in space later on
if (problem_params['nvars'] + 1) % 2 != 0:
raise ProblemError('setup requires nvars = 2^p - 1')
# invoke super init, passing number of dofs, dtype_u and dtype_f
super(generalized_fisher, self).__init__(problem_params['nvars'], dtype_u, dtype_f, problem_params)
# compute dx and get discretization matrix A
self.dx = (self.params.interval[1] - self.params.interval[0]) / (self.params.nvars + 1)
self.A = self.__get_A(self.params.nvars, self.dx)
@staticmethod
def __get_A(N, dx):
"""
Helper function to assemble FD matrix A in sparse format
Args:
N (int): number of dofs
dx (float): distance between two spatial nodes
Returns:
scipy.sparse.csc_matrix: matrix A in CSC format
"""
stencil = [1, -2, 1]
A = sp.diags(stencil, [-1, 0, 1], shape=(N + 2, N + 2), format='lil')
A *= 1.0 / (dx ** 2)
return A
# noinspection PyTypeChecker
def solve_system(self, rhs, factor, u0, t):
"""
Simple Newton solver
Args:
rhs (dtype_f): right-hand side for the nonlinear system
factor (float): abbrev. for the node-to-node stepsize (or any other factor required)
u0 (dtype_u): initial guess for the iterative solver
t (float): current time (required here for the BC)
Returns:
dtype_u: solution u
"""
u = self.dtype_u(u0)
nu = self.params.nu
lambda0 = self.params.lambda0
# set up boundary values to embed inner points
lam1 = lambda0 / 2.0 * ((nu / 2.0 + 1) ** 0.5 + (nu / 2.0 + 1) ** (-0.5))
sig1 = lam1 - np.sqrt(lam1 ** 2 - lambda0 ** 2)
ul = (1 + (2 ** (nu / 2.0) - 1) *
np.exp(-nu / 2.0 * sig1 * (self.params.interval[0] + 2 * lam1 * t))) ** (-2.0 / nu)
ur = (1 + (2 ** (nu / 2.0) - 1) *
np.exp(-nu / 2.0 * sig1 * (self.params.interval[1] + 2 * lam1 * t))) ** (-2.0 / nu)
# start newton iteration
n = 0
while n < self.params.newton_maxiter:
# form the function g with g(u) = 0
uext = np.concatenate(([ul], u.values, [ur]))
g = u.values - \
factor * (self.A.dot(uext)[1:-1] + lambda0 ** 2 * u.values * (1 - u.values ** nu)) - rhs.values
# if g is close to 0, then we are done
res = np.linalg.norm(g, np.inf)
if res < self.params.newton_tol:
break
# assemble dg
dg = sp.eye(self.params.nvars) - factor * \
(self.A[1:-1, 1:-1] + sp.diags(lambda0 ** 2 - lambda0 ** 2 * (nu + 1) * u.values ** nu, offsets=0))
# newton update: u1 = u0 - g/dg
u.values -= spsolve(dg, g)
# increase iteration count
n += 1
return u
def eval_f(self, u, t):
"""
Routine to evaluate the RHS
Args:
u (dtype_u): current values
t (float): current time
Returns:
dtype_f: the RHS
"""
# set up boundary values to embed inner points
lam1 = self.params.lambda0 / 2.0 * ((self.params.nu / 2.0 + 1) ** 0.5 + (self.params.nu / 2.0 + 1) ** (-0.5))
sig1 = lam1 - np.sqrt(lam1 ** 2 - self.params.lambda0 ** 2)
ul = (1 + (2 ** (self.params.nu / 2.0) - 1) *
np.exp(-self.params.nu / 2.0 * sig1 * (self.params.interval[0] + 2 * lam1 * t))) ** (-2 / self.params.nu)
ur = (1 + (2 ** (self.params.nu / 2.0) - 1) *
np.exp(-self.params.nu / 2.0 * sig1 * (self.params.interval[1] + 2 * lam1 * t))) ** (-2 / self.params.nu)
uext = np.concatenate(([ul], u.values, [ur]))
f = self.dtype_f(self.init)
f.values = self.A.dot(uext)[1:-1] + self.params.lambda0 ** 2 * u.values * (1 - u.values ** self.params.nu)
return f
def u_exact(self, t):
"""
Routine to compute the exact solution at time t
Args:
t (float): current time
Returns:
dtype_u: exact solution
"""
me = self.dtype_u(self.init)
xvalues = np.array([(i + 1 - (self.params.nvars + 1) / 2) * self.dx for i in range(self.params.nvars)])
lam1 = self.params.lambda0 / 2.0 * ((self.params.nu / 2.0 + 1) ** 0.5 + (self.params.nu / 2.0 + 1) ** (-0.5))
sig1 = lam1 - np.sqrt(lam1 ** 2 - self.params.lambda0 ** 2)
me.values = (1 + (2 ** (self.params.nu / 2.0) - 1) *
np.exp(-self.params.nu / 2.0 * sig1 * (xvalues + 2 * lam1 * t))) ** (-2.0 / self.params.nu)
return me
| bsd-2-clause | 7,795,971,368,924,652,000 | 35.439759 | 119 | 0.535791 | false |
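An illustrative setup for the generalized_fisher problem class in the row above; the parameter values are made up, and the mesh datatype import follows pySDC's usual layout (treat both as assumptions).

# Illustrative instantiation of the generalized_fisher problem class defined above.
from pySDC.implementations.datatype_classes.mesh import mesh  # assumed datatype import

problem_params = {
    'nvars': 255,             # must satisfy nvars = 2^p - 1 for the coarsening assertion
    'nu': 1.0,
    'lambda0': 2.0,
    'newton_maxiter': 50,
    'newton_tol': 1e-10,
    'interval': (-50.0, 50.0),
}

prob = generalized_fisher(problem_params, dtype_u=mesh, dtype_f=mesh)

u0 = prob.u_exact(t=0.0)     # exact travelling-wave solution at t = 0
f0 = prob.eval_f(u0, t=0.0)  # right-hand side evaluated on the interior nodes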
agx/git-buildpackage | gbp/scripts/push.py | 1 | 6716 | #!/usr/bin/python3
# vim: set fileencoding=utf-8 :
#
# (C) 2017 Guido Günther <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
"""Push your changes to a remote"""
import os
import sys
import gbp.log
from gbp.config import GbpOptionParserDebian
from gbp.deb.git import DebianGitRepository, GitRepositoryError
from gbp.deb.source import DebianSourceError
from gbp.deb.source import DebianSource
from gbp.errors import GbpError
from gbp.scripts.common import ExitCodes
def build_parser(name):
try:
parser = GbpOptionParserDebian(command=os.path.basename(name),
usage='%prog [options]')
except GbpError as err:
gbp.log.err(err)
return None
parser.add_option("-d", "--dry-run", dest="dryrun", default=False,
action="store_true", help="dry run, don't push.")
parser.add_config_file_option(option_name="upstream-branch",
dest="upstream_branch")
parser.add_config_file_option(option_name="upstream-tag",
dest="upstream_tag")
parser.add_config_file_option(option_name="debian-branch",
dest="debian_branch")
parser.add_config_file_option(option_name="debian-tag",
dest="debian_tag")
parser.add_boolean_config_file_option(option_name="pristine-tar",
dest="pristine_tar")
parser.add_boolean_config_file_option(option_name="ignore-branch", dest="ignore_branch")
parser.add_config_file_option(option_name="color", dest="color", type='tristate')
parser.add_config_file_option(option_name="color-scheme",
dest="color_scheme")
parser.add_option("--verbose", action="store_true", dest="verbose",
default=False, help="verbose command execution")
return parser
def parse_args(argv):
parser = build_parser(argv[0])
if not parser:
return None, None
return parser.parse_args(argv)
def do_push(repo, dests, to_push, dry_run):
verb = "Dry-run: Pushing" if dry_run else "Pushing"
success = True
for dest in dests:
for tag in to_push['tags']:
gbp.log.info("%s %s to %s" % (verb, tag, dest))
try:
repo.push_tag(dest, tag, dry_run=dry_run)
except GitRepositoryError as e:
gbp.log.err(e)
success = False
for k, v in to_push['refs']:
gbp.log.info("%s %s to %s:%s" % (verb, v, dest, k))
try:
repo.push(dest, v, k, dry_run=dry_run)
except GitRepositoryError as e:
gbp.log.err(e)
success = False
return success
def get_push_src(repo, ref, tag):
"""
Determine whether we can push the ref.
If the ref is further ahead than the tag,
we only want to push up to this tag.
"""
commit = repo.rev_parse("%s^{commit}" % tag)
if repo.rev_parse(ref) == commit:
return ref
else:
return commit
def get_remote(repo, branch):
remote_branch = repo.get_merge_branch(branch)
return remote_branch.split('/')[0] if remote_branch else 'origin'
def main(argv):
retval = 1
branch = None
dest = None
to_push = {
'refs': [],
'tags': [],
}
(options, args) = parse_args(argv)
if not options:
return ExitCodes.parse_error
if len(args) > 2:
gbp.log.err("Only a single remote repository can be given")
elif len(args) == 2:
dest = args[1]
gbp.log.setup(options.color, options.verbose, options.color_scheme)
try:
repo = DebianGitRepository(os.path.curdir, toplevel=False)
except GitRepositoryError:
gbp.log.err("%s is not inside a git repository" % (os.path.abspath('.')))
return 1
try:
source = DebianSource(repo.path)
branch = repo.branch
if not options.ignore_branch:
if branch != options.debian_branch:
gbp.log.err("You are not on branch '%s' but %s" %
(options.debian_branch,
"on '%s'" % branch if branch else 'in detached HEAD state'))
raise GbpError("Use --ignore-branch to ignore or --debian-branch to set the branch name.")
if not dest:
dest = get_remote(repo, branch)
if options.debian_tag != '':
dtag = repo.version_to_tag(options.debian_tag, source.version)
if repo.has_tag(dtag):
to_push['tags'].append(dtag)
if source.is_releasable() and branch:
ref = 'refs/heads/%s' % branch
to_push['refs'].append((ref, get_push_src(repo, ref, dtag)))
if not source.is_native():
if options.upstream_tag != '':
utag = repo.version_to_tag(options.upstream_tag,
source.upstream_version)
if repo.has_tag(utag):
to_push['tags'].append(utag)
if options.upstream_branch != '':
ref = 'refs/heads/%s' % options.upstream_branch
to_push['refs'].append((ref, get_push_src(repo, ref, utag)))
if options.pristine_tar:
commit, _ = repo.get_pristine_tar_commit(source)
if commit:
ref = 'refs/heads/pristine-tar'
to_push['refs'].append((ref, get_push_src(repo, ref, commit)))
if do_push(repo, [dest], to_push, dry_run=options.dryrun):
retval = 0
else:
gbp.log.err("Failed to push some refs.")
retval = 1
except (GbpError, GitRepositoryError, DebianSourceError) as err:
if str(err):
gbp.log.err(err)
except KeyboardInterrupt:
gbp.log.err("Interrupted. Aborting.")
return retval
if __name__ == '__main__':
sys.exit(main(sys.argv))
# vim:et:ts=4:sw=4:et:sts=4:ai:set list listchars=tab\:»·,trail\:·:
| gpl-2.0 | -3,093,064,265,374,619,600 | 34.513228 | 106 | 0.580155 | false |
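For reference, the push script above gathers everything into a to_push dict of tags plus (remote ref, push source) pairs before calling do_push(); the sketch below shows that shape with made-up tag and ref names (the do_push call is commented out because it needs a real DebianGitRepository).

# Sketch of the data structure do_push() consumes; names and values are illustrative.
to_push = {
    'tags': ['debian/1.2-1', 'upstream/1.2'],
    'refs': [
        ('refs/heads/master', 'refs/heads/master'),   # push the branch as-is
        ('refs/heads/pristine-tar', 'a1b2c3d'),       # or push only up to a given commit
    ],
}

# repo would be a DebianGitRepository; dry_run=True only logs what would be pushed.
# ok = do_push(repo, ['origin'], to_push, dry_run=True)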
pgfoster/p4-phylogenetics | p4/mcmccheckpointreader.py | 1 | 8159 | import os
import p4.func
import pickle
import math
import numpy
import glob
from p4.p4exceptions import P4Error
class McmcCheckPointReader(object):
"""Read in and display mcmc_checkPoint files.
Three options--
To read in a specific checkpoint file, specify the file name by
fName=whatever
To read in the most recent (by os.path.getmtime()) checkpoint
file, say last=True
If you specify neither of the above, it will read in all the
checkPoint files that it finds.
Where it looks is determined by theGlob, which by default is '*',
ie everything in the current directory. If you want to look
somewhere else, you can specify eg::
theGlob='SomeWhereElse/*'
or, if it is unambiguous, just::
theGlob='S*/*'
So you might say::
cpr = McmcCheckPointReader(theGlob='*_0.*')
to get all the checkpoints from the first run, run 0. Then, you
can tell the cpr object to do various things. Eg::
cpr.writeProposalAcceptances()
But perhaps the most powerful thing about it is that it allows
easy access to the checkpointed Mcmc objects, in the list mm. Eg
to get the first one, ask for::
m = cpr.mm[0]
and m is an Mcmc object, complete with all its records of
proposals and acceptances and so on. And the TreePartitions
object. No data, tho, of course.
(Sorry! -- Lazy documentation. See the source code for more that it can do.)
"""
def __init__(self, fName=None, theGlob='*', last=False, verbose=True):
self.mm = []
if not fName:
#fList = [fName for fName in os.listdir(os.getcwd()) if fName.startswith("mcmc_checkPoint")]
#fList = glob.glob(theGlob)
# print "Full glob = %s" % fList
fList = [fName for fName in glob.glob(theGlob) if
os.path.basename(fName).startswith("mcmc_checkPoint")]
# print fList
if not fList:
raise P4Error("No checkpoints found in this directory.")
if last:
# Find the most recent
mostRecent = os.path.getmtime(fList[0])
mostRecentFileName = fList[0]
if len(fList) > 1:
for fName in fList[1:]:
mtime = os.path.getmtime(fName)
if mtime > mostRecent:
mostRecent = mtime
mostRecentFileName = fName
f = open(mostRecentFileName, 'rb')
m = pickle.load(f)
f.close()
self.mm.append(m)
else:
# get all the files
for fName in fList:
f = open(fName, 'rb')
m = pickle.load(f)
f.close()
self.mm.append(m)
self.mm = p4.func.sortListOfObjectsOn2Attributes(
self.mm, "gen", 'runNum')
else:
# get the file by name
f = open(fName, 'rb')
m = pickle.load(f)
f.close()
self.mm.append(m)
if verbose:
self.dump()
def read(self, fName):
f = open(fName, 'rb')
m = pickle.load(f)
f.close()
self.mm.append(m)
def dump(self, extras=False):
print("McmcCheckPoints (%i checkPoints read)" % len(self.mm))
if extras:
print("%12s %12s %12s %12s %12s %12s %12s" % (
" ", "index", "run", "gen+1", "cpInterval", "sampInterv", "nSamps"))
print("%12s %12s %12s %12s %12s %12s %12s" % (
" ", "-----", "---", "-----", "----------", "----------", "------"))
for i in range(len(self.mm)):
m = self.mm[i]
assert m.checkPointInterval % m.sampleInterval == 0
if m.simTemp:
thisNSamps = m.treePartitions.nTrees
else:
thisNSamps = int(m.checkPointInterval / m.sampleInterval)
assert thisNSamps == m.treePartitions.nTrees
# print " %2i run %2i, gen+1 %11i" % (i, m.runNum, m.gen+1)
print("%12s %12s %12s %12s %12s %12s %12s" % (
" ", i, m.runNum, m.gen + 1, m.checkPointInterval, m.sampleInterval, thisNSamps))
else:
print("%12s %12s %12s %12s %12s" % (
" ", "index", "run", "gen+1", "nSamps"))
print("%12s %12s %12s %12s %12s" % (
" ", "-----", "---", "-----", "------"))
for i in range(len(self.mm)):
m = self.mm[i]
assert m.checkPointInterval % m.sampleInterval == 0
if hasattr(m, "simTemp") and m.simTemp:
thisNSamps = m.treePartitions.nTrees
else:
thisNSamps = int(m.checkPointInterval / m.sampleInterval)
assert thisNSamps == m.treePartitions.nTrees
# print(f"got thisNSamps {thisNSamps}, nTrees {m.treePartitions.nTrees}")
# print " %2i run %2i, gen+1 %11i" % (i, m.runNum, m.gen+1)
print("%12s %12s %12s %12s %12s" % (
" ", i, m.runNum, m.gen + 1, thisNSamps))
def compareSplits(self, mNum1, mNum2, verbose=True, minimumProportion=0.1):
"""Do the TreePartitions.compareSplits() method between two checkpoints
Args:
mNum1, mNum2 (int): indices to Mcmc checkpoints in self
Returns:
a tuple of asdoss and the maximum difference in split supports
"""
# Should we be only looking at splits within the 95% ci of the topologies?
m1 = self.mm[mNum1]
m2 = self.mm[mNum2]
tp1 = m1.treePartitions
tp2 = m2.treePartitions
if verbose:
print("\nMcmcCheckPointReader.compareSplits(%i,%i)" % (mNum1, mNum2))
print("%12s %12s %12s %12s %12s" % ("mNum", "runNum", "start", "gen+1", "nTrees"))
for i in range(5):
print(" ---------", end=' ')
print()
for mNum in [mNum1, mNum2]:
print(" %10i " % mNum, end=' ')
m = self.mm[mNum]
print(" %10i " % m.runNum, end=' ')
print(" %10i " % (m.startMinusOne + 1), end=' ')
print(" %10i " % (m.gen + 1), end=' ')
# for i in m.splitCompares:
# print i
print(" %10i " % m.treePartitions.nTrees)
asdos, maxDiff, meanDiff = p4.func._compareSplitsBetweenTwoTreePartitions(
tp1, tp2, minimumProportion, verbose=verbose)
asdos2, maxDiff2, meanDiff2= p4.func._compareSplitsBetweenTwoTreePartitions(
tp2, tp1, minimumProportion, verbose=False)
if math.fabs(asdos - asdos2) > 0.000001:
print("Reciprocal assdos differs: %s %s" % (asdos, asdos2))
if asdos == None and verbose:
print("No splits > %s" % minimumProportion)
return asdos, maxDiff, meanDiff
def compareSplitsAll(self, precision=3, linewidth=120):
"""Do func.compareSplitsBetweenTreePartitions() for all pairs
Output is verbose. Shows
- average standard deviation of split frequencies (or supports), like MrBayes
- maximum difference between split supports from each pair of checkpoints, like PhyloBayes
Returns:
None
"""
tpp = [m.treePartitions for m in self.mm]
p4.func.compareSplitsBetweenTreePartitions(tpp, precision=precision, linewidth=linewidth)
def writeProposalAcceptances(self):
for m in self.mm:
m.writeProposalAcceptances()
def writeSwapMatrices(self):
for m in self.mm:
if m.nChains > 1:
m.writeSwapMatrix()
def writeSwapVectors(self):
for m in self.mm:
if m.nChains > 1:
m.writeSwapVector()
def writeProposalProbs(self):
for m in self.mm:
m.writeProposalProbs()
| gpl-2.0 | -2,065,459,768,183,199,200 | 36.086364 | 104 | 0.530702 | false |
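A usage sketch following the McmcCheckPointReader docstring above; the glob pattern, indices and import path are illustrative assumptions.

# Illustrative use of McmcCheckPointReader, following its docstring.
from p4.mcmccheckpointreader import McmcCheckPointReader  # assumed import path

cpr = McmcCheckPointReader(theGlob='*_0.*')   # checkpoints from run 0 only
cpr.writeProposalAcceptances()                # acceptance rates per checkpoint
cpr.compareSplitsAll()                        # ASDOSS / max-diff table over all pairs

m = cpr.mm[0]                                 # the first checkpointed Mcmc object
asdoss, max_diff, mean_diff = cpr.compareSplits(0, 1, minimumProportion=0.1)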
kapilt/cloud-custodian | tools/c7n_azure/c7n_azure/handler.py | 1 | 2673 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import uuid
from azure.common import AzureHttpError
from msrestazure.azure_exceptions import CloudError
from c7n.utils import reset_session_cache
from c7n.config import Config
from c7n.policy import PolicyCollection
from c7n.resources import load_resources
from c7n.structure import StructureParser
from c7n_azure.provider import Azure
log = logging.getLogger('custodian.azure.functions')
def run(event, context, subscription_id=None):
# policies file should always be valid in functions so do loading naively
with open(context['config_file']) as f:
policy_config = json.load(f)
if not policy_config or not policy_config.get('policies'):
log.error('Invalid policy config')
return False
options_overrides = \
policy_config['policies'][0].get('mode', {}).get('execution-options', {})
# setup our auth file location on disk
options_overrides['authorization_file'] = context['auth_file']
# if output_dir specified use that, otherwise make a temp directory
if 'output_dir' not in options_overrides:
options_overrides['output_dir'] = get_tmp_output_dir()
# merge all our options in
options = Config.empty(**options_overrides)
if subscription_id is not None:
options['account_id'] = subscription_id
load_resources(StructureParser().get_resource_types(policy_config))
options = Azure().initialize(options)
policies = PolicyCollection.from_data(policy_config, options)
if policies:
for p in policies:
try:
p.push(event, context)
except (CloudError, AzureHttpError) as error:
log.error("Unable to process policy: %s :: %s" % (p.name, error))
reset_session_cache()
return True
def get_tmp_output_dir():
output_dir = '/tmp/' + str(uuid.uuid4())
if not os.path.exists(output_dir):
try:
os.mkdir(output_dir)
except OSError as error:
log.error("Unable to make output directory: {}".format(error))
return output_dir
| apache-2.0 | 6,624,995,554,527,856,000 | 31.597561 | 81 | 0.699214 | false |
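A minimal sketch of invoking run() from the row above inside an Azure Function; the file paths and subscription id are placeholder assumptions.

# Hypothetical invocation of run() from an Azure Function entry point.
context = {
    'config_file': '/home/site/wwwroot/function_app/config.json',  # packaged policy JSON
    'auth_file': '/home/site/wwwroot/function_app/auth.json',      # service principal creds
}
event = None  # periodic-mode functions receive no resource event

ok = run(event, context, subscription_id='00000000-0000-0000-0000-000000000000')
if not ok:
    log.error('policy execution failed or config was invalid')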
heromod/migrid | mig/shared/functionality/showvgridmonitor.py | 1 | 5290 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# showvgridmonitor - show private vgrid monitor to vgrid participants
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Show the monitor page for requested vgrids - all_vgrids keyword for all
allowed vgrids"""
import os
import shared.returnvalues as returnvalues
from shared.defaults import all_vgrids
from shared.functional import validate_input_and_cert
from shared.html import themed_styles
from shared.init import initialize_main_variables, find_entry
from shared.vgrid import vgrid_is_owner_or_member, user_allowed_vgrids
def signature():
"""Signature of the main function"""
defaults = {'vgrid_name': [all_vgrids]}
return ['html_form', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
meta = '''<meta http-equiv="refresh" content="%s" />
''' % configuration.sleep_secs
style = themed_styles(configuration)
script = '''
<script type="text/javascript" src="/images/js/jquery.js"></script>
<script type="text/javascript" src="/images/js/jquery.tablesorter.js"></script>
<script type="text/javascript" >
$(document).ready(function() {
// table initially sorted by col. 1 (name)
var sortOrder = [[1,0]];
// use image path for sorting if there is any inside
var imgTitle = function(contents) {
var key = $(contents).find("a").attr("class");
if (key == null) {
key = $(contents).html();
}
return key;
}
$("table.monitor").tablesorter({widgets: ["zebra"],
textExtraction: imgTitle,
});
$("table.monitor").each(function () {
try {
$(this).trigger("sorton", [sortOrder]);
} catch(err) {
/* tablesorter chokes on empty tables - just continue */
}
});
}
);
</script>
'''
title_entry = find_entry(output_objects, 'title')
title_entry['text'] = '%s Monitor' % configuration.short_title
title_entry['meta'] = meta
title_entry['style'] = style
title_entry['javascript'] = script
allowed_vgrids = user_allowed_vgrids(configuration, client_id)
vgrid_list = accepted['vgrid_name']
if all_vgrids in accepted['vgrid_name']:
vgrid_list = [i for i in vgrid_list if all_vgrids != i]\
+ allowed_vgrids
# Force list to sequence of unique entries
for vgrid_name in set(vgrid_list):
html = ''
if not vgrid_is_owner_or_member(vgrid_name, client_id,
configuration):
output_objects.append({'object_type': 'error_text', 'text'
: '''You must be an owner or member of %s %s
to access the monitor.''' % (vgrid_name, configuration.site_vgrid_label)})
return (output_objects, returnvalues.CLIENT_ERROR)
monitor_file = os.path.join(configuration.vgrid_home, vgrid_name,
'%s.html' % configuration.vgrid_monitor)
try:
monitor_fd = open(monitor_file, 'r')
past_header = False
for line in monitor_fd:
if -1 != line.find('end of raw header'):
past_header = True
continue
if not past_header:
continue
if -1 != line.find('begin raw footer:'):
break
html += str(line)
monitor_fd.close()
except Exception, exc:
output_objects.append({'object_type': 'error_text', 'text'
: 'Error reading %s monitor page (%s)'
% (configuration.site_vgrid_label, exc)})
return (output_objects, returnvalues.SYSTEM_ERROR)
output_objects.append({'object_type': 'html_form', 'text'
: html})
return (output_objects, returnvalues.OK)
| gpl-2.0 | 3,584,203,950,245,496,300 | 34.266667 | 81 | 0.590359 | false |
tensorflow/models | official/vision/beta/projects/volumetric_models/train.py | 1 | 1030 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver."""
from absl import app
import gin # pylint: disable=unused-import
from official.common import flags as tfm_flags
from official.vision.beta import train
from official.vision.beta.projects.volumetric_models import registry_imports # pylint: disable=unused-import
def main(_):
train.main(_)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| apache-2.0 | -7,770,596,292,402,876,000 | 31.1875 | 109 | 0.754369 | false |
PapenfussLab/MHC-clogs | lib/mhc/proteintools.py | 1 | 7336 | """
proteintools.py
"""
import re
import string
from mhc.data import *
from mhc.biomart import *
from mhc.hmmer2 import *
from mungolite.fasta import FastaFile
class Protein:
"""
Container for protein data.
"""
def __init__(self, ensembl_protein_id=None, ensembl_transcript_id=None,
ensembl_gene_id=None, gene_symbol=None, description=None,
header=None, seq=None):
self.ensembl_protein_id = ensembl_protein_id
self.ensembl_transcript_id = ensembl_transcript_id
self.ensembl_gene_id = ensembl_gene_id
self.gene_symbol = gene_symbol
self.description = description
self.header = header
self.seq = seq
self.biomart_gene = None
def __repr__(self):
format = "%(ensembl_protein_id)s\t%(ensembl_transcript_id)s\t%(ensembl_gene_id)s\t%(gene_symbol)s\t%(description)s"
return format % self.__dict__
class ProteinDatabase:
"""
Database of sequences and protein data.
Lookup protein/gene data by protein id.
"""
def __init__(self, species, fasta_filename, biomart_filename):
self.species = species
self.proteins = {}
self.proteins_seq = FastaFile(fasta_filename, indexed=True)
self.biomart_genes = BiomartGene.parse(biomart_filename)
def __getitem__(self, ensembl_protein_id):
# print ensembl_protein_id
h,s = self.proteins_seq.search(ensembl_protein_id)
protein = Protein()
tokens = h.split()
protein.ensembl_protein_id = tokens[0]
protein.description = " ".join(tokens[1:])
protein.ensembl_gene_id = tokens[3].split(":")[1]
protein.ensembl_transcript_id = tokens[4].split(":")[1]
try:
protein.biomart_gene = self.biomart_genes[protein.ensembl_gene_id]
if not protein.biomart_gene.gene_symbol is None:
rs = re.search(" \((?P<num>[0-9]+) of [0-9]+\)", protein.biomart_gene.gene_symbol)
if rs:
i,j = rs.start(), rs.end()
num = int(rs.groups()[0])
if num<26:
suffix = string.letters[num-1]
else:
suffix = "__%s" % num
protein.biomart_gene.gene_symbol = protein.biomart_gene.gene_symbol[0:i] + suffix
protein.gene_symbol = "%s_%s" % (
species_short[self.species], protein.biomart_gene.gene_symbol)
else:
protein.gene_symbol = "%s_%s" % (
species_short[self.species], protein.ensembl_gene_id)
except KeyError:
protein.biomart_gene = BiomartGene()
protein.gene_symbol = protein.ensembl_gene_id
protein.seq = s
return protein
class DomainCombination:
"""
Base class for domain combinations in a single protein
"""
def __init__(self):
self.domain_names = []
def has_domain(self, domain_name):
return domain_name in self.domain_names
def sort(self, key=lambda x: -x.overall_score):
for domain_name in self.domain_names:
self.__dict__[domain_name].sort(key=key)
def add_domain(self, domain_name, domain):
self.domain_names.append(domain_name)
try:
self.__dict__[domain_name].append(domain)
except KeyError:
self.__dict__[domain_name] = [domain]
def __repr__(self):
output = []
for domain_name in self.domain_names:
output.append(str(self.__dict__[domain_name]))
return "\n".join(output) + "\n"
class MHCClassIDomainCombination(DomainCombination):
"""
The MHCClassIDomainCombination contains domain matches to a single protein.
Makes testing class I-ness or class II-ness easy.
"""
def get_score(self):
score = 0
self.sort()
if self.has_domain("MHC_I"): score += self.MHC_I[0].overall_score
if self.has_domain("C1_set"): score += self.C1_set[0].overall_score
return score
def get_evalue(self):
evalue = 1
self.sort()
if self.has_domain("MHC_I"): evalue *= self.MHC_I[0].independent_evalue
if self.has_domain("C1_set"): evalue *= self.C1_set[0].independent_evalue
return evalue
def is_class_I(self, cutoff=1e-5):
self.sort()
# No class I domain
if not self.has_domain("MHC_I"): return False
# Class II domain
if self.has_domain("MHC_II_beta") and \
self.MHC_I[0].overall_score<=self.MHC_II_beta[0].overall_score:
return False
# Strong class I hit
if self.MHC_I[0].independent_evalue<=cutoff: return True
# Weak class I, but no Ig
if not self.has_domain("C1_set"): return False
# Weak class I and strong Ig
weak_hit_plus_ig = self.MHC_I[0].overall_score>0 and \
self.C1_set[0].independent_evalue<=cutoff
MHC_I_pos = 0.5*(self.MHC_I[0].sequence_start+self.MHC_I[0].sequence_end)
C1_set_pos = 0.5*(self.C1_set[0].sequence_start+self.C1_set[0].sequence_end)
good_position = MHC_I_pos<C1_set_pos
if weak_hit_plus_ig and good_position:
return True
else:
return False
def is_class_II(self, cutoff=1e-5):
# No class II domain
if not self.has_domain("MHC_II_beta"): return False
# Class I domain
if self.has_domain("MHC_I") and \
self.MHC_II_beta[0].overall_score<=self.MHC_I[0].overall_score:
return False
# Strong class II hit
if self.MHC_II_beta[0].independent_evalue<=cutoff: return True
# Weak class II, but no Ig
if not self.has_domain("C1_set"): return False
# Weak class II and strong Ig
weak_hit_plus_ig = self.MHC_II_beta[0].overall_score>0 and \
self.C1_set[0].independent_evalue<=cutoff
MHC_II_pos = 0.5*(self.MHC_II_beta[0].sequence_start+self.MHC_II_beta[0].sequence_end)
C1_set_pos = 0.5*(self.C1_set[0].sequence_start+self.C1_set[0].sequence_end)
good_position = MHC_II_pos<C1_set_pos
if weak_hit_plus_ig and good_position:
return True
else:
return False
class MHCClassIDomainCombiner:
"""
Combiner lets you add domains from multiple searches and collects
them by protein_id (target_name). Provides a convenient iterator over
the protein hits.
"""
def __init__(self):
self.combinations = {}
def __getitem__(self, protein_id):
return self.combinations[protein_id]
def __iter__(self):
self.index = -1
return self
def next(self):
self.index += 1
if self.index>=len(self.combinations): raise StopIteration
k = self.combinations.keys()[self.index]
return k
def add_domains(self, domain_name, domain_filename):
for domain in DomainHit.parse(domain_filename):
try:
self.combinations[domain.target_name].add_domain(domain_name, domain)
except KeyError:
self.combinations[domain.target_name] = MHCClassIDomainCombination()
self.combinations[domain.target_name].add_domain(domain_name, domain)
| artistic-2.0 | 114,229,983,989,476,510 | 33.441315 | 124 | 0.589558 | false |
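An illustrative classification loop built on MHCClassIDomainCombiner from the row above; the hmmsearch domain-table filenames are placeholder assumptions.

# Illustrative classification loop using the combiner defined above.
combiner = MHCClassIDomainCombiner()
combiner.add_domains('MHC_I', 'MHC_I_domains.txt')           # placeholder filenames
combiner.add_domains('MHC_II_beta', 'MHC_II_beta_domains.txt')
combiner.add_domains('C1_set', 'C1_set_domains.txt')

class_I_ids = []
for protein_id in combiner:
    combination = combiner[protein_id]
    if combination.is_class_I(cutoff=1e-5):
        class_I_ids.append(protein_id)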
gvx/deja | dis.py | 1 | 3209 | import struct
from bytecode import (
OPCODES, unsigned_int_s, signed_int_s, double_s, signed_long_int_s,
unsigned_long_int_s, signed_char_s, positional_instructions
)
from strquot import quote
DECODE_OPCODES = {}
for k in OPCODES:
DECODE_OPCODES[OPCODES[k] / 0x1000000] = k
WORD_ARG = set('GET SET GET_GLOBAL SET_GLOBAL SET_LOCAL PUSH_LITERAL PUSH_WORD SOURCE_FILE'.split())
POS_ARG = positional_instructions
def d_unsigned_int(x):
return unsigned_int_s.unpack('\x00' + x)[0]
def unsigned_int(x):
return unsigned_int_s.unpack(x)[0]
def unsigned_long_int(x):
return unsigned_long_int_s.unpack(x)[0]
def signed_long_int(x):
return signed_long_int_s.unpack(x)[0]
def signed_char(x):
return signed_char_s.unpack(x)[0]
def d_signed_int(x):
if x[0] >= '\x80':
x = '\xff' + x
else:
x = '\x00' + x
return signed_int_s.unpack(x)[0]
def d_double(x):
return double_s.unpack(x)[0]
class Literals(object):
def __init__(self, source):
self.source = source
self.cache = []
def __getitem__(self, item):
while item >= len(self.cache):
s = self.source[0]
if s == '\x00':
length = unsigned_int(self.source[1:5])
b = ":" + self.source[5:5 + length]
self.source = self.source[5 + length:]
elif s == '\x01':
length = unsigned_int(self.source[1:5]) #<-- length?
b = '"' + quote(self.source[5:5 + length]) + '"'
self.source = self.source[5 + length:]
if s == '\x80':
length = ord(self.source[1])
b = ":" + self.source[2:2 + length]
self.source = self.source[2 + length:]
elif s == '\x81':
length = ord(self.source[1]) #<-- length?
b = '"' + quote(self.source[2:2 + length]) + '"'
self.source = self.source[2 + length:]
elif s == '\x02':
b = d_double(self.source[1:9])
self.source = self.source[9:]
elif s == '\x82':
b = d_signed_int(self.source[1:4])
self.source = self.source[4:]
elif s == '\x07':
n = signed_long_int(self.source[1:9])
d = signed_long_int(self.source[9:17])
b = str(n) + '/' + str(d)
self.source = self.source[17:]
elif s == '\x87':
n = signed_char(self.source[1])
d = ord(self.source[2])
b = str(n) + '/' + str(d)
self.source = self.source[3:]
self.cache.append(b)
return self.cache[item]
def make_line_00(i, x, literals):
op = DECODE_OPCODES[ord(x[0])]
if op in WORD_ARG:
arg = literals[d_unsigned_int(x[1:])]
elif op == 'PUSH_INTEGER':
arg = d_signed_int(x[1:])
elif op in POS_ARG:
arg = i + d_signed_int(x[1:])
elif op == 'LINE_NUMBER':
arg = d_unsigned_int(x[1:])
else:
arg = ''
return '%03d %s %s' % (i, op, arg)
def dis_00(text):
if len(text) < 4:
raise Exception("Code file too short")
size = unsigned_int(text[:4])
text = text[4:]
code = [text[j * 4:j * 4 + 4] for j in range(size)]
literals = Literals(text[size * 4:])
return '\n'.join(make_line_00(i, x, literals) for i, x in enumerate(code))
def dis(text):
if not text.startswith('\x07DV'):
raise Exception("Not a Deja Vu byte code file.")
elif text[3] in ('\x00', '\x01', '\x02', '\x03'):
return dis_00(text[4:])
else:
raise Exception("Byte code version not recognised.")
if __name__ == '__main__':
import sys
sys.stdout.write(dis(sys.stdin.read()))
| isc | -680,923,642,128,849,900 | 26.904348 | 100 | 0.609847 | false |
Seldaiendil/meyeOS | devtools/qooxdoo-1.5-sdk/tool/pylib/generator/code/Class.py | 1 | 11441 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2010-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Thomas Herchenroeder (thron7)
#
################################################################################
##
# Class -- Internal representation of a qooxdoo class; derives from Resource
##
import os, sys, re, types, copy
import time, math
from pprint import pprint
from misc import textutil
from ecmascript.frontend import treeutil
from ecmascript.transform.optimizer import variantoptimizer
from generator.resource.Resource import Resource
from generator import Context
from generator.code.clazz.MClassHints import MClassHints
from generator.code.clazz.MClassI18N import MClassI18N
from generator.code.clazz.MClassDependencies import MClassDependencies
from generator.code.clazz.MClassCode import MClassCode
from generator.code.clazz.MClassResources import MClassResources
class Class(Resource, MClassHints, MClassI18N, MClassDependencies, MClassCode, MClassResources):
def __init__(self, name, path, library, context, container):
#__slots__ = ('id', 'path', 'size', 'encoding', 'library', 'context', 'source', 'scopes', 'translations')
global console, cache
super(Class, self).__init__(path)
self.id = name # qooxdoo name of class, classId
self.library = library # Library()
# TODO: we now have both a 'context' param, but also use generator.Context (needed in __setstate__)
self.context = context
self._classesObj= container # this is ugly, but curr. used to identify known names
self.size = -1
self.encoding = 'utf-8'
self.source = u'' # source text of this class
#self.ast = None # ecmascript.frontend.tree instance
#self.type = "" # PROPERTY
self.scopes = None # an ecmascript.frontend.Script instance
self.translations = {} # map of translatable strings in this class
self.resources = set() # set of resource objects needed by the class
self._assetRegex= {} # [AssetHint], to hold regex's from #asset hints, for resource matching
self.cacheId = "class-%s" % self.path # cache object for class-specific infos (outside tree, compile)
console = context["console"]
cache = context["cache"]
self.defaultIgnoredNamesDynamic = [lib["namespace"] for lib in self.context['jobconf'].get("library", [])]
def __getstate__(self):
d = self.__dict__.copy()
# need to copy nested map, or i will modify original one
d['context'] = d['context'].copy()
del d['context']['cache']
return d
def __setstate__(self, d):
if hasattr(Context, "cache"):
d['context']['cache'] = Context.cache
d['defaultIgnoredNamesDynamic'] = [lib["namespace"] for lib in d['context']['jobconf'].get("library", [])]
self.__dict__ = d
def _getType(self):
if hasattr(self, "_type"):
return self._type
ast = self.tree()
qxDefine = treeutil.findQxDefine(ast)
classMap = treeutil.getClassMap(qxDefine)
if 'type' in classMap:
self._type = classMap['type'].get('value')
elif 'extend' not in classMap:
self._type = "static" # this is qx.Class.define semantics!
else:
self._type = "normal"
return self._type
type = property(_getType)
##
# classInfo = {
# 'svariants' : ['qx.debug'] # supported variants
# 'deps-<path>-<variants>' : ([<Dep>qx.Class#define], <timestamp>) # class dependencies
# 'messages-<variants>' : ["Hello %1"] # message strings
# }
def _getClassCache(self):
cache = self.context['cache']
classInfo, modTime = cache.read(self.cacheId, self.path, memory=True)
if classInfo:
if self.writeCond():
print "\nReading %s (keys: %s)" % (self.cacheId,
["%s:%s" % (i,self.foo(classInfo[i][1]) if len(classInfo[i])>1 else "-") for i in classInfo])
for k in classInfo.keys():
if k.startswith("deps-"):
data = classInfo[k][0]['load']
print (sorted(data, key=str))
print "len:", len(data)
return classInfo, modTime
else:
return {}, None
def _writeClassCache(self, classInfo):
cache = self.context['cache']
if self.writeCond():
import time
print "\nWriting %s (keys: %s)" % (self.cacheId,
["%s:%s" % (i,self.foo(classInfo[i][1]) if len(classInfo[i])>1 else "-") for i in classInfo])
for k in classInfo.keys():
if k.startswith("deps-"):
data = classInfo[k][0]['load']
print (sorted(data, key=str))
print "len:", len(data)
cache.write(self.cacheId, classInfo, memory=True)
def foo(s,t):
d = time.strftime("%Y:%m:%d-%H:%M:%S::%%2.d", time.localtime(t))
d = d % ((t-math.trunc(t))*100,)
return d
def writeCond(self):
return False #self.id == "qx.core.Environment"
@staticmethod
def optimizeEnvironmentClass(envClass, compOptions):
tree = envClass.tree(compOptions.variantset)
# has to come before string optimization, or the "qx.debug" etc args are gone
tree = variantoptimizer.processEnvironmentClass(tree, compOptions.allClassVariants)
if compOptions.optimize:
tree = envClass.optimize(tree, compOptions.optimize)
return tree
##
# Duplication with clazz.ClassDependencies.DependencyItem
class DependencyItem(object):
def __init__(self, name, attribute, requestor, line=-1, isLoadDep=False):
self.name = name # "qx.Class" [dependency to (class)]
assert isinstance(name, types.StringTypes)
self.attribute = attribute # "methodA" [dependency to (class.attribute)]
self.requestor = requestor # "gui.Application" [the one depending on this item]
self.line = line # 147 [source line in dependent's file]
self.isLoadDep = isLoadDep # True [load or run dependency]
self.needsRecursion = False # this is a load-time dep that draws in external deps recursively
self.isCall = False # whether the reference is a function call
def __repr__(self):
return "<DepItem>:" + self.name + "#" + self.attribute
def __str__(self):
return self.name + "#" + self.attribute
def __eq__(self, other):
return self.name == other.name and self.attribute == other.attribute
def __hash__(self):
return hash(self.name + self.attribute)
##
# Throw this in cases of dependency problems
class DependencyError(ValueError): pass
##
# Auxiliary class for ClassDependencies() (although of more general appeal)
class ClassMap(object):
def __init__(self):
# after http://manual.qooxdoo.org/current/pages/core/class_quickref.html
self.data = {
'type' : None,
'extend' : [],
'implement' : [],
'include' : [],
'construct' : [],
'statics' : {}, # { foo1 : [<dep1>,...], foo2 : [<dep2>,...] }
'properties': {},
'members' : {}, # { foo1 : [<dep1>,...], foo2 : [<dep2>,...] }
'settings' : [],
'variants' : [],
'events' : [],
'defer' : [],
'destruct' : [],
}
return
##
# Captures the dependencies of a class (-file)
# - the main purpose of this is to have an accessible, shallow representation of
# a class' dependencies, for caching and traversing
class ClassDependencies(object):
def __init__(self):
self.data = {
'require' : [], # [qx.Class#define, #require(...), ... <other top-level code symbols>]
'use' : [], # [#use(...)]
'optional': [], # [#optional(...)]
'ignore' : [], # [#ignore(...)]
'classes' : {}, # {"classId" : ClassMap(), where the map values are lists of depsItems}
}
return
##
# only iterates over the 'classes'
def dependencyIterator(self):
for classid, classMapObj in self.data['classes'].items():
classMap = classMapObj.data
for attrib in classMap:
if isinstance(classMap[attrib], types.ListType): # .defer
for dep in classMap[attrib]:
yield dep
elif isinstance(classMap[attrib], types.DictType): # .statics, .members, ...
for subattrib in classMap[attrib]:
for dep in classMap[attrib][subattrib]: # e.g. methods
yield dep
def getAttributeDeps(self, attrib): # attrib="ignore", "qx.Class#define"
res = []
data = self.data
# top level
if attrib.find('#')== -1:
res = data[attrib]
# class map
else:
classId, attribId = attrib.split('#', 1)
data = data['classes'][classId].data
if attribId in data:
res = data[attribId]
else:
for submap in ('statics', 'members', 'properties'):
if attribId in data[submap]:
res = data[submap][attribId]
break
return res
##
# Class to represent ["qx.util.*", "qx.core.Object"] et al.
# (like used in "include" and "exclude" config keys), to provide an
# encapsulated "match" method
class ClassMatchList(object):
def __init__(self, matchlist):
assert isinstance(matchlist, types.ListType)
self.matchlist = matchlist # ["a.b.c.*", "d.e.Foo"]
elems = []
for elem in matchlist:
assert isinstance(elem, types.StringTypes)
if elem != "":
regexp = textutil.toRegExpS(elem)
elems.append(regexp)
if elems:
self.__regexp = re.compile("|".join(elems))
else:
self.__regexp = re.compile(r".\A") # match none
def isEmpty(self):
return len(self.matchlist) == 0
def match(self, classId):
return self.__regexp.search(classId)
##
# Class to collect various options which influence the compilation process
# (optimizations, format, variants, ...)
class CompileOptions(object):
def __init__(self, optimize=[], variants={}, _format=False, source_with_comments=False):
self.optimize = optimize
self.variantset = variants
self.format = _format
self.source_with_comments = source_with_comments
self.privateMap = {} # {"<classId>:<private>":"<repl>"}
| agpl-3.0 | -8,735,955,568,168,217,000 | 37.914966 | 119 | 0.558081 | false |
ios-xr/iosxr-ansible | local/library/iosxr_show_install_last_log.py | 1 | 2176 | #!/usr/bin/python
#------------------------------------------------------------------------------
#
# Copyright (C) 2016 Cisco Systems, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------------
from ansible.module_utils.basic import *
from ydk.providers import NetconfServiceProvider
from ydk.services import CRUDService
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_spirit_install_instmgr_oper import SoftwareInstall
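# Illustrative playbook snippet (an assumption for documentation purposes; the
# module itself only defines host/username/password in its argument_spec):
#
#   - iosxr_show_install_last_log:
#       host: "{{ inventory_hostname }}"
#       username: cisco
#       password: cisco
#
# The module opens a NETCONF session on port 830 and returns the summary log of
# the most recent install operations in the 'stdout' result field.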
def main():
module = AnsibleModule(
argument_spec = dict(
host = dict(required=True),
username = dict(required=False, default=None),
password = dict(required=False, default=None),
),
supports_check_mode = False
)
args = module.params
# establish ssh connection
provider = NetconfServiceProvider(address=args['host'],
port=830,
username=args['username'],
password=args['password'],
protocol='ssh')
# establish CRUD service
crud = CRUDService()
# retrieve software install version
install = SoftwareInstall()
info = crud.read(provider, install)
result = dict(changed=False)
result['stdout'] = "no log available"
for logger in info.last_n_operation_logs.last_n_operation_log:
result['stdout'] = logger.summary.log
return module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 | 5,030,021,011,373,954,000 | 36.517241 | 92 | 0.59329 | false |
MCLConsortium/mcl-site | src/jpl.mcl.site.sciencedata/src/jpl/mcl/site/sciencedata/interfaces.py | 1 | 1169 | # encoding: utf-8
u'''MCL Site Knowledge — interfaces.'''
from . import MESSAGE_FACTORY as _
from zope import schema
from zope.interface import Interface
class IIngestor(Interface):
u'''Interface for objects that are ingestors.'''
def ingest():
u'''Ingest data from your RDF source and populate your items. Returns an IngestResults object.'''
class ISettings(Interface):
u'''Schema for MCL Site Knowledge settings control panel.'''
ingestEnabled = schema.Bool(
title=_(u'Enable Ingest'),
description=_(u'True (checked) if global RDF ingest is enabled'),
required=False,
)
ingestStart = schema.Datetime(
title=_(u'Start Time'),
description=_(u"If value appears, this indicates the time an active ingest started. You won't need to set this."),
required=False,
)
objects = schema.List(
title=_(u'Objects'),
description=_(u'Paths to objects that should be ingested.'),
required=False,
value_type=schema.TextLine(
title=_(u'Object'),
description=_(u'Path to an object whose contents should be ingested.')
)
)
| apache-2.0 | -6,736,158,160,591,152,000 | 31.416667 | 122 | 0.644387 | false |
markgw/jazzparser | src/jazzparser/formalisms/music_halfspan/semantics/distance.py | 1 | 20539 | """Semantic distance metrics.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <[email protected]>"
from jazzparser.utils.options import ModuleOption, choose_from_list
from jazzparser.formalisms.base.semantics.distance import DistanceMetric, \
FScoreMetric
from jazzparser.formalisms.music_halfspan.evaluation import tonal_space_f_score, \
tonal_space_alignment_score, tonal_space_align, \
arrange_alignment, tonal_space_distance, \
tonal_space_length
class TonalSpaceEditDistance(FScoreMetric):
"""
Original tonal space distance metric computed as the edit distance of
the step vectors and functions of the path through the tonal space
implied by the semantics.
"""
OPTIONS = [
ModuleOption('output', filter=choose_from_list(
['f','precision','recall','inversef','dist']),
usage="output=O, where O is one of 'f', 'precision', "\
"'recall', 'inversef', 'dist'",
default='dist',
help_text="Select what metric to output. Choose recall "\
"or precision for asymmetric metrics. F-score ('f') "\
"combines these two. This is inverted ('inversef') "\
"to get a distance, rather than similarity. "\
"Alternatively, use the edit distance of the alignment "\
"('dist', default)"),
]
name = "tsed"
def fscore_match(self, sem1, sem2):
if sem1 is None or sem2 is None:
alignment_score = 0.0
else:
alignment_score = tonal_space_alignment_score(sem1.lf, sem2.lf)
if sem1 is None:
len1 = 0.0
else:
len1 = tonal_space_length(sem1)
if sem2 is None:
len2 = 0.0
else:
len2 = tonal_space_length(sem2)
return alignment_score,len1,len2
def _get_identifier(self):
ident = {
'f' : 'f-score',
'precision' : 'precision',
'recall' : 'recall',
'inversef' : 'inverse f-score',
'dist' : 'edit distance',
}
return "tsed %s" % ident[self.options['output']]
identifier = property(_get_identifier)
def distance(self, sem1, sem2):
# Handle the extra 'dist' case
if self.options['output'] == 'dist':
# If one input is empty, we consider all points to have been deleted
if sem1 is None:
return tonal_space_length(sem2)
elif sem2 is None:
return tonal_space_length(sem1)
# Compute the score using our standard TS distance computation
# This is based on the alignment score of the optimal alignment
# of the two sequences
return tonal_space_distance(sem1.lf, sem2.lf)
else:
# Otherwise the superclass takes care of everything
return super(TonalSpaceEditDistance, self).distance(sem1, sem2)
def print_computation(self, sem1, sem2):
"""
Shows the optimal alignment of the paths that the score comes from.
@see: jazzparser.formalisms.music_halfspan.semantics.distance.DistanceMetric.print_computation
"""
pairs = tonal_space_align(sem1.lf, sem2.lf)
return "\n".join(["%s %s" % pair for pair in pairs])
def total_distance(self, input_pairs):
""" Handle the 'dist' output specially (just sum up distances). """
if self.options['output'] == 'dist':
# Do the normal (non-f-score) metric thing of summing up all vals
return DistanceMetric.total_distance(self, input_pairs)
else:
return FScoreMetric.total_distance(self, input_pairs)
def format_distance(self, dist):
if self.options['output'] == 'dist':
return "%f" % dist
else:
return FScoreMetric.format_distance(self, dist)
def _cadence_type(tree):
# The grammar currently ensures that only one cadence type is used
# throughout a specific cadence. If this changes, we'll want to
# redefine this metric
if len(tree.root) == 0:
# Root is a leaf: no cadence
return "NA"
else:
# Pick the first label we come across
label = tree.root[0].label
if label == "leftonto":
return "perfect"
elif label == "rightonto":
return "plagal"
else:
raise ValueError, "unknown cadence type with node label "\
"'%s' in the dependency graph" % label
class LargestCommonEmbeddedSubtrees(FScoreMetric):
"""
Tonal space distance metric computed as the size of the largest subtree
that can be embedded in the dependency graphs of two logical forms. This
is done separately for each alignment of cadences in the two logical
forms and the global optimum is used.
"""
OPTIONS = FScoreMetric.OPTIONS + [
ModuleOption('res_score', filter=int,
usage="res_score=R, where R is an integer",
default=2,
help_text="Score to give to matching resolutions. 1 "\
"is the score given to a matching node in the "\
"dependency tree. The default (2) gives more "\
"weight to matching resolutions that tree nodes. "\
"Special value -1 assigns a weight equal to the size "\
"of the common dependency tree + 1"),
]
name = "lces"
def _get_identifier(self):
ident = {
'f' : 'f-score',
'precision' : 'precision',
'recall' : 'recall',
'inversef' : 'inverse f-score',
}
return "dependency tree %s" % ident[self.options['output']]
identifier = property(_get_identifier)
def fscore_match(self, sem1, sem2):
"""
The core computation of the distance metric. Takes care of the tree
comparison and cadence alignment and return the vital statistics.
"""
from jazzparser.formalisms.music_halfspan.harmstruct import \
semantics_to_dependency_trees
from jazzparser.misc.tree.lces import lces_size
from jazzparser.utils.distance import align
res_score = self.options['res_score']
# Get dependency graphs for the two logical forms
if sem1 is None:
trees1 = []
else:
trees1 = semantics_to_dependency_trees(sem1)
if sem2 is None:
trees2 = []
else:
trees2 = semantics_to_dependency_trees(sem2)
if sem1 is None or sem2 is None:
# Empty input: give zero score to everything
alignment_score = 0.0
alignment = []
transpose = None
else:
# Try each possible transposition of the second tree to make this
# metric key independent
distances = []
for x_trans in range(4):
for y_trans in range(3):
def _align(tree1, tree2):
# Transpose the label in the second tree
label2 = ((tree2.root.label[0] + x_trans) % 4,
(tree2.root.label[1] + y_trans) % 3)
# Check the root to find out whether they have the same resolution
same_res = tree1.root.label == label2
# Find out what cadence type each is
same_cad = _cadence_type(tree1) == _cadence_type(tree2)
if same_cad:
# Compare the structure of the cadences
tree_similarity = lces_size(tree1, tree2)
else:
tree_similarity = 0
# Work out how much score to give a matching resolution
if res_score == -1:
res_match = tree_similarity + 1
else:
res_match = res_score
return - tree_similarity - (res_match if same_res else 0)
aligned,dist = align(trees1, trees2, delins_cost=0,
subst_cost=_align,
dist=True)
distances.append((dist,aligned,(x_trans,y_trans)))
alignment_score,alignment,transpose = min(distances,
key=lambda x:x[0])
alignment_score = -float(alignment_score)
def _max_score(trees):
"""
Get the maximum possible score that could be assigned to a match
with this tree set.
"""
score = 0
for tree in trees:
# Do the same things as _align (below), but max possible score
# Maximum similarity is just the size of the tree
tree_sim = len(tree)
if res_score == -1:
res_match = tree_sim + 1
else:
res_match = res_score
# Assume the same resolution and cadence type
score += tree_sim + res_match
return score
max_score1 = _max_score(trees1)
max_score2 = _max_score(trees2)
return alignment_score, max_score1, max_score2, alignment, transpose
def print_computation(self, sem1, sem2):
from jazzparser.misc.tree.lces import lces
from cStringIO import StringIO
stats = self.fscore_match(sem1, sem2)
trans = stats[4]
buf = StringIO()
print >>buf, "LF1: %s" % sem1
print >>buf, "LF2: %s" % sem2
print >>buf, "LF2 transposed by (%d,%d)\n" % trans
print >>buf, "Maximal cadence alignment:"
# Go through all the aligned cadences and show the components of the
# scores
for sem1cad, sem2cad in stats[3]:
if sem1cad is None:
print >>buf, "1: deleted"
else:
print >>buf, "1: %s" % sem1cad
if sem2cad is None:
print >>buf, "2: deleted"
else:
print >>buf, "2: %s" % sem2cad
if sem1cad is not None and sem2cad is not None:
# Cadences were aligned: explain how
print >>buf, "Cadence types: %s %s" % (_cadence_type(sem1cad),
_cadence_type(sem2cad))
root2 = sem2cad.root.label
root2 = ((root2[0]+trans[0])%4, (root2[1]+trans[1])%3)
print >>buf, "Resolutions: %s %s" % (sem1cad.root.label, root2)
common = lces(sem1cad, sem2cad)
print >>buf, "Shared structure: %s (size %d)" % (common, len(common)-1)
print >>buf
return buf.getvalue()
class OptimizedDependencyRecovery(FScoreMetric):
"""
Aligns the two dependency graphs in the way that optimizes their
dependency recovery and reports that dependency recovery. This gives
a metric that can be used when the alignment between the graphs is not
known, such as when parsing MIDI.
"""
name = "optdeprec"
def _get_identifier(self):
ident = {
'f' : 'f-score',
'precision' : 'precision',
'recall' : 'recall',
'inversef' : 'inverse f-score',
}
return "dependency alignment %s" % ident[self.options['output']]
identifier = property(_get_identifier)
def fscore_match(self, sem1, sem2):
from jazzparser.formalisms.music_halfspan.harmstruct import \
semantics_to_dependency_graph
from jazzparser.data.dependencies import optimal_node_alignment, \
alignment_to_graph
from jazzparser.formalisms.music_halfspan.semantics import \
EnharmonicCoordinate
if sem1 is None:
max_score1 = 0.0
else:
graph1,timings1 = semantics_to_dependency_graph(sem1)
max_score1 = float(len(graph1))
if sem2 is None:
max_score2 = 0.0
else:
graph2,timings2 = semantics_to_dependency_graph(sem2)
max_score2 = float(len(graph2))
if sem1 is None or sem2 is None:
# Empty input: give zero score to everything
alignment_score = 0.0
alignment = []
transpose = None
else:
graph1,timings1 = semantics_to_dependency_graph(sem1)
graph2,timings2 = semantics_to_dependency_graph(sem2)
graphs = []
# Try all possible transpositions and assume the best
for transx in range(4):
for transy in range(3):
def _label_compare(label1, label2):
if isinstance(label1, EnharmonicCoordinate) and \
isinstance(label2, EnharmonicCoordinate):
coord1 = label1.zero_coord
x2,y2 = label2.zero_coord
return coord1 == ((x2+transx)%4, (y2+transy)%3)
else:
return label1 == label2
# Find the alignment of the nodes that matches most dependencies
alignment = optimal_node_alignment(graph1, graph2, label_compare=_label_compare)
# Get the common dependency graph
graph, node_map1, node_map2 = alignment_to_graph(alignment,
graph1, graph2, label_compare=_label_compare)
graphs.append(graph)
# Score on the basis of the shared dependencies
alignment_score,graph = max([(len(graph),graph) for graph in graphs], key=lambda x:x[0])
return alignment_score,max_score1,max_score2
class DependencyRecovery(FScoreMetric):
"""
Exact dependency recovery metric. Only matches two nodes to each other
if they have the same time attached to them. This is for use with results
where we know the input is the same as that over which the gold standard
is defined.
    For example, we know this when evaluating chord sequence parsing against
    the corpus. It won't work, however, when evaluating MIDI parsing against
    the chord corpus.
It is also not pitch-independent, since it's only useful where the input
over which the result was produced is the same anyway.
"""
name = "deprec"
def _get_identifier(self):
ident = {
'f' : 'f-score',
'precision' : 'precision',
'recall' : 'recall',
'inversef' : 'inverse f-score',
}
return "dependency recovery %s" % ident[self.options['output']]
identifier = property(_get_identifier)
def fscore_match(self, sem1, sem2):
from jazzparser.formalisms.music_halfspan.harmstruct import \
semantics_to_dependency_graph
from jazzparser.data.dependencies import optimal_node_alignment, \
alignment_to_graph
from jazzparser.formalisms.music_halfspan.semantics import \
EnharmonicCoordinate
if sem1 is None:
max_score1 = 0.0
else:
graph1,timings1 = semantics_to_dependency_graph(sem1)
max_score1 = float(len(graph1))
if sem2 is None:
max_score2 = 0.0
else:
graph2,timings2 = semantics_to_dependency_graph(sem2)
max_score2 = float(len(graph2))
if sem1 is None or sem2 is None:
# Empty input: give zero score to everything
alignment_score = 0.0
alignment = []
transpose = None
else:
graph1,timings1 = semantics_to_dependency_graph(sem1)
graph2,timings2 = semantics_to_dependency_graph(sem2)
node_pairs = []
# Always align the root nodes to each other
node_pairs.append((min(graph1.nodes), min(graph2.nodes)))
# Align nodes that occur at the same time
time_nodes1 = dict([(time,node) for (node,time) in timings1.items()])
for node2,time in sorted(timings2.items(), key=lambda x:x[1]):
if time in time_nodes1:
node_pairs.append((time_nodes1[time], node2))
def _label_compare(label1, label2):
if isinstance(label1, EnharmonicCoordinate) and \
isinstance(label2, EnharmonicCoordinate):
return label1.zero_coord == label2.zero_coord
else:
return label1 == label2
# Get the graph of shared dependencies that results from aligning
# simultaneous nodes
graph,node_map1,node_map2 = alignment_to_graph(node_pairs,
graph1, graph2, label_compare=_label_compare)
# Score on the basis of the shared dependencies
alignment_score = len(graph)
return alignment_score,max_score1,max_score2
class RandomDistance(DistanceMetric):
"""
Returns a distance by picking a random number. This is useful for
establishing a random baseline on evaluations.
Obviously it won't be the same for two calls on the same inputs.
"""
OPTIONS = []
name = "rand"
def distance(self, sem1, sem2):
import random
return random.random()
class DependencyGraphSize(DistanceMetric):
"""
This is a baseline metric that does nothing clever. It's designed to
show how well a system could do just by comparing the lengths of the
two analyses in terms of the number of dependencies in them.
We'd hope it wouldn't do very well, but it's an important
baseline to try.
The distance is the inverse ratio between the lengths, always between 0
and 1.
"""
OPTIONS = []
name = "depsize"
def distance(self, sem1, sem2):
from jazzparser.formalisms.music_halfspan.harmstruct import \
semantics_to_dependency_trees
# Get dependency graphs for the two logical forms
trees1 = semantics_to_dependency_trees(sem1)
trees2 = semantics_to_dependency_trees(sem2)
# Count the number of dependencies in each graph
len1 = sum([len(tree) for tree in trees1])
len2 = sum([len(tree) for tree in trees2])
# Take the ratio between the sizes
if len1 > len2:
return 1.0 - (float(len2) / len1)
else:
return 1.0 - (float(len1) / len2)
| gpl-3.0 | 4,548,497,790,447,544,000 | 40.242972 | 102 | 0.542431 | false |
AntonioMtn/NZBMegaSearch | werkzeug/local.py | 1 | 13380 | # -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from wsgi import ClosingIterator
from _internal import _patch_wrapper
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident.
try:
from greenlet import getcurrent as get_ident
except ImportError: # pragma: no cover
try:
from thread import get_ident
except ImportError: # pragma: no cover
from dummy_thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`StackLocal` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
Yu can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return _patch_wrapper(func, self.make_middleware(func))
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
        The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __nonzero__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object())
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object())
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x.__coerce__(x, o)
__enter__ = lambda x: x.__enter__()
__exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
| gpl-2.0 | -5,777,710,656,410,376,000 | 32.959391 | 78 | 0.577055 | false |
robynsen/adventofcode2016 | aoc10-part1.py | 1 | 2772 | import re
def add_value(my_bots, bot_i, val_n):
if bot_i in my_bots:
my_bots[bot_i].append(val_n)
else:
my_bots[bot_i] = [val_n]
def transfer_chips(my_bots, bot_id):
# note: does not cater to both high and low going to same bot that currently holds one chip
for key, value in my_bot_instr[bot_id].items():
if value != 'OUTPUT' and has_max_chips(my_bots, value):
transfer_chips(my_bots, value)
# now the recipients will have < 2 chips
tmp = 0
for key, value in my_bot_instr[bot_id].items():
if key == 'LOW':
tmp = min(my_bots[bot_id])
else:
tmp = max(my_bots[bot_id])
if value != 'OUTPUT':
add_value(my_bots, value, tmp)
my_bots[bot_id].remove(tmp)
def has_max_chips(my_bots, bot_id):
return (bot_id in my_bots and (len(my_bots[bot_id]) > 1))
with open('aoc10-input.txt', 'r') as infile:
# format: value 5 goes to bot 2
add_regex = re.compile(r"value ([0-9]+) goes to bot ([0-9]+)")
# format: bot 2 gives low to bot 1 and high to bot 0
move_regex = re.compile(r"bot ([0-9]+) gives low to (bot|output) ([0-9]+) and high to (bot|output) ([0-9]+)")
# x = for each both ID, a list of chip IDs it holds
my_bots = {}
# x = for each bot ID, a dict of key, value = LOW/HIGH, next bot ID
my_bot_instr = {}
for line in infile:
add_result = add_regex.match(line)
move_result = move_regex.match(line)
if add_result:
my_value = int(add_result.group(1))
bot_target = int(add_result.group(2))
add_value(my_bots, bot_target, my_value)
elif move_result:
bot_src = int(move_result.group(1))
instr_low = move_result.group(2)
bot_low = int(move_result.group(3))
instr_high = move_result.group(4)
bot_high = int(move_result.group(5))
my_bot_instr[bot_src] = {}
for i in ((instr_low, bot_low, 'LOW'), (instr_high, bot_high, 'HIGH')):
if i[0] == 'bot':
                    my_bot_instr.setdefault(bot_src, {}).update({i[2]: i[1]})
                elif i[0] == 'output':
                    my_bot_instr.setdefault(bot_src, {}).update({i[2]: 'OUTPUT'})
result = False
while not result:
# find bot with two chips and pass those on
for key, value in my_bots.items():
if len(value) > 1:
transfer_chips(my_bots, key)
break
for key, value in my_bots.items():
if 17 in value and 61 in value:
print(key)
result = True
| mit | 4,804,833,572,897,079,000 | 35.972603 | 113 | 0.516955 | false |
HewlettPackard/oneview-ansible | library/module_utils/icsp.py | 1 | 2419 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
from future import standard_library
from six.moves.urllib.parse import quote
standard_library.install_aliases()
class ICspHelper(object):
def __init__(self, connection):
"""
ICspHelper constructor.
Args:
connection (connection): ICsp connection.
"""
self.connection = connection
def get_build_plan(self, bp_name):
search_uri = '/rest/index/resources?filter="name=\'' + quote(bp_name) + '\'"&category=osdbuildplan'
search_result = self.connection.get(search_uri)
if search_result['count'] > 0 and search_result['members'][0]['name'] == bp_name:
return search_result['members'][0]
return None
def get_server_by_ilo_address(self, ilo):
servers = self.connection.get("/rest/os-deployment-servers/?count=-1")
for srv in servers['members']:
if srv['ilo']:
if srv['ilo']['ipAddress'] == ilo:
return srv
return None
def get_server_by_serial(self, serial):
search_uri = '/rest/index/resources?category=osdserver&query=\'osdServerSerialNumber:\"' + serial + '\"\''
search_result = self.connection.get(search_uri)
if search_result['count'] > 0:
same_serial_number = search_result['members'][0]['attributes']['osdServerSerialNumber'] == serial
if same_serial_number:
server_id = search_result['members'][0]['attributes']['osdServerId']
server = {'uri': '/rest/os-deployment-servers/' + server_id}
return server
return None
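# Illustrative usage (assumes an already-authenticated ICsp connection object;
# the build plan name, iLO address and serial number below are made up):
#
#   helper = ICspHelper(connection)
#   plan = helper.get_build_plan('RHEL 7.2 OS Build Plan')
#   server = helper.get_server_by_ilo_address('192.0.2.10')
#   server2 = helper.get_server_by_serial('VCGE9KB041')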
| apache-2.0 | 825,880,208,411,244,400 | 35.104478 | 114 | 0.625052 | false |
donnell74/CSC-450-Scheduler | genetic/interface.py | 1 | 50921 | import csv
from structures import *
import os
import yaml
import xml.etree.ElementTree as ET
from scheduler import *
from time import strftime, gmtime
from weakref import ref
from datetime import date, time as time_obj
import constraint
from Tkinter import Tk
from tkMessageBox import showinfo
def course_has_all_attributes(course):
"""
Determine if a course object (built from yaml override, not a true Course object)
has all requisite properties (code, period, credit, instructor,
capacity, needs_computers, is_lab)
IN: course object from yaml override
OUT: True if course has all attributes else False
"""
required_attributes = ["code", "period", "credit", "instructor", "capacity", "needs_computers", "is_lab"]
for attribute in required_attributes:
if attribute not in course:
return False
return True
def room_has_all_attributes(room):
"""
Determine if a room object (built from yaml override, not a true Room object)
has all requisite properties (building, number, capacity, has_computers)
IN: room object from yaml override
OUT: True if room has all attributes else False
"""
required_attributes = ["building", "number", "capacity", "has_computers"]
for attribute in required_attributes:
if attribute not in room:
return False
return True
def get_semester_to_schedule(path_to_yaml):
"""
Given the path to the override file, return the specific semester to be planned.
If no valid override input is found, use today's date to guess at the semester
and year. For example, if it is currently Fall or Winter, the guess will
be for the Spring of the same year. If it is currently Spring or Summer,
the guess will be for the Fall of next year.
IN: path to yaml override file.
OUT: a tuple representing the semester and year to be planned
e.g.: ('Fall', 2015)
"""
try:
yaml_file = open(path_to_yaml, 'r')
yaml_dict = yaml.load(yaml_file)
yaml_file.close()
semester_object = yaml_dict['data']['semester'].split(' ')
fall_or_spring = semester_object[0].lower()
if fall_or_spring in ['fall', 'fa', 'f']:
fall_or_spring = "Fall"
elif fall_or_spring in ['spring', 'sp', 's']:
fall_or_spring = "Spring"
else:
raise IOError("Error: Invalid semester input.")
# we know at least fall / spring
if fall_or_spring == "Fall":
year_upcoming = date.today().year
else:
year_upcoming = date.today().year + 1
if len(semester_object) == 1: # no year is specified
semester_year = year_upcoming
elif int(semester_object[1]) < year_upcoming: # don't schedule past years
semester_year = year_upcoming
else:
semester_year = int(semester_object[1])
return (fall_or_spring, semester_year)
except (IOError, TypeError): # override file not found or invalid information entered;
# guess the semester to schedule
# "day of year" ranges for the northern hemisphere
spring_or_summer = range(80, 264)
# fall_or_winter = everything else
# get today's 'day number'. (Jan 1st -> 1)
day_num = date.today().timetuple().tm_yday
"""
If it is spring or summer, we guess that the semester to plan
is the upcoming fall. Otherwise, we guess it is the upcoming spring.
"""
if day_num in spring_or_summer:
# we guess that we're planning for fall of the same year
fall_or_spring = "Fall"
semester_year = date.today().year
else:
# we guess that we're planning for spring of next year
fall_or_spring = "Spring"
semester_year = date.today().year + 1
print "No override found. Guessing at the semester to be planned:",
print fall_or_spring, semester_year
return (fall_or_spring, semester_year)
except ValueError:
print "Invalid input. Please use the format 'Fall 2015'"
return
except Exception as e:
print e
return
## Function that creates an xml input file (Input.xml) from yaml
# @param path_to_global The path to the global input
# @param path_to_override The path to the override input
# @return none
def create_xml_input_from_yaml(path_to_global, path_to_override):
"""
Creates an xml input file (Input.xml) from yaml.
IN: path to yaml input
OUT: None (does not return anything; creates Input.xml in genetic/seeds/)
"""
def course_object_to_xml_string(code, period, credit, instructor, prereq,
capacity, needs_computers, is_lab):
        # prereq may be None or empty, so build the attribute from a separate
        # template instead of calling .upper() on it
if prereq:
unformatted_xml_string = ("<item code='{0}' period='{1}' credit='{2}' instructor='{3}' prereq='{4}' "
"capacity='{5}' needs_computers='{6}' is_lab='{7}'></item>")
return unformatted_xml_string.format(code.upper(), period.upper(), credit, enforce_capital_first_letter(instructor),
prereq.upper(), capacity, needs_computers, is_lab)
else: # prereq == None
unformatted_xml_string = ("<item code='{0}' period='{1}' credit='{2}' instructor='{3}' prereq='' "
"capacity='{4}' needs_computers='{5}' is_lab='{6}'></item>")
return unformatted_xml_string.format(code.upper(), period.upper(), credit, enforce_capital_first_letter(instructor),
capacity, needs_computers, is_lab)
def room_object_to_xml_string(building, number, capacity, has_computers):
unformatted_xml_string = ("<item building='{0}' number='{1}' capacity='{2}' "
"has_computers='{3}'></item>")
return unformatted_xml_string.format(building.upper(), number, capacity, has_computers)
def print_number_spaces(num):
return " " * num
def print_indent(num):
return print_number_spaces(2 * num)
def schedule_tag(name):
return "<schedule name='{0}'>".format(name)
def tag(name, closing = False):
if closing == True:
return "</{0}>".format(name)
else:
return "<{0}>".format(name)
def newline():
return "\n"
def xml_header():
return "<?xml version='1.0'?>"
def valid_credit_hour_input():
''' Validates that course credit hours are 1, 3, or 4.
And that a lab is only 1 credit hour.
Returns False if credit input is invalid.'''
error_title = ''
error_message = ''
is_valid_input = True
invalid_course_credits = False
invalid_lab_credits = False
# check for invalid credit hours
for course in course_list:
if not course['credit'] in [1, 3, 4] and course['is_lab'] == 0:
if not invalid_course_credits:
invalid_course_credits = True
else:
continue
error_title = 'Error: course credit hours'
                error_message = 'Error for course {0}.\n'.format(course['code']) + \
'The course credit hour "' + str(course['credit']) + \
'" is \nnot an acceptable credit hour.' + \
'\nCredit hours must ' + \
'be 1, 3, or 4.\n' + \
'\nPlease change this in:\n' + \
'genetic\seeds\input.yaml'
is_valid_input = False
show_error_message(error_title, error_message)
if course['is_lab'] == 1 and course['credit'] != 1:
if not invalid_lab_credits:
invalid_lab_credits = True
else:
continue
error_title = 'Error: lab credit hours'
error_message = 'Error for course {0}.\n'.format(course['code']) + \
'The lab credit hour "' + str(course['credit']) + \
'" is \nnot an acceptable lab credit.' + \
'\nLab credit must be 1 hour.\n' + \
'\nPlease change this in:\n' + \
'genetic\seeds\input.yaml'
is_valid_input = False
show_error_message(error_title, error_message)
return is_valid_input
def show_error_message(error_title, error_message):
''' Displays an error message '''
root = Tk()
root.withdraw() # hide tkinter window
# display tkMessageBox
showinfo(error_title, error_message)
def this_course_in_course_list(course_code, course_list):
for course in course_list:
if course_code == course['code']:
return True
return False
def update_attribute_in_course_list(course_code, attr, new_attr, course_list):
try:
for course in course_list:
if course_code == course['code']:
course[attr] = str(new_attr)
except:
print "There was a problem with updating '" + str(attr) + "' for " + str(course_code)
return course_list
try:
global_file = open(path_to_global, 'r')
global_dict = yaml.load(global_file)
global_file.close()
yaml_data_object = global_dict['data']['schedule']
schedule_name = yaml_data_object['name']
course_list = yaml_data_object['course_list']
time_list_tr = yaml_data_object['time_list_tr']
time_list_mwf = yaml_data_object['time_list_mwf']
room_list = yaml_data_object['room_list']
if not valid_credit_hour_input():
exit() # exit the scheduler
override_file = open(path_to_override, 'r')
override_dict = yaml.load(override_file)
override_file.close()
course_overrides = override_dict['data']['course_list']
room_overrides = override_dict['data']['room_list']
# if there are overrides, update the global vars before outputting
if course_overrides:
for this_course in course_overrides:
# at least the course code must be specified
if 'code' in this_course:
this_code = this_course['code']
if this_course_in_course_list(this_code, course_list):
# course already exists; modify information
if 'period' in this_course:
new_period = this_course['period']
course_list = update_attribute_in_course_list(this_code, "period",
new_period, course_list)
if 'instructor' in this_course:
new_instructor = this_course['instructor']
course_list = update_attribute_in_course_list(this_code, "instructor",
new_instructor, course_list)
if 'prereq' in this_course:
new_prereq = this_course['prereq']
course_list = update_attribute_in_course_list(this_code, "prereq",
new_prereq, course_list)
if 'capacity' in this_course:
new_capacity = this_course['capacity']
course_list = update_attribute_in_course_list(this_code, "capacity",
new_capacity, course_list)
if 'needs_computers' in this_course:
new_needs_computers = this_course['needs_computers']
course_list = update_attribute_in_course_list(this_code, "needs_computers",
new_needs_computers, course_list)
if 'is_lab' in this_course:
new_is_lab = this_course['is_lab']
course_list = update_attribute_in_course_list(this_code, "is_lab",
new_is_lab, course_list)
# course does not already exist; check for required information
elif course_has_all_attributes(this_course):
new_course = {'code': this_course['code'].upper(),
'period': this_course['period'].upper(),
'credit': this_course['credit'],
'instructor': enforce_capital_first_letter(this_course['instructor']),
'prereq': this_course['prereq'].upper(),
'capacity': this_course['capacity'],
'needs_computers': this_course['needs_computers'],
'is_lab': this_course['is_lab']}
course_list.append(new_course)
else:
print "Incomplete information supplied in the override for " + \
this_course['code'] + ". Ignoring..."
if room_overrides:
for this_room in room_overrides:
if room_has_all_attributes(this_room):
new_room = {"building": this_room['building'].upper(),
"number": this_room['number'],
"capacity": this_room['capacity'],
"has_computers": this_room['has_computers']}
room_list.append(new_room)
# now that we have dealt with any existing overrides, output to xml
xml_file = open('./genetic/seeds/Input.xml', 'w')
indent_level = 0
xml_file.write(print_indent(indent_level) + xml_header() + newline())
xml_file.write(print_indent(indent_level) + tag("data") + newline())
indent_level += 1
xml_file.write(print_indent(indent_level) + schedule_tag(schedule_name) + newline())
indent_level += 1
xml_file.write(print_indent(indent_level) + tag("courseList") + newline())
indent_level += 1
for course in course_list:
if course['prereq'] == None:
course_prereq = ""
else:
course_prereq = course['prereq']
course_xml_string = course_object_to_xml_string(code = course['code'],
period = course['period'],
credit = course['credit'],
instructor = course['instructor'],
prereq = course_prereq,
capacity = course['capacity'],
needs_computers = course['needs_computers'],
is_lab = course['is_lab'])
xml_file.write(print_indent(indent_level) + course_xml_string + newline())
indent_level -= 1
xml_file.write(print_indent(indent_level) + tag("courseList", closing = True) + newline())
xml_file.write(print_indent(indent_level) + tag("roomList") + newline())
indent_level += 1
for room in room_list:
room_xml_string = room_object_to_xml_string(building = room['building'],
number = room['number'],
capacity = room['capacity'],
has_computers = room['has_computers'])
xml_file.write(print_indent(indent_level) + room_xml_string + newline())
indent_level -= 1
xml_file.write(print_indent(indent_level) + tag("roomList", closing = True) + newline())
xml_file.write(print_indent(indent_level) + tag("timeListMWF") + newline())
indent_level += 1
for time_slot in time_list_mwf:
xml_time_slot_string = "{0}{1}{2}".format(tag("item"), time_slot, tag("item", closing = True))
xml_file.write(print_indent(indent_level) + xml_time_slot_string + newline())
indent_level -= 1
xml_file.write(print_indent(indent_level) + tag("timeListMWF", closing = True) + newline())
xml_file.write(print_indent(indent_level) + tag("timeListTR") + newline())
indent_level += 1
for time_slot in time_list_tr:
xml_time_slot_string = "{0}{1}{2}".format(tag("item"), time_slot, tag("item", closing = True))
xml_file.write(print_indent(indent_level) + xml_time_slot_string + newline())
indent_level -= 1
xml_file.write(print_indent(indent_level) + tag("timeListTR", closing = True) + newline())
indent_level -= 1
xml_file.write(print_indent(indent_level) + tag("schedule", closing = True) + newline())
indent_level -= 1
xml_file.write(print_indent(indent_level) + tag("data", closing = True) + newline())
xml_file.close()
return
except Exception as exception_instance:
print(exception_instance)
return None
def enforce_capital_first_letter(str):
""" ensures that a string takes the form "Abcd" instead of "abcd" or "ABCD" """
first_letter = str[:1]
rest_of_str = str[1:]
first_letter = first_letter.upper()
rest_of_str = rest_of_str.lower()
fixed_str = first_letter + rest_of_str
return fixed_str
def create_constraints_from_yaml(path_to_yaml, scheduler, instructor_objs):
""" Takes an input YAML file (default_constraints.yaml) and generates appropriate
constraints, then adds them to the scheduler.
NOTE: Helper functions are defined first, and then the parsing and generating begins
IN: a YAML file containing all the default constraints
OUT: the constraints will be added to the program and displayed in the Added Constraints
screen on the constraint page
"""
def pull_instructor_obj(instructor_name):
""" finds the instructor object for the given name """
for instr in instructor_objs:
if instructor_name.upper() == instr.name.upper():
return instr
def str_to_time(time_str):
""" converts a time string ("12:30") into a time obj """
t_hr, t_min = time_str.split(":")
return time_obj( int(t_hr), int(t_min) )
def get_priority_value(priority):
""" Turns the string value of priority into the
appropriate weight (int) value. """
priority = enforce_capital_first_letter(priority)
priorities = {"Low": 10,
"Medium": 25,
"High": 50
}
# Look up number value from dict. Return 0 if mandatory
priority = priorities.get(priority, 0)
return priority
def course_time(constraint_dict, scheduler):
""" Takes a dictionary of data required for a course
        time constraint: code, before_after, time, priority.
IN: a dictionary with appropriate data fields
OUT: adds course constraint to scheduler.
"""
constraint_dict["code"] = constraint_dict["code"].upper()
constraint_name = constraint_dict["code"] + "_" +\
constraint_dict["before_after"].lower() + "_" \
+ constraint_dict["time"]
        course_obj = constraint_dict["code"].upper()
        if course_obj == "ALL":  # "code" was upper-cased above
            course_obj = scheduler.courses
        else: # find the course object
            for c in scheduler.courses:
                if course_obj == c.code: # found it
                    course_obj = c
                    break
            # fail silently if invalid course
            try:
                if course_obj not in scheduler.courses:
                    return
            except:
                return
priority = get_priority_value(constraint_dict["priority"])
if priority == 0:
is_mandatory = True
else:
is_mandatory = False
timeslot_obj = str_to_time(constraint_dict["time"])
# scheduler.courses is course list
if constraint_dict["before_after"].upper() == "BEFORE":
scheduler.add_constraint(constraint_name,
priority,
constraint.course_before_time,
[course_obj, timeslot_obj, is_mandatory])
else: # after constraint
scheduler.add_constraint(constraint_name,
priority,
constraint.course_after_time,
[course_obj, timeslot_obj, is_mandatory])
def course_day(constraint_dict, scheduler):
""" Takes a dictionary of data required for a course
day constraint: code, day_code, priority.
IN: a dictionary with appropriate data fields
OUT: adds course constraint to scheduler.
"""
constraint_name = constraint_dict["code"] + "_" + constraint_dict["day_code"].upper()
course_obj = constraint_dict["code"]
for c in scheduler.courses:
if course_obj == c.code: # found it
course_obj = c
break
# fail silently if invalid course
try:
if course_obj not in scheduler.courses:
return
except:
return
day_code = constraint_dict["day_code"].lower()
if len(day_code) == 0 or len(day_code) == 5:
return # drop silently, bad constraint
priority = get_priority_value(constraint_dict["priority"])
if priority == 0:
is_mandatory = True
else:
is_mandatory = False
scheduler.add_constraint(constraint_name,
priority,
constraint.partial_schedule_day,
[course_obj, day_code, is_mandatory])
def course_room(constraint_dict, scheduler):
""" Takes a dictionary of data required for a course
room constraint: code, rooms, priority.
IN: a dictionary with appropriate data fields
OUT: adds course constraint to scheduler.
"""
rooms = constraint_dict["rooms"]
for r in rooms:
r = r.upper()
constraint_name = constraint_dict["code"] + "_" + rooms[0].upper()
if len(rooms) > 1:
constraint_name += "..."
course_obj = constraint_dict["code"]
for c in scheduler.courses:
if course_obj == c.code: # found it
course_obj = c
break
# fail silently if invalid course
try:
if course_obj not in scheduler.courses:
return
except:
return
if len(rooms) == 0:
return # drop silently, bad constraint
priority = get_priority_value(constraint_dict["priority"])
if priority == 0:
is_mandatory = True
else:
is_mandatory = False
scheduler.add_constraint(constraint_name,
priority,
constraint.partial_schedule_room,
[course_obj, rooms, is_mandatory])
def avoid_overlap(constraint_dict, scheduler):
""" Takes a dictionary of data required for a manual
        concurrence (avoid conflict) constraint: code, courses, days,
        start_time, end_time, priority.
IN: a dictionary with appropriate data fields
OUT: adds course constraint to scheduler.
"""
courses = constraint_dict["courses"]
for i in range(len(courses)):
courses[i] = courses[i].upper()
constraint_name = "avoid overlap" + "_" + constraint_dict["code"].upper() + "_" + courses[0]
if len(courses) > 1:
constraint_name += "..."
course_objs = []
for each_course in courses:
for c in scheduler.courses:
if each_course.upper() == c.code.upper(): # found it
course_objs.append(c)
break
# fail silently if invalid course
try:
for each_course_obj in course_objs:
if each_course_obj not in scheduler.courses:
return
except:
return
if len(courses) == 0:
return # drop silently, bad constraint
day_code = constraint_dict["days"].lower()
priority = get_priority_value(constraint_dict["priority"])
if priority == 0:
is_mandatory = True
else:
is_mandatory = False
start_time = str_to_time(constraint_dict["start_time"])
end_time = str_to_time(constraint_dict["end_time"])
scheduler.add_constraint(constraint_name,
priority,
constraint.avoid_overlap,
[course_objs, start_time,
end_time, day_code, is_mandatory])
def instructor_time_pref(constraint_dict, scheduler):
""" This takes in a dictionary of the data required for an
instructor time preference constraint. instr_name, before_after, time, priority.
IN: A dictionary with the appropriate data fields
OUT: adds the constraint to the scheduler
"""
constraint_name = constraint_dict["instr_name"] + \
"_prefers_" + \
constraint_dict["before_after"].lower() + \
"_" + constraint_dict["time"]
priority = get_priority_value(constraint_dict["priority"])
if priority == 0:
is_mandatory = True
else:
is_mandatory = False
instr_obj = pull_instructor_obj(constraint_dict["instr_name"])
if instr_obj is None: return # instructor does not exist; do not add constraint
timeslot_obj = str_to_time(constraint_dict["time"])
if constraint_dict["before_after"].lower() == "before":
scheduler.add_constraint(constraint_name,
priority,
constraint.instructor_time_pref_before,
[instr_obj, timeslot_obj, is_mandatory] )
else: # after
scheduler.add_constraint(constraint_name,
priority,
constraint.instructor_time_pref_after,
[instr_obj, timeslot_obj, is_mandatory] )
def max_courses(constraint_dict, scheduler):
""" Takes a dictionary of required data to generate an
instructor_max_courses constraint.
IN: a dictionary of appropriate data
OUT: a max_courses constraint is added to the scheduler
"""
constraint_dict["instr_name"] = enforce_capital_first_letter(constraint_dict["instr_name"])
constraint_name = constraint_dict["instr_name"] + \
"_max_courses_" + str(constraint_dict["max_courses"])
priority = get_priority_value(constraint_dict["priority"])
if priority == 0:
is_mandatory = True
else:
is_mandatory = False
max_courses = constraint_dict["max_courses"]
instr_obj = pull_instructor_obj(constraint_dict["instr_name"])
if instr_obj is None: return # instructor does not exist; do not add constraint
scheduler.add_constraint(constraint_name,
priority,
constraint.instructor_max_courses,
[instr_obj, max_courses, is_mandatory])
def computer_pref(constraint_dict, scheduler):
""" Takes a dictionary of required data to generate an
instructor computer preference constraint.
IN: a dictionary of appropriate data
OUT: a computer_pref constraint added to the scheduler
"""
constraint_dict["instr_name"] = enforce_capital_first_letter(constraint_dict["instr_name"])
constraint_name = constraint_dict["instr_name"] + \
"_prefers_computers_" + \
enforce_capital_first_letter(str(constraint_dict["prefers_computers"]))
priority = get_priority_value(constraint_dict["priority"])
if priority == 0:
is_mandatory = True
else:
is_mandatory = False
instr_obj = pull_instructor_obj(constraint_dict["instr_name"])
if instr_obj is None: return # instructor does not exist; do not add constraint
        # bool() on any non-empty string is True, so compare the text instead
        prefers_computers = str(constraint_dict["prefers_computers"]).strip().lower() in ("true", "1", "yes")
scheduler.add_constraint(constraint_name,
priority,
constraint.instructor_preference_computer,
[instr_obj, prefers_computers, is_mandatory])
def day_pref(constraint_dict, scheduler):
""" Takes a dictionary of required data to generate an
instructor day preference constraint.
IN: a dictionary of appropriate data
OUT: a day_pref constraint added to the scheduler
"""
constraint_dict["instr_name"] = enforce_capital_first_letter(constraint_dict["instr_name"])
constraint_name = constraint_dict["instr_name"] + \
"_prefers_" + constraint_dict["day_code"].upper()
priority = get_priority_value(constraint_dict["priority"])
if priority == 0:
is_mandatory = True
else:
is_mandatory = False
instr_obj = pull_instructor_obj(constraint_dict["instr_name"])
if instr_obj is None: return # instructor does not exist; do not add constraint
day_code = constraint_dict["day_code"].lower()
if len(day_code) == 0 or len(day_code) == 5:
return # drop silently, bad constraint
scheduler.add_constraint(constraint_name,
priority,
constraint.instructor_preference_day,
[instr_obj, day_code, is_mandatory])
def instructor_break(constraint_dict, scheduler):
""" Takes a dictionary of required data to generate an
instructor break constraint.
IN: a dictionary of appropriate data
OUT: an instructor_break constraint added to the scheduler
"""
constraint_dict["instr_name"] = enforce_capital_first_letter(constraint_dict["instr_name"])
constraint_name = constraint_dict["instr_name"] + \
"_break_" + constraint_dict["break_start"] + \
"_" + constraint_dict["break_end"]
priority = get_priority_value(constraint_dict["priority"])
if priority == 0:
is_mandatory = True
else:
is_mandatory = False
instr_obj = pull_instructor_obj(constraint_dict["instr_name"])
if instr_obj is None: return # instructor does not exist; do not add constraint
gap_start = str_to_time(constraint_dict["break_start"])
gap_end = str_to_time(constraint_dict["break_end"])
scheduler.add_constraint(constraint_name,
priority,
constraint.instructor_break_constraint,
[instr_obj, gap_start, gap_end, is_mandatory])
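# Illustrative only; these example dictionaries are not part of the original code.
# They show the assumed shape of the constraint_dict arguments consumed by the
# helpers above (field values are made up):
_EXAMPLE_TIME_PREF_CONSTRAINT = {
    "instr_name": "Smith",
    "before_after": "Before",
    "time": "10:00",
    "priority": "Low",
}
_EXAMPLE_BREAK_CONSTRAINT = {
    "instr_name": "Smith",
    "break_start": "11:00",
    "break_end": "12:00",
    "priority": "Mandatory",
}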
# begin parsing YAML
input_file = file(path_to_yaml, "r")
yaml_dict = yaml.load(input_file)
if yaml_dict["data"]["constraint_list"]["course_constraints"] is not None:
# course constraints exist
course_constraints = yaml_dict["data"]["constraint_list"]["course_constraints"]
for type in course_constraints:
if course_constraints[type] is not None:
# course constraints exist
for i in range(len(course_constraints[type])): # create each constraint of each type
this_constraint = course_constraints[type][i]
if type == "time":
course_time(this_constraint, scheduler)
elif type == "day":
course_day(this_constraint, scheduler)
elif type == "room":
course_room(this_constraint, scheduler)
elif type == "avoid_overlap":
avoid_overlap(this_constraint, scheduler)
if yaml_dict["data"]["constraint_list"]["instructor_constraints"] is not None:
instr_constraints = yaml_dict["data"]["constraint_list"]["instructor_constraints"]
for type in instr_constraints:
if instr_constraints[type] is not None:
# instructor constraints exist
for i in range(len(instr_constraints[type])): # create every constraint of each type
this_constraint = instr_constraints[type][i]
instr_constraints[type][i]["instr_name"] = enforce_capital_first_letter(instr_constraints[type][i]["instr_name"])
if type == "time_pref":
instructor_time_pref(this_constraint, scheduler)
elif type == "max_courses":
max_courses(this_constraint, scheduler)
elif type == "day_pref":
day_pref(this_constraint, scheduler)
elif type == "computer_pref":
computer_pref(this_constraint, scheduler)
elif type == "instructor_break":
instructor_break(this_constraint, scheduler)
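# Sketch of the YAML layout assumed by the parsing above (illustrative only; the
# instructor-constraint key names come from the lookups in this function, the
# values and course-constraint entries are invented):
#
#   data:
#     constraint_list:
#       course_constraints:
#         time: [...]          # entries handled by course_time()
#         day: [...]           # entries handled by course_day()
#       instructor_constraints:
#         time_pref:
#           - {instr_name: "Smith", before_after: "Before", time: "10:00", priority: "Low"}
#         max_courses:
#           - {instr_name: "Smith", max_courses: 3, priority: "Low"}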
## Function that reads an xml file and schedules all courses found in it
# @param path_to_xml The path_to_xml parameter
# @param slot_divide The slot_divide parameter
# @return return_schedule
def create_scheduler_from_file_test(path_to_xml, slot_divide = 2):
"""Reads in an xml file and schedules all courses found in it
IN: path to xml file as string
OUT: scheduler object with one week based on the xml input"""
tree = ET.parse(path_to_xml)
root = tree.getroot()
instructors = create_instructors_from_courses(path_to_xml)
instructors_dict = dict(zip([inst.name for inst in instructors],
[inst for inst in instructors]))
courses = create_course_list_from_file(path_to_xml, instructors_dict)
rooms = create_room_list_from_file(path_to_xml)
time_slots_mwf, time_slots_tr = create_time_slot_list_from_file(path_to_xml)
time_slot_divide = slot_divide
course_titles = [course.code for course in courses]
setCourses = [i.attrib for i in root.findall("course")]
return_schedule = Scheduler(courses, rooms, time_slots_mwf, time_slots_tr,
int(time_slot_divide))
return_schedule.weeks.append( structures.Week(rooms, return_schedule) )
return_schedule.weeks[0].fill_week(setCourses)
return_schedule.weeks[0].update_sections(return_schedule.courses)
return return_schedule
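# Minimal usage sketch (illustrative; the XML path below is hypothetical and not
# part of the original project):
def _example_build_test_schedule():
    scheduler = create_scheduler_from_file_test("tests/schedules/example_week.xml", slot_divide=2)
    # the single generated week can then be inspected, printed or exported
    return scheduler.weeks[0]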
## Function that reads 5 xml file and creates week objects for these seeds
# @param list_of_weeks_to_schedule_on The list_of_weeks_to_schedule_on parameter
# @param path_to_seeds The path_to_seeds parameter
# @return list_of_weeks_to_schedule_on
def create_weeks_from_seeds(list_of_weeks_to_schedule_on, path_to_seeds):
"""Reads 5 XML files and creates week objects for these seeds
path_to_seeds should look like directory/seed
Seed number and .xml will be appended to it"""
counter = 0
for each_week in list_of_weeks_to_schedule_on[:5]:
counter += 1
tree = ET.parse(path_to_seeds + str(counter) + '.xml')
root = tree.getroot()
setCourses = [i.attrib for i in root.findall("course")]
each_week.fill_week(setCourses)
return list_of_weeks_to_schedule_on
## Function that creates a list of course objects without an instructors_dict
# @param path_to_xml The path_to_xml parameter
# @return None
def create_course_list_from_file_test(path_to_xml):
"""For testing purposes. Creates a list of course objects without an instructors_dict
IN: path to xml file as string
OUT: list of course objects based on xml"""
try:
tree = ET.parse(path_to_xml)
root = tree.getroot()
instructors = create_instructors_from_courses(path_to_xml)
instructors_dict = dict(zip([inst.name for inst in instructors],
[inst for inst in instructors]))
courses = create_course_list_from_file(path_to_xml, instructors_dict)
return courses
except Exception as inst:
print(inst)
return None
def course_should_be_scheduled(period):
"""
Determines if a course should be scheduled based on its periodicity.
For example, if the semester being scheduled is Fall and a course is
only taught in the Spring, it should be ignored.
IN: periodicity of course being considered ["F", "B", "S", "D"]
OUT: True if course should be scheduled else False
"""
yaml_override_path = "genetic/seeds/override.yaml"
this_semester = get_semester_to_schedule(yaml_override_path)[0]
return (period in ["B"]) or \
(this_semester == "Fall" and period == "F") or \
(this_semester == "Spring" and period == "S")
## Function that reads an xml file and creates a list of course objects
# @param path_to_xml The path_to_xml parameter
# @param instructors_dict The instructors_dict parameter
# @return List of course objects; courses are assigned to instructors
def create_course_list_from_file(path_to_xml, instructors_dict):
"""Reads an xml file and creates a list of course objects from it
IN: xml path and an instructors_dict (instructor name, instructor object)
OUT: list of course objects; courses are assigned to instructors"""
try:
tree = ET.parse(path_to_xml)
root = tree.getroot()
instructor_strings = [c.attrib["instructor"] for c in
root.find("schedule").find("courseList").getchildren()]
courses = []
for c in root.find("schedule").find("courseList").getchildren():
instructor = instructors_dict[c.attrib["instructor"]]
# only schedule courses with valid periodicity
if c.attrib["period"] in ["F", "S", "B", "D"]:
# only schedule courses with periodicity occurring in this semester
if course_should_be_scheduled(c.attrib["period"]):
course = Course(code = c.attrib["code"],
credit = int(c.attrib["credit"]),
instructor = instructor,
capacity = int(c.attrib["capacity"]),
needs_computers = bool(int(c.attrib["needs_computers"])),
is_lab = bool(int(c.attrib["is_lab"])))
instructor.add_course(course)
courses.append(course)
return courses
except Exception as inst:
print(inst)
return None
## Function that reads an xml file and creates a list of rooms objects
# @param path_to_xml The path_to_xml parameter
# @return List of rooms as strings
def create_room_list_from_file(path_to_xml):
"""Reads an xml file and creates a list of rooms (strings) from it
IN: path to xml file
OUT: list of rooms as strings"""
try:
tree = ET.parse(path_to_xml)
root = tree.getroot()
# rooms will be list of tuples
rooms = []
for r in root.find("schedule").find("roomList").getchildren():
# Make tuple with (building, number, capacity, has_computers)
room = (r.attrib["building"], r.attrib["number"], r.attrib["capacity"], r.attrib["has_computers"])
rooms.append(room)
return rooms
except Exception as inst:
print(inst)
return None
## Function that reads an xml file and creates a list of time slots
# @param path_to_xml The path_to_xml parameter
# @return Tuple of 2 lists of time slots as strings (mwf and tr)
def create_time_slot_list_from_file(path_to_xml):
"""Reads an xml file and creates lists of time slots (strings) from it for mwf and tr
IN: path to xml file
OUT: tuple of 2 lists of time slots as strings (mwf and tr)"""
try:
tree = ET.parse(path_to_xml)
root = tree.getroot()
time_slots_mwf = [r.text for r in root.find("schedule").find("timeListMWF").getchildren()]
time_slots_tr = [r.text for r in root.find("schedule").find("timeListTR").getchildren()]
return (time_slots_mwf, time_slots_tr)
except Exception as inst:
print(inst)
return None
## Function that reads an xml file and creates a dictionary of extras
# @param path_to_xml The path_to_xml parameter
# @return Dictionary of extras
def create_extras_list_from_file(path_to_xml):
"""Reads an xml file and creates a dictionary of extras
IN: path to xml file
OUT: dictionary of extras"""
try:
tree = ET.parse(path_to_xml)
root = tree.getroot()
extras = {}
extras["input"] = dict([(parent.tag, [child.text for child in parent\
.getchildren()]) for parent in \
root.find("extra").find("input").getchildren()])
extras["expected"] = dict([(parent.tag, [child.text for child in\
parent.getchildren()]) for parent in \
root.find("extra").getchildren()])["expected"]
return extras
except Exception as inst:
print(inst)
return None
## Function that reads an xml file and creates a list of unique instructor objects
# @param path_to_xml The path_to_xml parameter
# @return List of instructor objects
def create_instructors_from_courses(path_to_xml):
"""Reads an xml file and creates a list of unique instructor objects
IN: path to xml file
OUT: list of instructor objects"""
instructors = []
try:
tree = ET.parse(path_to_xml)
root = tree.getroot()
instructors_unique = []
instructors_raw = [course.attrib["instructor"] for course in
root.find("schedule").find("courseList").getchildren()]
for each_instructor in instructors_raw:
if each_instructor not in instructors_unique:
instructors_unique.append(each_instructor)
instructors_unique = map(lambda i: Instructor(i), instructors_unique)
return instructors_unique
except Exception as inst:
print(inst)
return None
# should be updated to final object attributes (pr)
def export_schedule_xml(week, extras="", prefix="", export_dir="./tests/schedules/"):
"""Exports given week as xml for testing purposes
IN: week object, extras string, prefix string, export directory
OUT: creates an xml file for the given input"""
timestr = strftime("%Y%m%d-%H%M%S", gmtime())
filename = os.path.join(export_dir, prefix + timestr + ".xml")
with open(filename, 'w') as out:
out.write("<?xml version='1.0'?>\n<data>\n")
out.write("<schedule name='" + timestr + "'>\n")
out.write("<courseList>\n")
for each_course in week.schedule.courses:
out.write("<item code='%s' credit='%d' instructor='%s' capacity='%d' needs_computers='%s'></item>\n"\
% (each_course.code, each_course.credit, each_course.instructor, \
each_course.capacity, each_course.needs_computers))
out.write("</courseList>\n")
out.write("<roomList>\n")
for each_room in week.days[0].rooms:
out.write("<item building='%s' number='%d' capacity='%d' has_computers='%s'\n" \
% (each_room.building, each_room.number, each_room.capacity, \
each_room.has_computers))
out.write("</roomList>\n")
out.write("<timeList>\n")
for each_slot in week.schedule.time_slots:
out.write("<item>%s</item>\n" % (each_slot))
out.write("</timeList>\n")
out.write("<timeSlotDivide>" + str(week.schedule.slot_divide) + "</timeSlotDivide>\n")
out.write("</schedule>\n")
# create all the courses
courses_dyct = {}  # structure: {course_code: [credit, start_time, end_time, day_code, room, instructor]}
instructors = []
for each_slot in week.list_time_slots():
if each_slot.course != None:
if courses_dyct.has_key(each_slot.course.code):
courses_dyct[each_slot.course.code][3] += each_slot.day
else:
courses_dyct[each_slot.course.code] = \
[each_slot.course.credit, \
each_slot.start_time, each_slot.end_time, \
each_slot.day, each_slot.room.building + " " + each_slot.room.number, \
each_slot.instructor]
if each_slot.instructor not in instructors:
instructors.append(each_slot.instructor)
for instructor in instructors:
for key in instructor.courses:
# course / credit / startTime / endTime / room number / instructor
out.write("""<course
code="%s"
credit="%s"
startTime="%s"
endTime="%s"
days="%s"
room="%s"
instructor="%s">
</course>\n""" % (str(key), str(courses_dyct[key.code][0]),\
str(courses_dyct[key.code][1])[:-3], str(courses_dyct[key.code][2])[:-3], courses_dyct[key.code][3],\
courses_dyct[key.code][4], courses_dyct[key.code][5]))
out.write("<extra>\n%s</extra>\n" % (extras))
out.write("</data>")
## Function that exports top 5 valid schedules to csv files
# @param weeks The weeks parameter
# @param export_dir The export_dir parameter
# @return Up to 5 csv files for top 5 valid schedules
def export_schedules(weeks, export_dir="./"):
"""Exports top 5 valid schedules to csv
IN: list of week objects, export directory
OUT: up to 5 csv files for top 5 valid schedules"""
counter = 0
num_to_export = len(weeks)
for each_week in weeks:
if each_week.valid:
counter += 1
if counter > 5:
counter = 5
break
filename = os.path.join(export_dir, "schedule_" + str(counter) + ".csv")
if os.path.isfile(filename):
os.remove(filename)
with open(filename, 'w') as out:
out.write(each_week.print_concise().replace(' ', ','))
print("\nExporting " + str(counter) + " schedules")
counter += 1
while counter <= 5:
filename = os.path.join(
export_dir, "schedule_" + str(counter) + ".csv")
if os.path.isfile(filename):
os.remove(filename)
counter += 1
## Function that determine first-level prereqs from xml and list of all courses
# @param path_to_xml The path_to_xml parameter
# @param courses The courses parameter
# @return List of prereq objects
def get_prereqs(path_to_xml, courses):
"""Determine first-level prereqs from xml and list of all courses
IN: path to xml file, list of course objects
OUT: list of prereq objects"""
try:
tree = ET.parse(path_to_xml)
root = tree.getroot()
list_of_prereqs = []
for c in root.find("schedule").find("courseList").getchildren():
#get strings
prereq = c.attrib["prereq"]
if not prereq: #if this course has no prereqs
continue
course_string = c.attrib["code"]
#prepare for prereq operations
prereq = prereq.split(' ')
absolute_course = "".join(course_string.split(' ')[:2])
#prereq operations
prereq_obj = Prereq(absolute_course, courses)
for each_prereq in prereq:
prereq_obj.add_prereq(each_prereq, courses)
list_of_prereqs.append(prereq_obj)
return list_of_prereqs
except Exception as inst:
print(inst)
return None
## Function that Extends prereqs for prereq of each prereq
# @param prereqs The prereqs parameter
# @param courses The courses parameter
# @return list of prereq objects with extended prereqs accounted for
def get_extended_prereqs(prereqs, courses):
"""Extends prereqs for prereq of each prereq
IN: list of prereq objects, list of all course objects
OUT: list of prereq objects with extended prereqs accounted for"""
def find_prereq(prereq_absolute_course, prereqs):
for p in prereqs:
if p.absolute_course == prereq_absolute_course:
return p
#if not found
return None
#courses with a prereq
covered = [p.absolute_course for p in prereqs]
for each_prereq in prereqs: #prereq object
if len(each_prereq.absolute_prereqs) == 0:
continue
for each_absolute_prereq in covered:
if each_absolute_prereq in each_prereq.absolute_prereqs:
derived_prereq_obj = find_prereq(each_absolute_prereq, prereqs)
derived_prereqs = derived_prereq_obj.absolute_prereqs
#next(p for p in covered if each_absolute_prereq == p)
for each_derived in derived_prereqs:
each_prereq.add_prereq(each_derived, courses)
return prereqs
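# Example of the extension performed above (hypothetical course codes):
# if CS300 lists CS200 as a prereq and CS200 lists CS100, then after this call
# the Prereq object for CS300 also records CS100 among its absolute prereqs.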
| mit | 4,397,106,580,024,335,400 | 42.821859 | 133 | 0.560319 | false |
slackapi/python-slackclient | slack_sdk/models/views/__init__.py | 1 | 8553 | import copy
import logging
from typing import List, Optional, Union, Dict
from slack_sdk.models.basic_objects import JsonObject, JsonValidator
from slack_sdk.models.blocks import Block, TextObject, PlainTextObject, Option
class View(JsonObject):
"""View object for modals and Home tabs.
https://api.slack.com/reference/surfaces/views
"""
types = ["modal", "home", "workflow_step"]
attributes = {
"type",
"id",
"callback_id",
"external_id",
"team_id",
"bot_id",
"app_id",
"root_view_id",
"previous_view_id",
"title",
"submit",
"close",
"blocks",
"private_metadata",
"state",
"hash",
"clear_on_close",
"notify_on_close",
}
def __init__(
self,
# "modal", "home", and "workflow_step"
type: str, # skipcq: PYL-W0622
id: Optional[str] = None, # skipcq: PYL-W0622
callback_id: Optional[str] = None,
external_id: Optional[str] = None,
team_id: Optional[str] = None,
bot_id: Optional[str] = None,
app_id: Optional[str] = None,
root_view_id: Optional[str] = None,
previous_view_id: Optional[str] = None,
title: Union[str, dict, PlainTextObject] = None,
submit: Optional[Union[str, dict, PlainTextObject]] = None,
close: Optional[Union[str, dict, PlainTextObject]] = None,
blocks: Optional[List[Union[dict, Block]]] = None,
private_metadata: Optional[str] = None,
state: Optional[Union[dict, "ViewState"]] = None,
hash: Optional[str] = None, # skipcq: PYL-W0622
clear_on_close: Optional[bool] = None,
notify_on_close: Optional[bool] = None,
**kwargs,
):
self.type = type
self.id = id
self.callback_id = callback_id
self.external_id = external_id
self.team_id = team_id
self.bot_id = bot_id
self.app_id = app_id
self.root_view_id = root_view_id
self.previous_view_id = previous_view_id
self.title = TextObject.parse(title, default_type=PlainTextObject.type)
self.submit = TextObject.parse(submit, default_type=PlainTextObject.type)
self.close = TextObject.parse(close, default_type=PlainTextObject.type)
self.blocks = Block.parse_all(blocks)
self.private_metadata = private_metadata
self.state = state
self.hash = hash
self.clear_on_close = clear_on_close
self.notify_on_close = notify_on_close
self.additional_attributes = kwargs
title_max_length = 24
blocks_max_length = 100
close_max_length = 24
submit_max_length = 24
private_metadata_max_length = 3000
callback_id_max_length: int = 255
@JsonValidator('type must be either "modal", "home" or "workflow_step"')
def _validate_type(self):
return self.type is not None and self.type in self.types
@JsonValidator(f"title must be between 1 and {title_max_length} characters")
def _validate_title_length(self):
return self.title is None or 1 <= len(self.title.text) <= self.title_max_length
@JsonValidator(f"views must contain between 1 and {blocks_max_length} blocks")
def _validate_blocks_length(self):
return self.blocks is None or 0 < len(self.blocks) <= self.blocks_max_length
@JsonValidator("home view cannot have submit and close")
def _validate_home_tab_structure(self):
return self.type != "home" or (
self.type == "home" and self.close is None and self.submit is None
)
@JsonValidator(f"close cannot exceed {close_max_length} characters")
def _validate_close_length(self):
return self.close is None or len(self.close.text) <= self.close_max_length
@JsonValidator(f"submit cannot exceed {submit_max_length} characters")
def _validate_submit_length(self):
return self.submit is None or len(self.submit.text) <= int(
self.submit_max_length
)
@JsonValidator(
f"private_metadata cannot exceed {private_metadata_max_length} characters"
)
def _validate_private_metadata_max_length(self):
return (
self.private_metadata is None
or len(self.private_metadata) <= self.private_metadata_max_length
)
@JsonValidator(f"callback_id cannot exceed {callback_id_max_length} characters")
def _validate_callback_id_max_length(self):
return (
self.callback_id is None
or len(self.callback_id) <= self.callback_id_max_length
)
def __str__(self):
return str(self.get_non_null_attributes())
def __repr__(self):
return self.__str__()
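# Minimal usage sketch (illustrative values only; not part of the original module):
def _example_modal_view():
    return View(
        type="modal",
        callback_id="example-modal",
        title="My App",
        submit="Submit",
        blocks=[{"type": "section", "text": {"type": "mrkdwn", "text": "Hello"}}],
    )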
class ViewState(JsonObject):
attributes = {"values"}
logger = logging.getLogger(__name__)
@classmethod
def _show_warning_about_unknown(cls, value):
c = value.__class__
name = ".".join([c.__module__, c.__name__])
cls.logger.warning(
f"Unknown type for view.state.values detected ({name}) and ViewState skipped to add it"
)
def __init__(
self, *, values: Dict[str, Dict[str, Union[dict, "ViewStateValue"]]],
):
value_objects: Dict[str, Dict[str, ViewStateValue]] = {}
new_state_values = copy.copy(values)
for block_id, actions in new_state_values.items():
if actions is None: # skipcq: PYL-R1724
continue
elif isinstance(actions, dict):
new_actions = copy.copy(actions)
for action_id, v in actions.items():
if isinstance(v, dict):
d = copy.copy(v)
value_object = ViewStateValue(**d)
elif isinstance(v, ViewStateValue):
value_object = v
else:
self._show_warning_about_unknown(v)
continue
new_actions[action_id] = value_object
value_objects[block_id] = new_actions
else:
self._show_warning_about_unknown(actions)
self.values = value_objects
def to_dict(self, *args) -> Dict[str, Dict[str, Dict[str, dict]]]: # type: ignore
self.validate_json()
if self.values: # skipcq: PYL-R1705
dict_values: Dict[str, Dict[str, dict]] = {}
for block_id, actions in self.values.items():
if actions:
dict_value: Dict[str, dict] = {
action_id: value.to_dict() # type: ignore
for action_id, value in actions.items() # type: ignore
}
dict_values[block_id] = dict_value
return {"values": dict_values} # type: ignore
else:
return {}
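# Shape of the "values" mapping handled above (illustrative identifiers only):
#   {
#       "block_id_1": {
#           "action_id_1": {"type": "plain_text_input", "value": "hello"},
#       },
#   }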
class ViewStateValue(JsonObject):
attributes = {
"type",
"value",
"selected_date",
"selected_conversation",
"selected_channel",
"selected_user",
"selected_option",
"selected_conversations",
"selected_channels",
"selected_users",
"selected_options",
}
def __init__(
self,
*,
type: Optional[str] = None, # skipcq: PYL-W0622
value: Optional[str] = None,
selected_date: Optional[str] = None,
selected_conversation: Optional[str] = None,
selected_channel: Optional[str] = None,
selected_user: Optional[str] = None,
selected_option: Optional[str] = None,
selected_conversations: Optional[List[str]] = None,
selected_channels: Optional[List[str]] = None,
selected_users: Optional[List[str]] = None,
selected_options: Optional[List[Union[dict, Option]]] = None,
):
self.type = type
self.value = value
self.selected_date = selected_date
self.selected_conversation = selected_conversation
self.selected_channel = selected_channel
self.selected_user = selected_user
self.selected_option = selected_option
self.selected_conversations = selected_conversations
self.selected_channels = selected_channels
self.selected_users = selected_users
if selected_options:
# entries may be plain dicts or already-built Option objects
self.selected_options = [
Option(**d) if isinstance(d, dict) else d for d in selected_options
]
| mit | -5,961,893,389,910,910,000 | 35.241525 | 99 | 0.582836 | false |
alirizakeles/tendenci | tendenci/apps/profiles/management/commands/update_admin_group_perms.py | 1 | 1507 | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import Group as Auth_Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
class Command(BaseCommand):
# create an admin auth group if it doesn't exists, and assign all permissions
# (except 4 auth permission the auth_user, auth_groups..) to this group
# command to run: python manage.py update_admin_group_perms
def handle(self, *args, **options):
out = ''
if hasattr(settings, 'ADMIN_AUTH_GROUP_NAME'):
name = settings.ADMIN_AUTH_GROUP_NAME
else:
name = 'Admin'
try:
auth_group = Auth_Group.objects.get(name=name)
except Auth_Group.DoesNotExist:
auth_group = Auth_Group(name=name)
auth_group.save()
#self.stdout.write('Successfully created an auth group "Admin".')
out = 'Successfully created an auth group "Admin".\n'
# assign permission to group, but exclude the auth content
content_to_exclude = ContentType.objects.filter(app_label='auth')
permissions = Permission.objects.all().exclude(content_type__in=content_to_exclude)
auth_group.permissions = permissions
auth_group.save()
#self.stdout.write('Successfully added all permissions to group "Admin".')
out += 'Successfully added/updated all permissions to group "%s".' % name
print out
| gpl-3.0 | 9,070,616,667,420,341,000 | 43.323529 | 91 | 0.668879 | false |
prculley/gramps | gramps/gui/views/navigationview.py | 1 | 17531 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2007 Donald N. Allingham
# Copyright (C) 2009-2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provide the base classes for GRAMPS' DataView classes
"""
#----------------------------------------------------------------
#
# python modules
#
#----------------------------------------------------------------
from abc import abstractmethod
import logging
_LOG = logging.getLogger('.navigationview')
#----------------------------------------------------------------
#
# gtk
#
#----------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
#----------------------------------------------------------------
#
# Gramps
#
#----------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from .pageview import PageView
from ..actiongroup import ActionGroup
from gramps.gen.utils.db import navigation_label
from gramps.gen.constfunc import mod_key
from ..utils import match_primary_mask
DISABLED = -1
MRU_SIZE = 10
MRU_TOP = [
'<ui>'
'<menubar name="MenuBar">'
'<menu action="GoMenu">'
'<placeholder name="CommonHistory">'
]
MRU_BTM = [
'</placeholder>'
'</menu>'
'</menubar>'
'</ui>'
]
#------------------------------------------------------------------------------
#
# NavigationView
#
#------------------------------------------------------------------------------
class NavigationView(PageView):
"""
The NavigationView class is the base class for all Data Views that require
navigation functionalilty. Views that need bookmarks and forward/backward
should derive from this class.
"""
def __init__(self, title, pdata, state, uistate, bm_type, nav_group):
PageView.__init__(self, title, pdata, state, uistate)
self.bookmarks = bm_type(self.dbstate, self.uistate, self.change_active)
self.fwd_action = None
self.back_action = None
self.book_action = None
self.other_action = None
self.active_signal = None
self.mru_signal = None
self.nav_group = nav_group
self.mru_active = DISABLED
self.uistate.register(state, self.navigation_type(), self.nav_group)
def navigation_type(self):
"""
Indicates the navigation type. Navigation type can be the string
name of any of the primary Objects. A History object will be
created for it, see DisplayState.History
"""
return None
def define_actions(self):
"""
Define menu actions.
"""
PageView.define_actions(self)
self.bookmark_actions()
self.navigation_actions()
def disable_action_group(self):
"""
Normally, this would not be overridden from the base class. However,
in this case, we have additional action groups that need to be
handled correctly.
"""
PageView.disable_action_group(self)
self.fwd_action.set_visible(False)
self.back_action.set_visible(False)
def enable_action_group(self, obj):
"""
Normally, this would not be overridden from the base class. However,
in this case, we have additional action groups that need to be
handled correctly.
"""
PageView.enable_action_group(self, obj)
self.fwd_action.set_visible(True)
self.back_action.set_visible(True)
hobj = self.get_history()
self.fwd_action.set_sensitive(not hobj.at_end())
self.back_action.set_sensitive(not hobj.at_front())
def change_page(self):
"""
Called when the page changes.
"""
hobj = self.get_history()
self.fwd_action.set_sensitive(not hobj.at_end())
self.back_action.set_sensitive(not hobj.at_front())
self.other_action.set_sensitive(not self.dbstate.db.readonly)
self.uistate.modify_statusbar(self.dbstate)
def set_active(self):
"""
Called when the page becomes active (displayed).
"""
PageView.set_active(self)
self.bookmarks.display()
hobj = self.get_history()
self.active_signal = hobj.connect('active-changed', self.goto_active)
self.mru_signal = hobj.connect('mru-changed', self.update_mru_menu)
self.update_mru_menu(hobj.mru)
self.goto_active(None)
def set_inactive(self):
"""
Called when the page becomes inactive (not displayed).
"""
if self.active:
PageView.set_inactive(self)
self.bookmarks.undisplay()
hobj = self.get_history()
hobj.disconnect(self.active_signal)
hobj.disconnect(self.mru_signal)
self.mru_disable()
def navigation_group(self):
"""
Return the navigation group.
"""
return self.nav_group
def get_history(self):
"""
Return the history object.
"""
return self.uistate.get_history(self.navigation_type(),
self.navigation_group())
def goto_active(self, active_handle):
"""
Callback (and usable function) that selects the active person
in the display tree.
"""
active_handle = self.uistate.get_active(self.navigation_type(),
self.navigation_group())
if active_handle:
self.goto_handle(active_handle)
hobj = self.get_history()
self.fwd_action.set_sensitive(not hobj.at_end())
self.back_action.set_sensitive(not hobj.at_front())
def get_active(self):
"""
Return the handle of the active object.
"""
hobj = self.uistate.get_history(self.navigation_type(),
self.navigation_group())
return hobj.present()
def change_active(self, handle):
"""
Changes the active object.
"""
hobj = self.get_history()
if handle and not hobj.lock and not (handle == hobj.present()):
hobj.push(handle)
@abstractmethod
def goto_handle(self, handle):
"""
Needs to be implemented by classes derived from this.
Used to move to the given handle.
"""
def selected_handles(self):
"""
Return the active person's handle in a list. Used for
compatibility with those list views that can return multiple
selected items.
"""
active_handle = self.uistate.get_active(self.navigation_type(),
self.navigation_group())
return [active_handle] if active_handle else []
####################################################################
# BOOKMARKS
####################################################################
def add_bookmark(self, obj):
"""
Add a bookmark to the list.
"""
from gramps.gen.display.name import displayer as name_displayer
active_handle = self.uistate.get_active('Person')
active_person = self.dbstate.db.get_person_from_handle(active_handle)
if active_person:
self.bookmarks.add(active_handle)
name = name_displayer.display(active_person)
self.uistate.push_message(self.dbstate,
_("%s has been bookmarked") % name)
else:
from ..dialog import WarningDialog
WarningDialog(
_("Could Not Set a Bookmark"),
_("A bookmark could not be set because "
"no one was selected."),
parent=self.uistate.window)
def edit_bookmarks(self, obj):
"""
Call the bookmark editor.
"""
self.bookmarks.edit()
def bookmark_actions(self):
"""
Define the bookmark menu actions.
"""
self.book_action = ActionGroup(name=self.title + '/Bookmark')
self.book_action.add_actions([
('AddBook', 'gramps-bookmark-new', _('_Add Bookmark'),
'<PRIMARY>d', None, self.add_bookmark),
('EditBook', 'gramps-bookmark-edit',
_("%(title)s...") % {'title': _("Organize Bookmarks")},
'<shift><PRIMARY>D', None,
self.edit_bookmarks),
])
self._add_action_group(self.book_action)
####################################################################
# NAVIGATION
####################################################################
def navigation_actions(self):
"""
Define the navigation menu actions.
"""
# add the Forward action group to handle the Forward button
self.fwd_action = ActionGroup(name=self.title + '/Forward')
self.fwd_action.add_actions([
('Forward', 'go-next', _("_Forward"),
"%sRight" % mod_key(), _("Go to the next object in the history"),
self.fwd_clicked)
])
# add the Backward action group to handle the Forward button
self.back_action = ActionGroup(name=self.title + '/Backward')
self.back_action.add_actions([
('Back', 'go-previous', _("_Back"),
"%sLeft" % mod_key(), _("Go to the previous object in the history"),
self.back_clicked)
])
self._add_action('HomePerson', 'go-home', _("_Home"),
accel="%sHome" % mod_key(),
tip=_("Go to the default person"), callback=self.home)
self.other_action = ActionGroup(name=self.title + '/PersonOther')
self.other_action.add_actions([
('SetActive', 'go-home', _("Set _Home Person"), None,
None, self.set_default_person),
])
self._add_action_group(self.back_action)
self._add_action_group(self.fwd_action)
self._add_action_group(self.other_action)
def set_default_person(self, obj):
"""
Set the default person.
"""
active = self.uistate.get_active('Person')
if active:
self.dbstate.db.set_default_person_handle(active)
def home(self, obj):
"""
Move to the default person.
"""
defperson = self.dbstate.db.get_default_person()
if defperson:
self.change_active(defperson.get_handle())
else:
from ..dialog import WarningDialog
WarningDialog(_("No Home Person"),
_("You need to set a 'default person' to go to. "
"Select the People View, select the person you want as "
"'Home Person', then confirm your choice "
"via the menu Edit ->Set Home Person."),
parent=self.uistate.window)
def jump(self):
"""
A dialog to move to a Gramps ID entered by the user.
"""
dialog = Gtk.Dialog(_('Jump to by Gramps ID'), self.uistate.window)
dialog.set_border_width(12)
label = Gtk.Label(label='<span weight="bold" size="larger">%s</span>' %
_('Jump to by Gramps ID'))
label.set_use_markup(True)
dialog.vbox.add(label)
dialog.vbox.set_spacing(10)
dialog.vbox.set_border_width(12)
hbox = Gtk.Box()
hbox.pack_start(Gtk.Label(label=_("%s: ") % _('ID')), True, True, 0)
text = Gtk.Entry()
text.set_activates_default(True)
hbox.pack_start(text, False, True, 0)
dialog.vbox.pack_start(hbox, False, True, 0)
dialog.add_buttons(_('_Cancel'), Gtk.ResponseType.CANCEL,
_('_Jump to'), Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
dialog.vbox.show_all()
if dialog.run() == Gtk.ResponseType.OK:
gid = text.get_text()
handle = self.get_handle_from_gramps_id(gid)
if handle is not None:
self.change_active(handle)
else:
self.uistate.push_message(
self.dbstate,
_("Error: %s is not a valid Gramps ID") % gid)
dialog.destroy()
def get_handle_from_gramps_id(self, gid):
"""
Get an object handle from its Gramps ID.
Needs to be implemented by the inheriting class.
"""
pass
def fwd_clicked(self, obj):
"""
Move forward one object in the history.
"""
hobj = self.get_history()
hobj.lock = True
if not hobj.at_end():
hobj.forward()
self.uistate.modify_statusbar(self.dbstate)
self.fwd_action.set_sensitive(not hobj.at_end())
self.back_action.set_sensitive(True)
hobj.lock = False
def back_clicked(self, obj):
"""
Move backward one object in the history.
"""
hobj = self.get_history()
hobj.lock = True
if not hobj.at_front():
hobj.back()
self.uistate.modify_statusbar(self.dbstate)
self.back_action.set_sensitive(not hobj.at_front())
self.fwd_action.set_sensitive(True)
hobj.lock = False
####################################################################
# MRU functions
####################################################################
def mru_disable(self):
"""
Remove the UI and action groups for the MRU list.
"""
if self.mru_active != DISABLED:
self.uistate.uimanager.remove_ui(self.mru_active)
self.uistate.uimanager.remove_action_group(self.mru_action)
self.mru_active = DISABLED
def mru_enable(self):
"""
Enables the UI and action groups for the MRU list.
"""
if self.mru_active == DISABLED:
self.uistate.uimanager.insert_action_group(self.mru_action, 1)
self.mru_active = self.uistate.uimanager.add_ui_from_string(self.mru_ui)
self.uistate.uimanager.ensure_update()
def update_mru_menu(self, items):
"""
Builds the UI and action group for the MRU list.
"""
self.mru_disable()
nav_type = self.navigation_type()
hobj = self.get_history()
menu_len = min(len(items) - 1, MRU_SIZE)
entry = '<menuitem action="%s%02d"/>'
data = [entry % (nav_type, index) for index in range(0, menu_len)]
self.mru_ui = "".join(MRU_TOP) + "".join(data) + "".join(MRU_BTM)
mitems = items[-MRU_SIZE - 1:-1] # Ignore current handle
mitems.reverse()
data = []
for index, handle in enumerate(mitems):
name, obj = navigation_label(self.dbstate.db, nav_type, handle)
data.append(('%s%02d'%(nav_type, index), None, name,
"%s%d" % (mod_key(), index), None,
make_callback(hobj.push, handle)))
self.mru_action = ActionGroup(name=self.title + '/MRU')
self.mru_action.add_actions(data)
self.mru_enable()
####################################################################
# Template functions
####################################################################
@abstractmethod
def build_tree(self):
"""
Rebuilds the current display. This must be overridden by the derived
class.
"""
@abstractmethod
def build_widget(self):
"""
Builds the container widget for the interface. Must be overridden by the
the base class. Returns a gtk container widget.
"""
def key_press_handler(self, widget, event):
"""
Handle the control+c (copy) and control+v (paste), or pass it on.
"""
if self.active:
if event.type == Gdk.EventType.KEY_PRESS:
if (event.keyval == Gdk.KEY_c and
match_primary_mask(event.get_state())):
self.call_copy()
return True
return super(NavigationView, self).key_press_handler(widget, event)
def call_copy(self):
"""
Navigation specific copy (control+c) hander. If the
copy can be handled, it returns true, otherwise false.
The code brings up the Clipboard (if already exists) or
creates it. The copy is handled through the drag and drop
system.
"""
nav_type = self.navigation_type()
handles = self.selected_handles()
return self.copy_to_clipboard(nav_type, handles)
def make_callback(func, handle):
"""
Generates a callback function based off the passed arguments
"""
return lambda x: func(handle)
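# Sketch of a concrete view derived from NavigationView (illustrative only; the
# method names are the abstract/expected hooks defined above):
#
#   class PersonNavView(NavigationView):
#       def navigation_type(self):
#           return 'Person'              # History is keyed on this type
#       def goto_handle(self, handle):
#           ...                          # move the display to the given handle
#       def build_tree(self):
#           ...                          # rebuild the current display
#       def build_widget(self):
#           ...                          # return a Gtk container widget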
| gpl-2.0 | 7,257,362,902,535,874,000 | 33.646245 | 84 | 0.541384 | false |
python-odin/odinweb | odinweb/decorators.py | 1 | 20305 | """
Decorators
~~~~~~~~~~
A collection of decorators for identifying the various types of route.
"""
from __future__ import absolute_import
import odin
from odin.exceptions import ValidationError
from odin.utils import force_tuple, lazy_property, getmeta
from .constants import HTTPStatus, Method, Type
from .data_structures import NoPath, UrlPath, PathParam, Param, Response, DefaultResponse, MiddlewareList
from .helpers import get_resource, create_response
from .resources import Listing, Error
from .utils import to_bool, dict_filter
# Imports for typing support
from typing import Callable, Union, Tuple, Dict, Any, Generator, List, Set, Iterable # noqa
from .data_structures import BaseHttpRequest
from odin import Resource # noqa
# Type definitions
Tags = Union[str, Iterable[str]]
Methods = Union[Method, Iterable[Method]]
Path = Union[UrlPath, str, PathParam]
class Security(object):
"""
Security definition of an object.
"""
def __init__(self, name, *permissions):
# type: (str, str) -> None
self.name = name
self.permissions = set(permissions)
def to_swagger(self):
"""
Return swagger definition of this object.
"""
return {self.name: list(self.permissions)}
class Operation(object):
"""
Decorator for defining an API operation. Usually one of the helpers (listing, detail, update, delete) would be
used in place of this route decorator.
Usage::
class ItemApi(ResourceApi):
resource = Item
@route(path=PathType.Collection, methods=Method.GET)
def list_items(self, request):
...
return items
"""
_operation_count = 0
priority = 100 # Set limit high as this should be the last item
def __new__(cls, func=None, *args, **kwargs):
def inner(callback):
instance = super(Operation, cls).__new__(cls)
instance.__init__(callback, *args, **kwargs)
return instance
return inner(func) if func else inner
def __init__(self, callback, path=NoPath, methods=Method.GET, resource=None, tags=None, summary=None,
middleware=None):
# type: (Callable, Path, Methods, Type[Resource], Tags, str, List[Any]) -> None
"""
:param callback: Function we are routing
:param path: A sub path that can be used as a action.
:param methods: HTTP method(s) this function responses to.
:param resource: Specify the resource that this function encodes/decodes,
default is the one specified on the ResourceAPI instance.
:param tags: Tags to be applied to operation
:param summary: Summary of the what method does (for documentation)
:param middleware: List of additional middleware
"""
self.base_callback = self.callback = callback
self.url_path = UrlPath.from_object(path)
self.methods = force_tuple(methods)
self._resource = resource
# Sorting/hashing
self.sort_key = Operation._operation_count
Operation._operation_count += 1
# If this operation is bound to a ResourceAPI
self.binding = None
self.middleware = MiddlewareList(middleware or [])
self.middleware.append(self) # Add self as middleware to obtain pre-dispatch support
# Security object
self.security = None
# Documentation
self.deprecated = False
self.summary = summary
self.consumes = set()
self.produces = set()
self.responses = set()
self.parameters = set()
self._tags = set(force_tuple(tags))
# Copy values from callback (if defined)
for attr in ('deprecated', 'consumes', 'produces', 'responses', 'parameters', 'security'):
value = getattr(callback, attr, None)
if value is not None:
setattr(self, attr, value)
# Add a default response
self.responses.add(DefaultResponse('Unhandled error', Error))
def __call__(self, request, path_args):
# type: (BaseHttpRequest, Dict[Any]) -> Any
"""
Main wrapper around the operation callback function.
"""
# path_args is passed by ref so changes can be made.
for middleware in self.middleware.pre_dispatch:
middleware(request, path_args)
response = self.execute(request, **path_args)
for middleware in self.middleware.post_dispatch:
response = middleware(request, response)
return response
def __eq__(self, other):
"""
Compare to Operations to identify if they refer to the same endpoint.
Basically this means does the URL path and methods match?
"""
if isinstance(other, Operation):
return all(
getattr(self, a) == getattr(other, a)
for a in ('path', 'methods')
)
return NotImplemented
def __str__(self):
return "{} - {} {}".format(self.operation_id, '|'.join(m.value for m in self.methods), self.path)
def __repr__(self):
return "Operation({!r}, {!r}, {})".format(self.operation_id, self.path, self.methods)
def execute(self, request, *args, **path_args):
# type: (BaseHttpRequest, tuple, Dict[Any]) -> Any
"""
Execute the callback (binding callback if required)
"""
binding = self.binding
if binding:
# Provide binding as decorators are executed prior to binding
return self.callback(binding, request, *args, **path_args)
else:
return self.callback(request, *args, **path_args)
def bind_to_instance(self, instance):
"""
Bind a ResourceApi instance to an operation.
"""
self.binding = instance
self.middleware.append(instance)
def op_paths(self, path_prefix=None):
# type: (Path) -> Generator[Tuple[UrlPath, Operation]]
"""
Yield operations paths stored in containers.
"""
url_path = self.path
if path_prefix:
url_path = path_prefix + url_path
yield url_path, self
@lazy_property
def path(self):
"""
Prepared and setup URL Path.
"""
return self.url_path.apply_args(key_field=self.key_field_name)
@property
def resource(self):
"""
Resource associated with operation.
"""
if self._resource:
return self._resource
elif self.binding:
return self.binding.resource
@lazy_property
def key_field_name(self):
"""
Field identified as the key.
"""
name = 'resource_id'
if self.resource:
key_field = getmeta(self.resource).key_field
if key_field:
name = key_field.attname
return name
@property
def is_bound(self):
# type: () -> bool
"""
Operation is bound to a resource api
"""
return bool(self.binding)
# Docs ####################################################################
def to_swagger(self):
"""
Generate a dictionary for documentation generation.
"""
return dict_filter(
operationId=self.operation_id,
description=(self.callback.__doc__ or '').strip() or None,
summary=self.summary or None,
tags=list(self.tags) or None,
deprecated=self.deprecated or None,
consumes=list(self.consumes) or None,
parameters=[param.to_swagger(self.resource) for param in self.parameters] or None,
produces=list(self.produces) or None,
responses=dict(resp.to_swagger(self.resource) for resp in self.responses) or None,
security=self.security.to_swagger() if self.security else None,
)
@lazy_property
def operation_id(self):
value = getattr(self.base_callback, 'operation_id', None)
return value or "{}.{}".format(self.base_callback.__module__, self.base_callback.__name__)
@property
def tags(self):
# type: () -> Set[str]
"""
Tags applied to operation.
"""
tags = set()
if self._tags:
tags.update(self._tags)
if self.binding:
binding_tags = getattr(self.binding, 'tags', None)
if binding_tags:
tags.update(binding_tags)
return tags
collection = collection_action = operation = Operation
def security(name, *permissions):
"""
Decorator to add security definition.
"""
def inner(c):
c.security = Security(name, *permissions)
return c
return inner
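# Example usage of the security decorator (hypothetical scheme and permission names):
#
#   @security('api_key', 'inventory:read')
#   @operation
#   def list_inventory(request):
#       ...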
def action(callback=None, name=None, path=None, methods=Method.GET, resource=None, tags=None,
summary=None, middleware=None):
# type: (Callable, Path, Path, Methods, Type[Resource], Tags, str, List[Any]) -> Operation
"""
Decorator to apply an action to a resource. An action is applied to a `detail` operation.
"""
# Generate action path
path = path or '{key_field}'
if name:
path += name
def inner(c):
return Operation(c, path, methods, resource, tags, summary, middleware)
return inner(callback) if callback else inner
class WrappedListOperation(Operation):
"""
Decorator to indicate a listing endpoint that uses a listing wrapper.
Usage::
class ItemApi(ResourceApi):
resource = Item
@listing(path=PathType.Collection, methods=Method.Get)
def list_items(self, request, offset, limit):
...
return items
"""
listing_resource = Listing
"""
Resource used to wrap listings.
"""
default_offset = 0
"""
Default offset if not specified.
"""
default_limit = 50
"""
Default limit if not specified.
"""
max_limit = None
"""
Maximum limit.
"""
def __init__(self, *args, **kwargs):
self.listing_resource = kwargs.pop('listing_resource', self.listing_resource)
self.default_offset = kwargs.pop('default_offset', self.default_offset)
self.default_limit = kwargs.pop('default_limit', self.default_limit)
self.max_limit = kwargs.pop('max_limit', self.max_limit)
super(WrappedListOperation, self).__init__(*args, **kwargs)
# Apply documentation
self.parameters.add(Param.query('offset', Type.Integer, "Offset to start listing from.",
default=self.default_offset))
self.parameters.add(Param.query('limit', Type.Integer, "Limit on the number of listings returned.",
default=self.default_limit, maximum=self.max_limit))
self.parameters.add(Param.query('bare', Type.Boolean, "Return a plain list of objects."))
def execute(self, request, *args, **path_args):
# type: (BaseHttpRequest, *Any, **Any) -> Any
# Get paging args from query string
offset = int(request.query.get('offset', self.default_offset))
if offset < 0:
offset = 0
path_args['offset'] = offset
max_limit = self.max_limit
limit = int(request.query.get('limit', self.default_limit))
if limit < 1:
limit = 1
elif max_limit and limit > max_limit:
limit = max_limit
path_args['limit'] = limit
bare = to_bool(request.query.get('bare', False))
# Run base execute
result = super(WrappedListOperation, self).execute(request, *args, **path_args)
if result is not None:
if isinstance(result, tuple) and len(result) == 2:
result, total_count = result
else:
total_count = None
return result if bare else Listing(result, limit, offset, total_count)
class ListOperation(Operation):
"""
Decorator to indicate a listing endpoint that does not use a container.
Usage::
class ItemApi(ResourceApi):
resource = Item
@listing(path=PathType.Collection, methods=Method.Get)
def list_items(self, request, offset, limit):
...
return items
"""
default_offset = 0
"""
Default offset if not specified.
"""
default_limit = 50
"""
Default limit if not specified.
"""
max_limit = None
"""
Maximum limit.
"""
def __init__(self, *args, **kwargs):
self.default_offset = kwargs.pop('default_offset', self.default_offset)
self.default_limit = kwargs.pop('default_limit', self.default_limit)
self.max_limit = kwargs.pop('max_limit', self.max_limit)
super(ListOperation, self).__init__(*args, **kwargs)
# Add validation fields
self._query_fields = [
odin.IntegerField(name='offset', default=self.default_offset,
null=False, min_value=0,
use_default_if_not_provided=True),
odin.IntegerField(name='limit', default=self.default_limit,
null=False, min_value=1, max_value=self.max_limit,
use_default_if_not_provided=True),
]
# Apply documentation
self.parameters.add(Param.query('offset', Type.Integer, "Offset to start listing from.",
default=self.default_offset))
self.parameters.add(Param.query('limit', Type.Integer, "Limit on the number of listings returned.",
default=self.default_limit, maximum=self.max_limit))
def execute(self, request, *args, **path_args):
# type: (BaseHttpRequest, *Any, **Any) -> Any
errors = {}
headers = {}
# Parse query strings
for field in self._query_fields:
value = request.GET.get(field.name, field.default)
try:
value = field.clean(value)
except ValidationError as ve:
errors[field.name] = ve.messages
else:
path_args[field.name] = value
headers['X-Page-{}'.format(field.name.title())] = str(value)
if errors:
raise ValidationError(errors)
# Run base execute
result = super(ListOperation, self).execute(request, *args, **path_args)
if result is not None:
if isinstance(result, tuple) and len(result) == 2:
result, total_count = result
if total_count is not None:
headers['X-Total-Count'] = str(total_count)
return create_response(request, result, headers=headers)
class ResourceOperation(Operation):
"""
Handle processing a request with a resource body.
It is assumed decorator will operate on a class method.
"""
def __init__(self, *args, **kwargs):
self.full_clean = kwargs.pop('full_clean', True)
self.default_to_not_supplied = kwargs.pop('default_to_not_supplied', False)
super(ResourceOperation, self).__init__(*args, **kwargs)
# Apply documentation
self.parameters.add(Param.body('Expected resource supplied with request.'))
def execute(self, request, *args, **path_args):
# type: (BaseHttpRequest, *Any, **Any) -> Any
item = None
if self.resource:
item = get_resource(request, self.resource, full_clean=self.full_clean,
default_to_not_supplied=self.default_to_not_supplied)
# Don't allow key_field to be edited
if hasattr(item, self.key_field_name):
setattr(item, self.key_field_name, None)
return super(ResourceOperation, self).execute(request, item, *args, **path_args)
# Shortcut methods
def listing(callback=None, path=None, method=Method.GET, resource=None, tags=None, summary="List resources",
middleware=None, default_limit=50, max_limit=None, use_wrapper=True):
# type: (Callable, Path, Methods, Resource, Tags, str, List[Any], int, int) -> Operation
"""
Decorator to configure an operation that returns a list of resources.
"""
op_type = WrappedListOperation if use_wrapper else ListOperation
def inner(c):
op = op_type(c, path or NoPath, method, resource, tags, summary, middleware,
default_limit=default_limit, max_limit=max_limit)
op.responses.add(Response(HTTPStatus.OK, "Listing of resources", Listing))
return op
return inner(callback) if callback else inner
def create(callback=None, path=None, method=Method.POST, resource=None, tags=None, summary="Create a new resource",
middleware=None):
# type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation
"""
Decorator to configure an operation that creates a resource.
"""
def inner(c):
op = ResourceOperation(c, path or NoPath, method, resource, tags, summary, middleware)
op.responses.add(Response(HTTPStatus.CREATED, "{name} has been created"))
op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error))
return op
return inner(callback) if callback else inner
def detail(callback=None, path=None, method=Method.GET, resource=None, tags=None, summary="Get specified resource.",
middleware=None):
# type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation
"""
Decorator to configure an operation that fetches a resource.
"""
def inner(c):
op = Operation(c, path or PathParam('{key_field}'), method, resource, tags, summary, middleware)
op.responses.add(Response(HTTPStatus.OK, "Get a {name}"))
op.responses.add(Response(HTTPStatus.NOT_FOUND, "Not found", Error))
return op
return inner(callback) if callback else inner
def update(callback=None, path=None, method=Method.PUT, resource=None, tags=None, summary="Update specified resource.",
middleware=None):
# type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation
"""
Decorator to configure an operation that updates a resource.
"""
def inner(c):
op = ResourceOperation(c, path or PathParam('{key_field}'), method, resource, tags, summary, middleware)
op.responses.add(Response(HTTPStatus.NO_CONTENT, "{name} has been updated."))
op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error))
op.responses.add(Response(HTTPStatus.NOT_FOUND, "Not found", Error))
return op
return inner(callback) if callback else inner
def patch(callback=None, path=None, method=Method.PATCH, resource=None, tags=None, summary="Patch specified resource.",
middleware=None):
# type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation
"""
Decorator to configure an operation that patches a resource.
"""
def inner(c):
op = ResourceOperation(c, path or PathParam('{key_field}'), method, resource, tags, summary, middleware,
full_clean=False, default_to_not_supplied=True)
op.responses.add(Response(HTTPStatus.OK, "{name} has been patched."))
op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error))
op.responses.add(Response(HTTPStatus.NOT_FOUND, "Not found", Error))
return op
return inner(callback) if callback else inner
def delete(callback=None, path=None, method=Method.DELETE, tags=None, summary="Delete specified resource.",
middleware=None):
# type: (Callable, Path, Methods, Tags, str, List[Any]) -> Operation
"""
Decorator to configure an operation that deletes resource.
"""
def inner(c):
op = Operation(c, path or PathParam('{key_field}'), method, None, tags, summary, middleware)
op.responses.add(Response(HTTPStatus.NO_CONTENT, "{name} has been deleted.", None))
op.responses.add(Response(HTTPStatus.NOT_FOUND, "Not found", Error))
return op
return inner(callback) if callback else inner
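# Illustrative wiring of the shortcut decorators above inside a ResourceApi
# (class, resource and store names are made up):
#
#   class ItemApi(ResourceApi):
#       resource = Item
#
#       @listing
#       def list_items(self, request, offset, limit):
#           return item_store.fetch(offset, limit), item_store.count()
#
#       @detail
#       def get_item(self, request, resource_id):
#           return item_store.get(resource_id)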
| bsd-3-clause | 8,623,202,511,612,728,000 | 34.4363 | 119 | 0.606304 | false |
Mgamerz/fsoi_plugins | FSOI_Plugins/Plugins/Desktoppr/DesktopprPlugin.py | 1 | 2766 | import SourceBase
class DesktopprPlugin(SourceBase.SourceBase):
pluginid = '_fsiplugin_desktoppr' #OVERRIDE THIS IN YOUR SUBCLASS. If you don't, the program will ignore your plugin.
sourcename = 'Desktoppr'
sourceurl = 'http://Desktoppr.co'
def __init__(self):
'''Your plugin will be returned a DisplayBundle object. It contains system information like screen resolution.
You should store this in your plugin when it is initialized.
'''
return
def load_plugin(self):
'''This method is called after the dependencies for this module are checked. You should import and store any local dependencies
from modules that are in your plugin folder. You should create any objects you may need here or store the class if necessary.
The reason this method is here is that, if a module this plugin depends on is missing, the plugin can still be instantiated,
and further information can be passed to the user on how to fix it. The program may at one point offer a way to download dependencies.
If you are positive that it will load, you can ignore this method and leave it blank, but it is not recommended.
'''
import DesktopprApi
self.api = DesktopprApi.DesktopprAPI()
return
def get_images(self, displaybundle):
'''This method should return a list of URLs.'''
#Might need to do config specific options here when configuration is implemented.
urls = self.api.get_wallpaper_urls()
url_group = []
for url in urls:
url_group.append((url, self.filename_from_url(url)))
return url_group
def get_source_info(self):
'''This method should return a list containing a human friendly name at index 0, and a human readable url describing the source for this repository.
For example, the EarthPorn subreddit returns a list ['EarthPorn Subreddit', 'http://reddit.com/r/EarthPorn'].
This is used to populate the treeview object with your source information.'''
return [self.sourcename, self.sourceurl]
def get_pluginid(self):
'''This method should return a string that represents this plugins ID.
The pluginid is used to make calls to this plugin when necessary. It should be unique as ids are in a shared pool,
so make sure the id is unique. The id should remain the same even when updated as some settings with the pluginid
are persisted by the main application, and they will be lost if the id changes.
'''
return self.pluginid
def get_dependencies(self):
return ['DesktopprApi'] | gpl-3.0 | -8,432,820,261,917,502,000 | 51.230769 | 156 | 0.670282 | false |
richardliaw/ray | python/ray/experimental/client/api.py | 1 | 2068 | # This file defines an interface and client-side API stub
# for referring either to the core Ray API or the same interface
# from the Ray client.
#
# In tandem with __init__.py, we want to expose an API that's
# close to `python/ray/__init__.py` but with more than one implementation.
# The stubs in __init__ should call into a well-defined interface.
# Only the core Ray API implementation should actually `import ray`
# (and thus import all the raylet worker C bindings and such).
# But to make sure that we're matching these calls, we define this API.
from abc import ABC
from abc import abstractmethod
class APIImpl(ABC):
@abstractmethod
def get(self, *args, **kwargs):
pass
@abstractmethod
def put(self, *args, **kwargs):
pass
@abstractmethod
def wait(self, *args, **kwargs):
pass
@abstractmethod
def remote(self, *args, **kwargs):
pass
@abstractmethod
def call_remote(self, f, kind, *args, **kwargs):
pass
@abstractmethod
def close(self, *args, **kwargs):
pass
class ClientAPI(APIImpl):
def __init__(self, worker):
self.worker = worker
def get(self, *args, **kwargs):
return self.worker.get(*args, **kwargs)
def put(self, *args, **kwargs):
return self.worker.put(*args, **kwargs)
def wait(self, *args, **kwargs):
return self.worker.wait(*args, **kwargs)
def remote(self, *args, **kwargs):
return self.worker.remote(*args, **kwargs)
def call_remote(self, f, kind, *args, **kwargs):
return self.worker.call_remote(f, kind, *args, **kwargs)
def close(self, *args, **kwargs):
return self.worker.close()
def __getattr__(self, key: str):
if not key.startswith("_"):
raise NotImplementedError(
"Not available in Ray client: `ray.{}`. This method is only "
"available within Ray remote functions and is not yet "
"implemented in the client API.".format(key))
return self.__getattribute__(key)
| apache-2.0 | 7,225,420,030,808,673,000 | 28.542857 | 77 | 0.62766 | false |
Mikescher/Project-Euler_Befunge | compiled/Python2/Euler_Problem-024.py | 1 | 3038 | #!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABACdUDGOAyEM/AoHW7FBYna5XIKQdQ9B3BUr0VJZKXj8mZAUKXMuzGA8nsHsse3h8/x1uaq3g/RxHNpa8PtcxQ3btQEu/YP8NMA0pWdODzAm0sSU4TLf"
+ "qw1hRUVItKFGrJ36QD5ThIum/DDZPM4ldiHuaApBkqAaUC1Qfz/6Q3l59bFAFZFs54tluRSpdadvWlUfc8pIojt9jfge7p5hijfJsDenVZk05/L9nbDmYQWzscjCnHxg"
+ "G0uzA4WKvQIqlSxa2WmvRY+MUwbKLDJOWJP8B/NXo/XoAQAA")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
def gr(x,y):
if(x>=0 and y>=0 and x<61 and y<8):
return g[y*61 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<61 and y<8):
g[y*61 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
gw(1,1,999999)
gw(2,1,9)
return 1
def _1():
global t0
t0=gr(2,1)
return (3)if(gr(2,1)!=-1)else(2)
def _2():
return 24
def _3():
global t0
return (4)if((t0)!=0)else(23)
def _4():
sa(0)
sa(gr(2,1))
sa(gr(2,1)-1)
sa(gr(2,1)-1)
return 5
def _5():
return (22)if(sp()!=0)else(6)
def _6():
sp();
sa(sp()*1)
return 7
def _7():
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
return (21)if(sp()!=0)else(8)
def _8():
sp();
sa(sr());
return (9)if(sp()!=0)else(20)
def _9():
gw(3,1,sp())
return 10
def _10():
gw(4,1,1)
return 11
def _11():
return (12)if((gr(3,1)*gr(4,1))>gr(1,1))else(19)
def _12():
sa(gr(4,1))
return 13
def _13():
sa(1)
sa(gr(1,0)-120)
return 14
def _14():
return (18)if(sp()!=0)else(15)
def _15():
sa(sp()+1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
return (17)if(sp()!=0)else(16)
def _16():
global t0
sp();
sa(sp()-1)
sa(sr());
sa(0)
v0=sp()
t0=gr(sp(),v0)
t0=t0-48
sa(120)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(0)
v0=sp()
v1=sp()
gw(v1,v0,sp())
gw(1,1,gr(1,1)-(gr(3,1)*(gr(4,1)-1)))
gw(2,1,gr(2,1)-1)
t0=t0+48
sys.stdout.write(chr(t0))
sys.stdout.flush()
return 1
def _17():
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
sa(0)
v0=sp()
sa(gr(sp(),v0))
sa(sp()-120)
return 14
def _18():
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return 15
def _19():
gw(4,1,gr(4,1)+1)
return 11
def _20():
gw(3,1,1)
sp();
return 10
def _21():
sa(sp()*sp());
return 7
def _22():
sa(sr()-1)
sa(sr());
return 5
def _23():
global t0
t0=0
sa(1)
return 13
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23]
c=0
while c<24:
c=m[c]()
| mit | 7,913,050,719,196,176,000 | 16.45977 | 136 | 0.505925 | false |
lambdamusic/testproject | konproj/apps/registration/backends/default/__init__.py | 1 | 5254 | from django.conf import settings
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from registration import signals
from registration.forms import RegistrationForm
from registration.models import RegistrationProfile
class DefaultBackend(object):
"""
A registration backend which follows a simple workflow:
1. User signs up, inactive account is created.
2. Email is sent to user with activation link.
3. User clicks activation link, account is now active.
Using this backend requires that
* ``registration`` be listed in the ``INSTALLED_APPS`` setting
(since this backend makes use of models defined in this
application).
* The setting ``ACCOUNT_ACTIVATION_DAYS`` be supplied, specifying
(as an integer) the number of days from registration during
which a user may activate their account (after that period
expires, activation will be disallowed).
* The creation of the templates
``bstrap3.2.0/registration/activation_email_subject.txt`` and
``bstrap3.2.0/registration/activation_email.txt``, which will be used for
the activation email. See the notes for this backends
``register`` method for details regarding these templates.
Additionally, registration can be temporarily closed by adding the
setting ``REGISTRATION_OPEN`` and setting it to
``False``. Omitting this setting, or setting it to ``True``, will
be interpreted as meaning that registration is currently open and
permitted.
Internally, this is accomplished via storing an activation key in
an instance of ``registration.models.RegistrationProfile``. See
that model and its custom manager for full documentation of its
fields and supported operations.
"""
def register(self, request, **kwargs):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
An email will be sent to the supplied email address; this
email should contain an activation link. The email will be
rendered using two templates. See the documentation for
``RegistrationProfile.send_activation_email()`` for
information about these templates and the contexts provided to
them.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
username, email, password = kwargs['username'], kwargs['email'], kwargs['password1']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
new_user = RegistrationProfile.objects.create_inactive_user(username, email,
password, site)
signals.user_registered.send(sender=self.__class__,
user=new_user,
request=request)
# December 30, 2014
# HACK to get an email when a new user signs up
# try/except should prevent unwanted errors
try:
from django.core.mail import send_mail
subject = 'LiquidQuotes: <%s> has registered' % username
message = 'Good news! The user <%s> has registered. Email is %s' % (username, email)
send_mail( subject, message, '[email protected]',
['[email protected]'], fail_silently=False)
except:
print "Error: Registration Signal: tried to send email to admin but failed"
pass
return new_user
def activate(self, request, activation_key):
"""
Given an an activation key, look up and activate the user
account corresponding to that key (if possible).
After successful activation, the signal
``registration.signals.user_activated`` will be sent, with the
newly activated ``User`` as the keyword argument ``user`` and
the class of this backend as the sender.
"""
activated = RegistrationProfile.objects.activate_user(activation_key)
if activated:
signals.user_activated.send(sender=self.__class__,
user=activated,
request=request)
return activated
def registration_allowed(self, request):
"""
Indicate whether account registration is currently permitted,
based on the value of the setting ``REGISTRATION_OPEN``. This
is determined as follows:
* If ``REGISTRATION_OPEN`` is not specified in settings, or is
set to ``True``, registration is permitted.
* If ``REGISTRATION_OPEN`` is both specified and set to
``False``, registration is not permitted.
"""
return getattr(settings, 'REGISTRATION_OPEN', True)
def get_form_class(self, request):
"""
Return the default form class used for user registration.
"""
return RegistrationForm
def post_registration_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
user registration.
"""
return ('registration_complete', (), {})
def post_activation_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
account activation.
"""
return ('registration_activation_complete', (), {})
| gpl-2.0 | -193,884,851,103,439,680 | 33.116883 | 87 | 0.730681 | false |
IronLanguages/ironpython3 | Tests/test_ipye.py | 1 | 8082 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
##
## Testing IronPython Engine
##
from iptest.assert_util import *
skiptest("win32")
import sys
remove_ironpython_dlls(testpath.public_testdir)
load_iron_python_dll()
# setup Scenario tests in module from EngineTest.cs
# this enables us to see the individual tests that pass / fail
load_iron_python_test()
import IronPython
import IronPythonTest
et = IronPythonTest.EngineTest()
multipleexecskips = [ ]
for s in dir(et):
if s.startswith("Scenario"):
if s in multipleexecskips:
exec('@skip("multiple_execute") \ndef test_Engine_%s(): getattr(et, "%s")()' % (s, s))
else :
exec('def test_Engine_%s(): getattr(et, "%s")()' % (s, s))
#Rowan Work Item 312902
@disabled("The ProfileDrivenCompilation feature is removed from DLR")
def test_deferred_compilation():
save1 = IronPythonTest.TestHelpers.GetContext().Options.InterpretedMode
save2 = IronPythonTest.TestHelpers.GetContext().Options.ProfileDrivenCompilation
modules = sys.modules.copy()
IronPythonTest.TestHelpers.GetContext().Options.ProfileDrivenCompilation = True # this will enable interpreted mode
Assert(IronPythonTest.TestHelpers.GetContext().Options.InterpretedMode)
try:
# Just import some modules to make sure we can switch to compilation without blowing up
import test_namebinding
import test_function
import test_tcf
finally:
IronPythonTest.TestHelpers.GetContext().Options.InterpretedMode = save1
IronPythonTest.TestHelpers.GetContext().Options.ProfileDrivenCompilation = save2
sys.modules = modules
def CreateOptions():
import sys
import clr
o = IronPython.PythonEngineOptions()
if sys.argv.count('-X:ExceptionDetail') > 0: o.ExceptionDetail = True
return o
def a():
raise System.Exception()
def b():
try:
a()
except System.Exception as e:
raise System.Exception("second", e)
def c():
try:
b()
except System.Exception as e:
x = System.Exception("first", e)
return x
#Rowan Work Item 312902
@skip("multiple_execute")
def test_formatexception():
try:
import Microsoft.Scripting
from IronPython.Hosting import Python
pe = Python.CreateEngine()
service = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]()
AssertError(TypeError, service.FormatException, None)
exc_string = service.FormatException(System.Exception("first",
System.Exception("second",
System.Exception())))
AreEqual(exc_string, 'Traceback (most recent call last):[NEWLINE]Exception: first[NEWLINE]'.replace('[NEWLINE]', System.Environment.NewLine))
exc_string = service.FormatException(c())
AreEqual(exc_string.count(" File "), 4)
AreEqual(exc_string.count(" line "), 4)
finally:
pass
#Rowan Work Item 31290
def test_formatexception_showclrexceptions():
import Microsoft.Scripting
from IronPython.Hosting import Python
pe = Python.CreateEngine({'ShowClrExceptions': True})
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(System.Exception("first",
System.Exception("second",
System.Exception())))
AreEqual(exc_string, "Traceback (most recent call last):[NEWLINE]Exception: first[NEWLINE]CLR Exception: [NEWLINE] Exception[NEWLINE]: [NEWLINE]first[NEWLINE] Exception[NEWLINE]: [NEWLINE]second[NEWLINE] Exception[NEWLINE]: [NEWLINE]Exception of type 'System.Exception' was thrown.[NEWLINE]".replace("[NEWLINE]", System.Environment.NewLine))
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(c())
AreEqual(exc_string.count(" File "), 4)
AreEqual(exc_string.count(" line "), 4)
Assert(exc_string.endswith("CLR Exception: [NEWLINE] Exception[NEWLINE]: [NEWLINE]first[NEWLINE] Exception[NEWLINE]: [NEWLINE]second[NEWLINE] Exception[NEWLINE]: [NEWLINE]Exception of type 'System.Exception' was thrown.[NEWLINE]".replace("[NEWLINE]", System.Environment.NewLine)))
@skip("multiple_execute") #CodePlex 20636 - multi-execute
def test_formatexception_exceptiondetail():
import Microsoft.Scripting
from IronPython.Hosting import Python
pe = Python.CreateEngine({'ExceptionDetail': True})
try:
x = System.Collections.Generic.Dictionary[object, object]()
x[None] = 42
except System.Exception as e:
pass
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(System.Exception("first", e))
Assert(exc_string.startswith("first"))
Assert(exc_string.find('Insert') >= 0)
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(c())
Assert(exc_string.endswith("Exception: first[NEWLINE]".replace("[NEWLINE]", System.Environment.NewLine)))
def test_engine_access_from_within():
import clr
from Microsoft.Scripting.Hosting import ScriptEngine
pc = clr.GetCurrentRuntime().GetLanguageByName('python')
engine = pc.GetModuleState(clr.GetClrType(ScriptEngine))
Assert(engine is not None)
def test_import_clr():
from IronPython.Hosting import Python
eng = Python.CreateEngine()
mod = Python.ImportModule(eng, 'clr')
Assert('ToString' not in eng.Operations.GetMemberNames(42))
def test_cp6703():
import clr
clr.AddReference("IronPython")
import IronPython
pe = IronPython.Hosting.Python.CreateEngine()
stuff = '''
import System
a = 2
globals()["b"] = None
globals().Add("c", "blah")
joe = System.Collections.Generic.KeyValuePair[object,object]("d", int(3))
globals().Add(joe)
count = 0
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("b", None)): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("c", "blah")): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("d", int(3))): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("d", 3)): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("a", 2)): count += 1
'''
s = pe.CreateScope()
pe.Execute(stuff, s)
AreEqual(s.count, 5)
def test_cp20594():
import IronPython
AreEqual(IronPython.Runtime.PythonContext.GetIronPythonAssembly("IronPython").split(",", 1)[1],
IronPython.Runtime.PythonContext.GetIronPythonAssembly("IronPython.Modules").split(",", 1)[1])
def test_cp27547():
import clr
clr.AddReference('IronPython')
clr.AddReference('Microsoft.Scripting')
from IronPython.Hosting import Python
from Microsoft.Scripting import SourceCodeKind, ScriptCodeParseResult
engine = Python.CreateEngine()
scope = engine.CreateScope()
text = 'lambda'
source = engine.CreateScriptSourceFromString(text, 'stdin',
SourceCodeKind.InteractiveCode)
result = source.GetCodeProperties()
AreEqual(result, ScriptCodeParseResult.IncompleteToken)
def test_hidden_base():
from IronPythonTest import DerivedFromHiddenBase
a = DerivedFromHiddenBase()
AreEqual(a.Accessible(), 42)
AssertError(AttributeError, lambda: a.Inaccessible)
def test_cp27150():
from IronPythonTest import GenericProperty
from System import DateTime
wrapper = GenericProperty[DateTime]()
def f():
wrapper.Value = None
AssertError(TypeError, f)
#--MAIN------------------------------------------------------------------------
run_test(__name__)
#Make sure this runs last
#test_dispose()
| apache-2.0 | -117,330,867,685,867,000 | 37.669856 | 354 | 0.682876 | false |
NikNitro/Python-iBeacon-Scan | sympy/core/numbers.py | 1 | 109599 | from __future__ import print_function, division
import decimal
import fractions
import math
import warnings
import re as regex
from collections import defaultdict
from .containers import Tuple
from .sympify import converter, sympify, _sympify, SympifyError
from .singleton import S, Singleton
from .expr import Expr, AtomicExpr
from .decorators import _sympifyit
from .cache import cacheit, clear_cache
from .logic import fuzzy_not
from sympy.core.compatibility import (
as_int, integer_types, long, string_types, with_metaclass, HAS_GMPY,
SYMPY_INTS)
import mpmath
import mpmath.libmp as mlib
from mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed
from mpmath.ctx_mp import mpnumeric
from mpmath.libmp.libmpf import (
finf as _mpf_inf, fninf as _mpf_ninf,
fnan as _mpf_nan, fzero as _mpf_zero, _normalize as mpf_normalize,
prec_to_dps)
from sympy.utilities.misc import debug, filldedent
from .evaluate import global_evaluate
from sympy.utilities.exceptions import SymPyDeprecationWarning
rnd = mlib.round_nearest
_LOG2 = math.log(2)
def comp(z1, z2, tol=None):
"""Return a bool indicating whether the error between z1 and z2 is <= tol.
    If ``tol`` is None then True will be returned if there is no significant
    difference between the numbers: ``abs(z1 - z2)*10**p <= 1/2`` where ``p``
is the lower of the precisions of the values. A comparison of strings will
be made if ``z1`` is a Number and a) ``z2`` is a string or b) ``tol`` is ''
and ``z2`` is a Number.
When ``tol`` is a nonzero value, if z2 is non-zero and ``|z1| > 1``
the error is normalized by ``|z1|``, so if you want to see if the
absolute error between ``z1`` and ``z2`` is <= ``tol`` then call this
as ``comp(z1 - z2, 0, tol)``.
"""
if type(z2) is str:
if not isinstance(z1, Number):
raise ValueError('when z2 is a str z1 must be a Number')
return str(z1) == z2
if not z1:
z1, z2 = z2, z1
if not z1:
return True
if not tol:
if tol is None:
if type(z2) is str and getattr(z1, 'is_Number', False):
return str(z1) == z2
a, b = Float(z1), Float(z2)
return int(abs(a - b)*10**prec_to_dps(
min(a._prec, b._prec)))*2 <= 1
elif all(getattr(i, 'is_Number', False) for i in (z1, z2)):
return z1._prec == z2._prec and str(z1) == str(z2)
raise ValueError('exact comparison requires two Numbers')
diff = abs(z1 - z2)
az1 = abs(z1)
if z2 and az1 > 1:
return diff/az1 <= tol
else:
return diff <= tol
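# Illustrative sketch (editorial note, not part of the library): the values
# below are assumed and only show the two tolerance modes described in the
# docstring of ``comp``.
#
#     comp(1.05, 1.0, 0.1)        # relative error ~0.048 <= 0.1 -> True
#     comp(1.50, 1.0, 0.1)        # relative error ~0.33  >  0.1 -> False
#     comp(1.05 - 1.0, 0, 0.1)    # absolute error check, as advised above -> True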
def mpf_norm(mpf, prec):
"""Return the mpf tuple normalized appropriately for the indicated
precision after doing a check to see if zero should be returned or
not when the mantissa is 0. ``mpf_normlize`` always assumes that this
is zero, but it may not be since the mantissa for mpf's values "+inf",
"-inf" and "nan" have a mantissa of zero, too.
Note: this is not intended to validate a given mpf tuple, so sending
mpf tuples that were not created by mpmath may produce bad results. This
is only a wrapper to ``mpf_normalize`` which provides the check for non-
zero mpfs that have a 0 for the mantissa.
"""
sign, man, expt, bc = mpf
if not man:
# hack for mpf_normalize which does not do this;
# it assumes that if man is zero the result is 0
# (see issue 6639)
if not bc:
return _mpf_zero
else:
# don't change anything; this should already
# be a well formed mpf tuple
return mpf
# Necessary if mpmath is using the gmpy backend
from mpmath.libmp.backend import MPZ
rv = mpf_normalize(sign, MPZ(man), expt, bc, prec, rnd)
return rv
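# Editorial sketch: per the branch above, the all-zero tuple collapses to
# ``_mpf_zero`` while special values whose mantissa is 0 but whose bit count is
# non-zero (the mpf tuples for +inf, -inf and nan) are passed through unchanged;
# everything else is handed to ``mpf_normalize`` at the requested precision.
# Assumed call, for illustration only:
#
#     mpf_norm(Float(1.5)._mpf_, 53)   # already normal at 53 bits -> unchanged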
# TODO: we should use the warnings module
_errdict = {"divide": False}
def seterr(divide=False):
"""
Should sympy raise an exception on 0/0 or return a nan?
divide == True .... raise an exception
divide == False ... return nan
"""
if _errdict["divide"] != divide:
clear_cache()
_errdict["divide"] = divide
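# Editorial sketch (calls kept commented out, since they mutate global state):
# with the default setting an indeterminate 0/0 built through Rational returns
# nan, while after ``seterr(divide=True)`` the same construction raises a
# ValueError (see the handling of ``q == 0`` in Rational.__new__ below).
#
#     seterr(divide=True)
#     Rational(0, 0)    # -> ValueError("Indeterminate 0/0")
#     seterr(divide=False)
#     Rational(0, 0)    # -> nan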
def _as_integer_ratio(p):
neg_pow, man, expt, bc = getattr(p, '_mpf_', mpmath.mpf(p)._mpf_)
p = [1, -1][neg_pow % 2]*man
if expt < 0:
q = 2**-expt
else:
q = 1
p *= 2**expt
return int(p), int(q)
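# Editorial sketch: ``_as_integer_ratio`` recovers the exact fraction behind a
# binary float (values assumed for illustration):
#
#     _as_integer_ratio(0.25)   # -> (1, 4)
#     _as_integer_ratio(1.5)    # -> (3, 2)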
def _decimal_to_Rational_prec(dec):
"""Convert an ordinary decimal instance to a Rational."""
if not dec.is_finite():
raise TypeError("dec must be finite, got %s." % dec)
s, d, e = dec.as_tuple()
prec = len(d)
if e >= 0: # it's an integer
rv = Integer(int(dec))
else:
s = (-1)**s
d = sum([di*10**i for i, di in enumerate(reversed(d))])
rv = Rational(s*d, 10**-e)
return rv, prec
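# Editorial sketch: the returned precision counts the significant digits of the
# Decimal, including trailing zeros (values assumed for illustration):
#
#     _decimal_to_Rational_prec(decimal.Decimal('0.25'))     # -> (1/4, 2)
#     _decimal_to_Rational_prec(decimal.Decimal('2.5000'))   # -> (5/2, 5)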
def _literal_float(f):
"""Return True if n can be interpreted as a floating point number."""
pat = r"[-+]?((\d*\.\d+)|(\d+\.?))(eE[-+]?\d+)?"
return bool(regex.match(pat, f))
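# Editorial sketch (assumed inputs): the match is anchored only at the start of
# the string, so this is a cheap plausibility test rather than full validation.
#
#     _literal_float('1.23')    # -> True
#     _literal_float('-.5e10')  # -> True
#     _literal_float('abc')     # -> False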
# (a,b) -> gcd(a,b)
_gcdcache = {}
# TODO: cache via a decorator, but without degrading performance
def igcd(*args):
"""Computes nonnegative integer greatest common divisor.
The algorithm is based on the well known Euclid's algorithm. To
improve speed, igcd() has its own caching mechanism implemented.
Examples
========
>>> from sympy.core.numbers import igcd
>>> igcd(2, 4)
2
>>> igcd(5, 10, 15)
5
"""
if len(args) < 2:
raise TypeError(
'igcd() takes at least 2 arguments (%s given)' % len(args))
if 1 in args:
a = 1
k = 0
else:
a = abs(as_int(args[0]))
k = 1
if a != 1:
while k < len(args):
b = args[k]
k += 1
try:
a = _gcdcache[(a, b)]
except KeyError:
b = as_int(b)
if not b:
continue
if b == 1:
a = 1
break
if b < 0:
b = -b
t = a, b
while b:
a, b = b, a % b
_gcdcache[t] = _gcdcache[t[1], t[0]] = a
while k < len(args):
ok = as_int(args[k])
k += 1
return a
def ilcm(*args):
"""Computes integer least common multiple.
Examples
========
>>> from sympy.core.numbers import ilcm
>>> ilcm(5, 10)
10
>>> ilcm(7, 3)
21
>>> ilcm(5, 10, 15)
30
"""
if len(args) < 2:
raise TypeError(
'ilcm() takes at least 2 arguments (%s given)' % len(args))
if 0 in args:
return 0
a = args[0]
for b in args[1:]:
a = a*b // igcd(a, b)
return a
def igcdex(a, b):
"""Returns x, y, g such that g = x*a + y*b = gcd(a, b).
>>> from sympy.core.numbers import igcdex
>>> igcdex(2, 3)
(-1, 1, 1)
>>> igcdex(10, 12)
(-1, 1, 2)
>>> x, y, g = igcdex(100, 2004)
>>> x, y, g
(-20, 1, 4)
>>> x*100 + y*2004
4
"""
if (not a) and (not b):
return (0, 1, 0)
if not a:
return (0, b//abs(b), abs(b))
if not b:
return (a//abs(a), 0, abs(a))
if a < 0:
a, x_sign = -a, -1
else:
x_sign = 1
if b < 0:
b, y_sign = -b, -1
else:
y_sign = 1
x, y, r, s = 1, 0, 0, 1
while b:
(c, q) = (a % b, a // b)
(a, b, r, s, x, y) = (b, c, x - q*r, y - q*s, r, s)
return (x*x_sign, y*y_sign, a)
def mod_inverse(a, m):
"""
Return the number c such that, ( a * c ) % m == 1 where
c has the same sign as a. If no such value exists, a
ValueError is raised.
Examples
========
>>> from sympy import S
>>> from sympy.core.numbers import mod_inverse
Suppose we wish to find multiplicative inverse x of
3 modulo 11. This is the same as finding x such
that 3 * x = 1 (mod 11). One value of x that satisfies
this congruence is 4. Because 3 * 4 = 12 and 12 = 1 mod(11).
This is the value return by mod_inverse:
>>> mod_inverse(3, 11)
4
>>> mod_inverse(-3, 11)
-4
When there is a common factor between the numerators of
``a`` and ``m`` the inverse does not exist:
>>> mod_inverse(2, 4)
Traceback (most recent call last):
...
ValueError: inverse of 2 mod 4 does not exist
>>> mod_inverse(S(2)/7, S(5)/2)
7/2
References
==========
- https://en.wikipedia.org/wiki/Modular_multiplicative_inverse
- https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
"""
c = None
try:
a, m = as_int(a), as_int(m)
if m > 1:
x, y, g = igcdex(a, m)
if g == 1:
c = x % m
if a < 0:
c -= m
except ValueError:
a, m = sympify(a), sympify(m)
if not (a.is_number and m.is_number):
raise TypeError(filldedent('''
Expected numbers for arguments; symbolic `mod_inverse`
is not implemented
but symbolic expressions can be handled with the
similar function,
sympy.polys.polytools.invert'''))
big = (m > 1)
if not (big is S.true or big is S.false):
raise ValueError('m > 1 did not evaluate; try to simplify %s' % m)
elif big:
c = 1/a
if c is None:
raise ValueError('inverse of %s (mod %s) does not exist' % (a, m))
return c
class Number(AtomicExpr):
"""
Represents any kind of number in sympy.
Floating point numbers are represented by the Float class.
Integer numbers (of any size), together with rational numbers (again,
there is no limit on their size) are represented by the Rational class.
If you want to represent, for example, ``1+sqrt(2)``, then you need to do::
Rational(1) + sqrt(Rational(2))
"""
is_commutative = True
is_number = True
is_Number = True
__slots__ = []
# Used to make max(x._prec, y._prec) return x._prec when only x is a float
_prec = -1
def __new__(cls, *obj):
if len(obj) == 1:
obj = obj[0]
if isinstance(obj, Number):
return obj
if isinstance(obj, SYMPY_INTS):
return Integer(obj)
if isinstance(obj, tuple) and len(obj) == 2:
return Rational(*obj)
if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)):
return Float(obj)
if isinstance(obj, string_types):
val = sympify(obj)
if isinstance(val, Number):
return val
else:
raise ValueError('String "%s" does not denote a Number' % obj)
if isinstance(obj, Number):
return obj
msg = "expected str|int|long|float|Decimal|Number object but got %r"
raise TypeError(msg % type(obj).__name__)
def invert(self, other, *gens, **args):
from sympy.polys.polytools import invert
if getattr(other, 'is_number', True):
return mod_inverse(self, other)
return invert(self, other, *gens, **args)
def __divmod__(self, other):
from .containers import Tuple
from sympy.functions.elementary.complexes import sign
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(self).__name__, type(other).__name__))
if not other:
raise ZeroDivisionError('modulo by zero')
if self.is_Integer and other.is_Integer:
return Tuple(*divmod(self.p, other.p))
else:
rat = self/other
w = sign(rat)*int(abs(rat)) # = rat.floor()
r = self - other*w
return Tuple(w, r)
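    # Editorial note: the Integer branch above uses Python's floor-division
    # convention, while the general branch truncates the quotient toward zero
    # (``sign(rat)*int(abs(rat))``), so the remainder keeps the sign of self.
    # Sketch with assumed values:
    #
    #     divmod(Integer(-7), Integer(3))   # -> (-3, 2)     floor convention
    #     divmod(Float(-7), Integer(3))     # -> (-2, -1.0)  truncating branch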
def __rdivmod__(self, other):
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(other).__name__, type(self).__name__))
return divmod(other, self)
def __round__(self, *args):
return round(float(self), *args)
def _as_mpf_val(self, prec):
"""Evaluation of mpf tuple accurate to at least prec bits."""
raise NotImplementedError('%s needs ._as_mpf_val() method' %
(self.__class__.__name__))
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def _as_mpf_op(self, prec):
prec = max(prec, self._prec)
return self._as_mpf_val(prec), prec
def __float__(self):
return mlib.to_float(self._as_mpf_val(53))
def floor(self):
raise NotImplementedError('%s needs .floor() method' %
(self.__class__.__name__))
def ceiling(self):
raise NotImplementedError('%s needs .ceiling() method' %
(self.__class__.__name__))
def _eval_conjugate(self):
return self
def _eval_order(self, *symbols):
from sympy import Order
# Order(5, x, y) -> Order(1,x,y)
return Order(S.One, *symbols)
def _eval_subs(self, old, new):
if old == -self:
return -new
return self # there is no other possibility
def _eval_is_finite(self):
return True
@classmethod
def class_key(cls):
return 1, 0, 'Number'
@cacheit
def sort_key(self, order=None):
return self.class_key(), (0, ()), (), self
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.Infinity
elif other is S.NegativeInfinity:
return S.NegativeInfinity
return AtomicExpr.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
return S.Infinity
return AtomicExpr.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.Infinity
else:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.NegativeInfinity
else:
return S.Infinity
elif isinstance(other, Tuple):
return NotImplemented
return AtomicExpr.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
if other is S.NaN:
return S.NaN
elif other is S.Infinity or other is S.NegativeInfinity:
return S.Zero
return AtomicExpr.__div__(self, other)
__truediv__ = __div__
def __eq__(self, other):
raise NotImplementedError('%s needs .__eq__() method' %
(self.__class__.__name__))
def __ne__(self, other):
raise NotImplementedError('%s needs .__ne__() method' %
(self.__class__.__name__))
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
raise NotImplementedError('%s needs .__lt__() method' %
(self.__class__.__name__))
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
raise NotImplementedError('%s needs .__le__() method' %
(self.__class__.__name__))
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
return _sympify(other).__lt__(self)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
return _sympify(other).__le__(self)
def __hash__(self):
return super(Number, self).__hash__()
def is_constant(self, *wrt, **flags):
return True
def as_coeff_mul(self, *deps, **kwargs):
# a -> c*t
if self.is_Rational or not kwargs.pop('rational', True):
return self, tuple()
elif self.is_negative:
return S.NegativeOne, (-self,)
return S.One, (self,)
def as_coeff_add(self, *deps):
# a -> c + t
if self.is_Rational:
return self, tuple()
return S.Zero, (self,)
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
if rational and not self.is_Rational:
return S.One, self
return (self, S.One) if self else (S.One, self)
def as_coeff_Add(self, rational=False):
"""Efficiently extract the coefficient of a summation. """
if not rational:
return self, S.Zero
return S.Zero, self
def gcd(self, other):
"""Compute GCD of `self` and `other`. """
from sympy.polys import gcd
return gcd(self, other)
def lcm(self, other):
"""Compute LCM of `self` and `other`. """
from sympy.polys import lcm
return lcm(self, other)
def cofactors(self, other):
"""Compute GCD and cofactors of `self` and `other`. """
from sympy.polys import cofactors
return cofactors(self, other)
class Float(Number):
"""Represent a floating-point number of arbitrary precision.
Examples
========
>>> from sympy import Float
>>> Float(3.5)
3.50000000000000
>>> Float(3)
3.00000000000000
Creating Floats from strings (and Python ``int`` and ``long``
types) will give a minimum precision of 15 digits, but the
precision will automatically increase to capture all digits
entered.
>>> Float(1)
1.00000000000000
>>> Float(10**20)
100000000000000000000.
>>> Float('1e20')
100000000000000000000.
However, *floating-point* numbers (Python ``float`` types) retain
only 15 digits of precision:
>>> Float(1e20)
1.00000000000000e+20
>>> Float(1.23456789123456789)
1.23456789123457
It may be preferable to enter high-precision decimal numbers
as strings:
    >>> Float('1.23456789123456789')
1.23456789123456789
The desired number of digits can also be specified:
>>> Float('1e-3', 3)
0.00100
>>> Float(100, 4)
100.0
Float can automatically count significant figures if a null string
    is sent for the precision; spaces are also allowed in the string. (Auto-
counting is only allowed for strings, ints and longs).
>>> Float('123 456 789 . 123 456', '')
123456789.123456
>>> Float('12e-3', '')
0.012
>>> Float(3, '')
3.
If a number is written in scientific notation, only the digits before the
exponent are considered significant if a decimal appears, otherwise the
"e" signifies only how to move the decimal:
>>> Float('60.e2', '') # 2 digits significant
6.0e+3
>>> Float('60e2', '') # 4 digits significant
6000.
>>> Float('600e-2', '') # 3 digits significant
6.00
Notes
=====
Floats are inexact by their nature unless their value is a binary-exact
value.
>>> approx, exact = Float(.1, 1), Float(.125, 1)
For calculation purposes, evalf needs to be able to change the precision
but this will not increase the accuracy of the inexact value. The
following is the most accurate 5-digit approximation of a value of 0.1
that had only 1 digit of precision:
>>> approx.evalf(5)
0.099609
By contrast, 0.125 is exact in binary (as it is in base 10) and so it
can be passed to Float or evalf to obtain an arbitrary precision with
matching accuracy:
>>> Float(exact, 5)
0.12500
>>> exact.evalf(20)
0.12500000000000000000
Trying to make a high-precision Float from a float is not disallowed,
but one must keep in mind that the *underlying float* (not the apparent
decimal value) is being obtained with high precision. For example, 0.3
does not have a finite binary representation. The closest rational is
the fraction 5404319552844595/2**54. So if you try to obtain a Float of
0.3 to 20 digits of precision you will not see the same thing as 0.3
followed by 19 zeros:
>>> Float(0.3, 20)
0.29999999999999998890
If you want a 20-digit value of the decimal 0.3 (not the floating point
approximation of 0.3) you should send the 0.3 as a string. The underlying
representation is still binary but a higher precision than Python's float
is used:
>>> Float('0.3', 20)
0.30000000000000000000
    Although you can increase the precision of an existing Float using Float,
it will not increase the accuracy -- the underlying value is not changed:
>>> def show(f): # binary rep of Float
... from sympy import Mul, Pow
... s, m, e, b = f._mpf_
... v = Mul(int(m), Pow(2, int(e), evaluate=False), evaluate=False)
... print('%s at prec=%s' % (v, f._prec))
...
>>> t = Float('0.3', 3)
>>> show(t)
4915/2**14 at prec=13
>>> show(Float(t, 20)) # higher prec, not higher accuracy
4915/2**14 at prec=70
>>> show(Float(t, 2)) # lower prec
307/2**10 at prec=10
The same thing happens when evalf is used on a Float:
>>> show(t.evalf(20))
4915/2**14 at prec=70
>>> show(t.evalf(2))
307/2**10 at prec=10
Finally, Floats can be instantiated with an mpf tuple (n, c, p) to
produce the number (-1)**n*c*2**p:
>>> n, c, p = 1, 5, 0
>>> (-1)**n*c*2**p
-5
>>> Float((1, 5, 0))
-5.00000000000000
An actual mpf tuple also contains the number of bits in c as the last
element of the tuple:
>>> _._mpf_
(1, 5, 0, 3)
This is not needed for instantiation and is not the same thing as the
precision. The mpf tuple and the precision are two separate quantities
that Float tracks.
"""
__slots__ = ['_mpf_', '_prec']
# A Float represents many real numbers,
# both rational and irrational.
is_rational = None
is_irrational = None
is_number = True
is_real = True
is_Float = True
def __new__(cls, num, dps=None, prec=None, precision=None):
if prec is not None:
SymPyDeprecationWarning(
feature="Using 'prec=XX' to denote decimal precision",
useinstead="'dps=XX' to denote decimal and 'precision=XX' "\
"for binary precision",
value="This is an effort to improve functionality "\
"of Float class. ").warn()
dps = prec
if dps is not None and precision is not None:
raise ValueError('Both decimal and binary precision supplied. '
'Supply only one. ')
if isinstance(num, string_types):
num = num.replace(' ', '')
if num.startswith('.') and len(num) > 1:
num = '0' + num
elif num.startswith('-.') and len(num) > 2:
num = '-0.' + num[2:]
elif isinstance(num, float) and num == 0:
num = '0'
elif isinstance(num, (SYMPY_INTS, Integer)):
num = str(num) # faster than mlib.from_int
elif num is S.Infinity:
num = '+inf'
elif num is S.NegativeInfinity:
num = '-inf'
elif isinstance(num, mpmath.mpf):
if precision is None:
if dps is None:
precision = num.context.prec
num = num._mpf_
if dps is None and precision is None:
dps = 15
if isinstance(num, Float):
return num
if isinstance(num, string_types) and _literal_float(num):
try:
Num = decimal.Decimal(num)
except decimal.InvalidOperation:
pass
else:
isint = '.' not in num
num, dps = _decimal_to_Rational_prec(Num)
if num.is_Integer and isint:
dps = max(dps, len(str(num).lstrip('-')))
dps = max(15, dps)
precision = mlib.libmpf.dps_to_prec(dps)
elif precision == '' and dps is None or precision is None and dps == '':
if not isinstance(num, string_types):
raise ValueError('The null string can only be used when '
'the number to Float is passed as a string or an integer.')
ok = None
if _literal_float(num):
try:
Num = decimal.Decimal(num)
except decimal.InvalidOperation:
pass
else:
isint = '.' not in num
num, dps = _decimal_to_Rational_prec(Num)
if num.is_Integer and isint:
dps = max(dps, len(str(num).lstrip('-')))
precision = mlib.libmpf.dps_to_prec(dps)
ok = True
if ok is None:
raise ValueError('string-float not recognized: %s' % num)
        # Decimal precision (dps) is set and maybe binary precision (precision)
        # as well. From here on binary precision is used to compute the Float;
        # if it was supplied use it directly, else translate from the decimal
        # precision.
if precision is None or precision == '':
precision = mlib.libmpf.dps_to_prec(dps)
if isinstance(num, float):
_mpf_ = mlib.from_float(num, precision, rnd)
elif isinstance(num, string_types):
_mpf_ = mlib.from_str(num, precision, rnd)
elif isinstance(num, decimal.Decimal):
if num.is_finite():
_mpf_ = mlib.from_str(str(num), precision, rnd)
elif num.is_nan():
_mpf_ = _mpf_nan
elif num.is_infinite():
if num > 0:
_mpf_ = _mpf_inf
else:
_mpf_ = _mpf_ninf
else:
raise ValueError("unexpected decimal value %s" % str(num))
elif isinstance(num, Rational):
_mpf_ = mlib.from_rational(num.p, num.q, precision, rnd)
elif isinstance(num, tuple) and len(num) in (3, 4):
if type(num[1]) is str:
# it's a hexadecimal (coming from a pickled object)
# assume that it is in standard form
num = list(num)
num[1] = long(num[1], 16)
_mpf_ = tuple(num)
else:
if len(num) == 4:
# handle normalization hack
return Float._new(num, precision)
else:
return (S.NegativeOne**num[0]*num[1]*S(2)**num[2]).evalf(precision)
elif isinstance(num, Float):
_mpf_ = num._mpf_
if precision < num._prec:
_mpf_ = mpf_norm(_mpf_, precision)
else:
# XXX: We lose precision here.
_mpf_ = mpmath.mpf(num)._mpf_
# special cases
if _mpf_ == _mpf_zero:
pass # we want a Float
elif _mpf_ == _mpf_nan:
return S.NaN
obj = Expr.__new__(cls)
obj._mpf_ = _mpf_
obj._prec = precision
return obj
@classmethod
def _new(cls, _mpf_, _prec):
# special cases
if _mpf_ == _mpf_zero:
return S.Zero # XXX this is different from Float which gives 0.0
elif _mpf_ == _mpf_nan:
return S.NaN
obj = Expr.__new__(cls)
obj._mpf_ = mpf_norm(_mpf_, _prec)
# XXX: Should this be obj._prec = obj._mpf_[3]?
obj._prec = _prec
return obj
# mpz can't be pickled
def __getnewargs__(self):
return (mlib.to_pickable(self._mpf_),)
def __getstate__(self):
return {'_prec': self._prec}
def _hashable_content(self):
return (self._mpf_, self._prec)
def floor(self):
return Integer(int(mlib.to_int(
mlib.mpf_floor(self._mpf_, self._prec))))
def ceiling(self):
return Integer(int(mlib.to_int(
mlib.mpf_ceil(self._mpf_, self._prec))))
@property
def num(self):
return mpmath.mpf(self._mpf_)
def _as_mpf_val(self, prec):
rv = mpf_norm(self._mpf_, prec)
if rv != self._mpf_ and self._prec == prec:
debug(self._mpf_, rv)
return rv
def _as_mpf_op(self, prec):
return self._mpf_, max(prec, self._prec)
def _eval_is_finite(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf):
return False
return True
def _eval_is_infinite(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf):
return True
return False
def _eval_is_integer(self):
return self._mpf_ == _mpf_zero
def _eval_is_negative(self):
if self._mpf_ == _mpf_ninf:
return True
if self._mpf_ == _mpf_inf:
return False
return self.num < 0
def _eval_is_positive(self):
if self._mpf_ == _mpf_inf:
return True
if self._mpf_ == _mpf_ninf:
return False
return self.num > 0
def _eval_is_zero(self):
return self._mpf_ == _mpf_zero
def __nonzero__(self):
return self._mpf_ != _mpf_zero
__bool__ = __nonzero__
def __neg__(self):
return Float._new(mlib.mpf_neg(self._mpf_), self._prec)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_add(self._mpf_, rhs, prec, rnd), prec)
return Number.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_sub(self._mpf_, rhs, prec, rnd), prec)
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mul(self._mpf_, rhs, prec, rnd), prec)
return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number) and other != 0 and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_div(self._mpf_, rhs, prec, rnd), prec)
return Number.__div__(self, other)
__truediv__ = __div__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if isinstance(other, Rational) and other.q != 1 and global_evaluate[0]:
# calculate mod with Rationals, *then* round the result
return Float(Rational.__mod__(Rational(self), other),
prec_to_dps(self._prec))
if isinstance(other, Float) and global_evaluate[0]:
r = self/other
if r == int(r):
prec = max([prec_to_dps(i)
for i in (self._prec, other._prec)])
return Float(0, prec)
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(self._mpf_, rhs, prec, rnd), prec)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Float) and global_evaluate[0]:
return other.__mod__(self)
if isinstance(other, Number) and global_evaluate[0]:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(rhs, self._mpf_, prec, rnd), prec)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
"""
        expt is a symbolic object but not equal to 0 or 1

        (-p)**r -> exp(r*log(-p)) -> exp(r*(log(p) + I*Pi)) ->
                -> p**r*(cos(Pi*r) + I*sin(Pi*r))
"""
if self == 0:
if expt.is_positive:
return S.Zero
if expt.is_negative:
return Float('inf')
if isinstance(expt, Number):
if isinstance(expt, Integer):
prec = self._prec
return Float._new(
mlib.mpf_pow_int(self._mpf_, expt.p, prec, rnd), prec)
elif isinstance(expt, Rational) and \
expt.p == 1 and expt.q % 2 and self.is_negative:
return Pow(S.NegativeOne, expt, evaluate=False)*(
-self)._eval_power(expt)
expt, prec = expt._as_mpf_op(self._prec)
mpfself = self._mpf_
try:
y = mpf_pow(mpfself, expt, prec, rnd)
return Float._new(y, prec)
except mlib.ComplexResult:
re, im = mlib.mpc_pow(
(mpfself, _mpf_zero), (expt, _mpf_zero), prec, rnd)
return Float._new(re, prec) + \
Float._new(im, prec)*S.ImaginaryUnit
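    # Editorial sketch (assumed values): the negative-base Rational branch above
    # factors out (-1)**expt unevaluated, e.g.
    #
    #     Float(-8)**Rational(1, 3)   # -> 2.0*(-1)**(1/3)
    #     Float(4)**Rational(1, 2)    # -> 2.00000000000000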
def __abs__(self):
return Float._new(mlib.mpf_abs(self._mpf_), self._prec)
def __int__(self):
if self._mpf_ == _mpf_zero:
return 0
return int(mlib.to_int(self._mpf_)) # uses round_fast = round_down
__long__ = __int__
def __eq__(self, other):
if isinstance(other, float):
# coerce to Float at same precision
o = Float(other)
try:
ompf = o._as_mpf_val(self._prec)
except ValueError:
return False
return bool(mlib.mpf_eq(self._mpf_, ompf))
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if isinstance(other, NumberSymbol):
if other.is_irrational:
return False
return other.__eq__(self)
if isinstance(other, Float):
return bool(mlib.mpf_eq(self._mpf_, other._mpf_))
if isinstance(other, Number):
# numbers should compare at the same precision;
# all _as_mpf_val routines should be sure to abide
# by the request to change the prec if necessary; if
# they don't, the equality test will fail since it compares
# the mpf tuples
ompf = other._as_mpf_val(self._prec)
return bool(mlib.mpf_eq(self._mpf_, ompf))
return False # Float != non-Number
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__le__(self)
if other.is_comparable:
other = other.evalf()
if isinstance(other, Number) and other is not S.NaN:
return _sympify(bool(
mlib.mpf_gt(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__lt__(self)
if other.is_comparable:
other = other.evalf()
if isinstance(other, Number) and other is not S.NaN:
return _sympify(bool(
mlib.mpf_ge(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__ge__(self, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__ge__(self)
if other.is_real and other.is_number:
other = other.evalf()
if isinstance(other, Number) and other is not S.NaN:
return _sympify(bool(
mlib.mpf_lt(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__gt__(self)
if other.is_real and other.is_number:
other = other.evalf()
if isinstance(other, Number) and other is not S.NaN:
return _sympify(bool(
mlib.mpf_le(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__le__(self, other)
def __hash__(self):
return super(Float, self).__hash__()
def epsilon_eq(self, other, epsilon="1e-15"):
return abs(self - other) < Float(epsilon)
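    # Editorial sketch (assumed values): epsilon_eq is an absolute-tolerance
    # comparison against the given epsilon, e.g.
    #
    #     Float(1).epsilon_eq(Float('1.0000000000000000001'))   # -> True
    #     Float(1).epsilon_eq(Float('1.01'))                    # -> False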
def _sage_(self):
import sage.all as sage
return sage.RealNumber(str(self))
def __format__(self, format_spec):
return format(decimal.Decimal(str(self)), format_spec)
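    # Editorial sketch (assumed format spec): __format__ delegates to Decimal,
    # so standard format specifications apply, e.g.
    #
    #     format(Float('1.5'), '.3f')   # -> '1.500'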
# Add sympify converters
converter[float] = converter[decimal.Decimal] = Float
# this is here to work nicely in Sage
RealNumber = Float
class Rational(Number):
"""Represents integers and rational numbers (p/q) of any size.
Examples
========
>>> from sympy import Rational, nsimplify, S, pi
>>> Rational(3)
3
>>> Rational(1, 2)
1/2
Rational is unprejudiced in accepting input. If a float is passed, the
underlying value of the binary representation will be returned:
>>> Rational(.5)
1/2
>>> Rational(.2)
3602879701896397/18014398509481984
If the simpler representation of the float is desired then consider
    limiting the denominator to the desired value or converting the float to
a string (which is roughly equivalent to limiting the denominator to
10**12):
>>> Rational(str(.2))
1/5
>>> Rational(.2).limit_denominator(10**12)
1/5
An arbitrarily precise Rational is obtained when a string literal is
passed:
>>> Rational("1.23")
123/100
>>> Rational('1e-2')
1/100
>>> Rational(".1")
1/10
>>> Rational('1e-2/3.2')
1/320
The conversion of other types of strings can be handled by
the sympify() function, and conversion of floats to expressions
or simple fractions can be handled with nsimplify:
>>> S('.[3]') # repeating digits in brackets
1/3
>>> S('3**2/10') # general expressions
9/10
>>> nsimplify(.3) # numbers that have a simple form
3/10
But if the input does not reduce to a literal Rational, an error will
be raised:
>>> Rational(pi)
Traceback (most recent call last):
...
TypeError: invalid input: pi
Low-level
---------
Access numerator and denominator as .p and .q:
>>> r = Rational(3, 4)
>>> r
3/4
>>> r.p
3
>>> r.q
4
Note that p and q return integers (not SymPy Integers) so some care
is needed when using them in expressions:
>>> r.p/r.q
0.75
See Also
========
sympify, sympy.simplify.simplify.nsimplify
"""
is_real = True
is_integer = False
is_rational = True
is_number = True
__slots__ = ['p', 'q']
is_Rational = True
@cacheit
def __new__(cls, p, q=None, gcd=None):
if q is None:
if isinstance(p, Rational):
return p
if isinstance(p, string_types):
if p.count('/') > 1:
raise TypeError('invalid input: %s' % p)
pq = p.rsplit('/', 1)
if len(pq) == 2:
p, q = pq
fp = fractions.Fraction(p)
fq = fractions.Fraction(q)
f = fp/fq
return Rational(f.numerator, f.denominator, 1)
p = p.replace(' ', '')
try:
p = fractions.Fraction(p)
except ValueError:
pass # error will raise below
if not isinstance(p, string_types):
try:
if isinstance(p, fractions.Fraction):
return Rational(p.numerator, p.denominator, 1)
except NameError:
pass # error will raise below
if isinstance(p, (float, Float)):
return Rational(*_as_integer_ratio(p))
if not isinstance(p, SYMPY_INTS + (Rational,)):
raise TypeError('invalid input: %s' % p)
q = q or S.One
gcd = 1
else:
p = Rational(p)
q = Rational(q)
if isinstance(q, Rational):
p *= q.q
q = q.p
if isinstance(p, Rational):
q *= p.q
p = p.p
# p and q are now integers
if q == 0:
if p == 0:
if _errdict["divide"]:
raise ValueError("Indeterminate 0/0")
else:
return S.NaN
return S.ComplexInfinity
if q < 0:
q = -q
p = -p
if not gcd:
gcd = igcd(abs(p), q)
if gcd > 1:
p //= gcd
q //= gcd
if q == 1:
return Integer(p)
if p == 1 and q == 2:
return S.Half
obj = Expr.__new__(cls)
obj.p = p
obj.q = q
return obj
def limit_denominator(self, max_denominator=1000000):
"""Closest Rational to self with denominator at most max_denominator.
>>> from sympy import Rational
>>> Rational('3.141592653589793').limit_denominator(10)
22/7
>>> Rational('3.141592653589793').limit_denominator(100)
311/99
"""
f = fractions.Fraction(self.p, self.q)
return Rational(f.limit_denominator(fractions.Fraction(int(max_denominator))))
def __getnewargs__(self):
return (self.p, self.q)
def _hashable_content(self):
return (self.p, self.q)
def _eval_is_positive(self):
return self.p > 0
def _eval_is_zero(self):
return self.p == 0
def __neg__(self):
return Rational(-self.p, self.q)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(self.p + self.q*other.p, self.q, 1)
elif isinstance(other, Rational):
#TODO: this can probably be optimized more
return Rational(self.p*other.q + self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return other + self
else:
return Number.__add__(self, other)
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(self.p - self.q*other.p, self.q, 1)
elif isinstance(other, Rational):
return Rational(self.p*other.q - self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return -other + self
else:
return Number.__sub__(self, other)
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(self.q*other.p - self.p, self.q, 1)
elif isinstance(other, Rational):
return Rational(self.q*other.p - self.p*other.q, self.q*other.q)
elif isinstance(other, Float):
return -self + other
else:
return Number.__rsub__(self, other)
return Number.__rsub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(self.p*other.p, self.q, igcd(other.p, self.q))
elif isinstance(other, Rational):
return Rational(self.p*other.p, self.q*other.q, igcd(self.p, other.q)*igcd(self.q, other.p))
elif isinstance(other, Float):
return other*self
else:
return Number.__mul__(self, other)
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
if self.p and other.p == S.Zero:
return S.ComplexInfinity
else:
return Rational(self.p, self.q*other.p, igcd(self.p, other.p))
elif isinstance(other, Rational):
return Rational(self.p*other.q, self.q*other.p, igcd(self.p, other.p)*igcd(self.q, other.q))
elif isinstance(other, Float):
return self*(1/other)
else:
return Number.__div__(self, other)
return Number.__div__(self, other)
@_sympifyit('other', NotImplemented)
def __rdiv__(self, other):
if global_evaluate[0]:
if isinstance(other, Integer):
return Rational(other.p*self.q, self.p, igcd(self.p, other.p))
elif isinstance(other, Rational):
return Rational(other.p*self.q, other.q*self.p, igcd(self.p, other.p)*igcd(self.q, other.q))
elif isinstance(other, Float):
return other*(1/self)
else:
return Number.__rdiv__(self, other)
return Number.__rdiv__(self, other)
__truediv__ = __div__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if global_evaluate[0]:
if isinstance(other, Rational):
n = (self.p*other.q) // (other.p*self.q)
return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q)
if isinstance(other, Float):
# calculate mod with Rationals, *then* round the answer
return Float(self.__mod__(Rational(other)),
prec_to_dps(other._prec))
return Number.__mod__(self, other)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Rational):
return Rational.__mod__(other, self)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
if isinstance(expt, Number):
if isinstance(expt, Float):
return self._eval_evalf(expt._prec)**expt
if expt.is_negative:
# (3/4)**-2 -> (4/3)**2
ne = -expt
if (ne is S.One):
return Rational(self.q, self.p)
if self.is_negative:
if expt.q != 1:
return -(S.NegativeOne)**((expt.p % expt.q) /
S(expt.q))*Rational(self.q, -self.p)**ne
else:
return S.NegativeOne**ne*Rational(self.q, -self.p)**ne
else:
return Rational(self.q, self.p)**ne
if expt is S.Infinity: # -oo already caught by test for negative
if self.p > self.q:
# (3/2)**oo -> oo
return S.Infinity
if self.p < -self.q:
# (-3/2)**oo -> oo + I*oo
return S.Infinity + S.Infinity*S.ImaginaryUnit
return S.Zero
if isinstance(expt, Integer):
# (4/3)**2 -> 4**2 / 3**2
return Rational(self.p**expt.p, self.q**expt.p, 1)
if isinstance(expt, Rational):
if self.p != 1:
# (4/3)**(5/6) -> 4**(5/6)*3**(-5/6)
return Integer(self.p)**expt*Integer(self.q)**(-expt)
# as the above caught negative self.p, now self is positive
return Integer(self.q)**Rational(
expt.p*(expt.q - 1), expt.q) / \
Integer(self.q)**Integer(expt.p)
if self.is_negative and expt.is_even:
return (-self)**expt
return
def _as_mpf_val(self, prec):
return mlib.from_rational(self.p, self.q, prec, rnd)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd))
def __abs__(self):
return Rational(abs(self.p), self.q)
def __int__(self):
p, q = self.p, self.q
if p < 0:
return -int(-p//q)
return int(p//q)
__long__ = __int__
def floor(self):
return Integer(self.p // self.q)
def ceiling(self):
return -Integer(-self.p // self.q)
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if isinstance(other, NumberSymbol):
if other.is_irrational:
return False
return other.__eq__(self)
if isinstance(other, Number):
if isinstance(other, Rational):
# a Rational is always in reduced form so will never be 2/4
# so we can just check equivalence of args
return self.p == other.p and self.q == other.q
if isinstance(other, Float):
return mlib.mpf_eq(self._as_mpf_val(other._prec), other._mpf_)
return False
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__le__(self)
expr = self
if isinstance(other, Number):
if isinstance(other, Rational):
return _sympify(bool(self.p*other.q > self.q*other.p))
if isinstance(other, Float):
return _sympify(bool(mlib.mpf_gt(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__gt__(expr, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__lt__(self)
expr = self
if isinstance(other, Number):
if isinstance(other, Rational):
return _sympify(bool(self.p*other.q >= self.q*other.p))
if isinstance(other, Float):
return _sympify(bool(mlib.mpf_ge(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__ge__(expr, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__ge__(self)
expr = self
if isinstance(other, Number):
if isinstance(other, Rational):
return _sympify(bool(self.p*other.q < self.q*other.p))
if isinstance(other, Float):
return _sympify(bool(mlib.mpf_lt(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__lt__(expr, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
expr = self
if isinstance(other, NumberSymbol):
return other.__gt__(self)
elif isinstance(other, Number):
if isinstance(other, Rational):
return _sympify(bool(self.p*other.q <= self.q*other.p))
if isinstance(other, Float):
return _sympify(bool(mlib.mpf_le(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__le__(expr, other)
def __hash__(self):
return super(Rational, self).__hash__()
def factors(self, limit=None, use_trial=True, use_rho=False,
use_pm1=False, verbose=False, visual=False):
"""A wrapper to factorint which return factors of self that are
smaller than limit (or cheap to compute). Special methods of
factoring are disabled by default so that only trial division is used.
"""
from sympy.ntheory import factorrat
return factorrat(self, limit=limit, use_trial=use_trial,
use_rho=use_rho, use_pm1=use_pm1,
verbose=verbose).copy()
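    # For rationals: gcd(p1/q1, p2/q2) = gcd(p1, p2)/lcm(q1, q2) and
    # lcm(p1/q1, p2/q2) = lcm(p1, p2)/gcd(q1, q2).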
@_sympifyit('other', NotImplemented)
def gcd(self, other):
if isinstance(other, Rational):
if other is S.Zero:
return other
return Rational(
Integer(igcd(self.p, other.p)),
Integer(ilcm(self.q, other.q)))
return Number.gcd(self, other)
@_sympifyit('other', NotImplemented)
def lcm(self, other):
if isinstance(other, Rational):
return Rational(
self.p*other.p//igcd(self.p, other.p),
igcd(self.q, other.q))
return Number.lcm(self, other)
def as_numer_denom(self):
return Integer(self.p), Integer(self.q)
def _sage_(self):
import sage.all as sage
return sage.Integer(self.p)/sage.Integer(self.q)
def as_content_primitive(self, radical=False, clear=True):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import S
>>> (S(-3)/2).as_content_primitive()
(3/2, -1)
See docstring of Expr.as_content_primitive for more examples.
"""
if self:
if self.is_positive:
return self, S.One
return -self, S.NegativeOne
return S.One, self
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return self, S.One
def as_coeff_Add(self, rational=False):
"""Efficiently extract the coefficient of a summation. """
return self, S.Zero
# int -> Integer
_intcache = {}
# TODO move this tracing facility to sympy/core/trace.py ?
def _intcache_printinfo():
ints = sorted(_intcache.keys())
nhit = _intcache_hits
nmiss = _intcache_misses
if nhit == 0 and nmiss == 0:
print()
print('Integer cache statistic was not collected')
return
miss_ratio = float(nmiss) / (nhit + nmiss)
print()
print('Integer cache statistic')
print('-----------------------')
print()
print('#items: %i' % len(ints))
print()
print(' #hit #miss #total')
print()
print('%5i %5i (%7.5f %%) %5i' % (
nhit, nmiss, miss_ratio*100, nhit + nmiss)
)
print()
print(ints)
_intcache_hits = 0
_intcache_misses = 0
def int_trace(f):
import os
if os.getenv('SYMPY_TRACE_INT', 'no').lower() != 'yes':
return f
def Integer_tracer(cls, i):
global _intcache_hits, _intcache_misses
try:
_intcache_hits += 1
return _intcache[i]
except KeyError:
_intcache_hits -= 1
_intcache_misses += 1
return f(cls, i)
# also we want to hook our _intcache_printinfo into sys.atexit
import atexit
atexit.register(_intcache_printinfo)
return Integer_tracer
class Integer(Rational):
q = 1
is_integer = True
is_number = True
is_Integer = True
__slots__ = ['p']
def _as_mpf_val(self, prec):
return mlib.from_int(self.p, prec)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(self._as_mpf_val(prec))
# TODO caching with decorator, but not to degrade performance
@int_trace
def __new__(cls, i):
if isinstance(i, string_types):
i = i.replace(' ', '')
# whereas we cannot, in general, make a Rational from an
# arbitrary expression, we can make an Integer unambiguously
# (except when a non-integer expression happens to round to
# an integer). So we proceed by taking int() of the input and
# let the int routines determine whether the expression can
# be made into an int or whether an error should be raised.
try:
ival = int(i)
except TypeError:
raise TypeError(
'Integer can only work with integer expressions.')
try:
return _intcache[ival]
except KeyError:
# We only work with well-behaved integer types. This converts, for
# example, numpy.int32 instances.
obj = Expr.__new__(cls)
obj.p = ival
_intcache[ival] = obj
return obj
def __getnewargs__(self):
return (self.p,)
# Arithmetic operations are here for efficiency
def __int__(self):
return self.p
__long__ = __int__
def floor(self):
return Integer(self.p)
def ceiling(self):
return Integer(self.p)
def __neg__(self):
return Integer(-self.p)
def __abs__(self):
if self.p >= 0:
return self
else:
return Integer(-self.p)
def __divmod__(self, other):
from .containers import Tuple
if isinstance(other, Integer) and global_evaluate[0]:
return Tuple(*(divmod(self.p, other.p)))
else:
return Number.__divmod__(self, other)
def __rdivmod__(self, other):
from .containers import Tuple
if isinstance(other, integer_types) and global_evaluate[0]:
return Tuple(*(divmod(other, self.p)))
else:
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
oname = type(other).__name__
sname = type(self).__name__
raise TypeError(msg % (oname, sname))
return Number.__divmod__(other, self)
# TODO make it decorator + bytecodehacks?
def __add__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(self.p + other)
elif isinstance(other, Integer):
return Integer(self.p + other.p)
elif isinstance(other, Rational):
return Rational(self.p*other.q + other.p, other.q, 1)
return Rational.__add__(self, other)
else:
return Add(self, other)
def __radd__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(other + self.p)
elif isinstance(other, Rational):
return Rational(other.p + self.p*other.q, other.q, 1)
return Rational.__radd__(self, other)
return Rational.__radd__(self, other)
def __sub__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(self.p - other)
elif isinstance(other, Integer):
return Integer(self.p - other.p)
elif isinstance(other, Rational):
return Rational(self.p*other.q - other.p, other.q, 1)
return Rational.__sub__(self, other)
return Rational.__sub__(self, other)
def __rsub__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(other - self.p)
elif isinstance(other, Rational):
return Rational(other.p - self.p*other.q, other.q, 1)
return Rational.__rsub__(self, other)
return Rational.__rsub__(self, other)
def __mul__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(self.p*other)
elif isinstance(other, Integer):
return Integer(self.p*other.p)
elif isinstance(other, Rational):
return Rational(self.p*other.p, other.q, igcd(self.p, other.q))
return Rational.__mul__(self, other)
return Rational.__mul__(self, other)
def __rmul__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(other*self.p)
elif isinstance(other, Rational):
return Rational(other.p*self.p, other.q, igcd(self.p, other.q))
return Rational.__rmul__(self, other)
return Rational.__rmul__(self, other)
def __mod__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(self.p % other)
elif isinstance(other, Integer):
return Integer(self.p % other.p)
return Rational.__mod__(self, other)
return Rational.__mod__(self, other)
def __rmod__(self, other):
if global_evaluate[0]:
if isinstance(other, integer_types):
return Integer(other % self.p)
elif isinstance(other, Integer):
return Integer(other.p % self.p)
return Rational.__rmod__(self, other)
return Rational.__rmod__(self, other)
def __eq__(self, other):
if isinstance(other, integer_types):
return (self.p == other)
elif isinstance(other, Integer):
return (self.p == other.p)
return Rational.__eq__(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if isinstance(other, Integer):
return _sympify(self.p > other.p)
return Rational.__gt__(self, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if isinstance(other, Integer):
return _sympify(self.p < other.p)
return Rational.__lt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if isinstance(other, Integer):
return _sympify(self.p >= other.p)
return Rational.__ge__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if isinstance(other, Integer):
return _sympify(self.p <= other.p)
return Rational.__le__(self, other)
def __hash__(self):
return hash(self.p)
def __index__(self):
return self.p
########################################
def _eval_is_odd(self):
return bool(self.p % 2)
def _eval_power(self, expt):
"""
Tries to do some simplifications on self**expt
Returns None if no further simplifications can be done
When exponent is a fraction (so we have for example a square root),
we try to find a simpler representation by factoring the argument
up to factors of 2**15, e.g.
- sqrt(4) becomes 2
- sqrt(-4) becomes 2*I
- (2**(3+7)*3**(6+7))**Rational(1,7) becomes 6*18**(3/7)
Further simplification would require a special call to factorint on
the argument which is not done here for sake of speed.
"""
from sympy import perfect_power
if expt is S.Infinity:
if self.p > S.One:
return S.Infinity
# cases -1, 0, 1 are done in their respective classes
return S.Infinity + S.ImaginaryUnit*S.Infinity
if expt is S.NegativeInfinity:
return Rational(1, self)**S.Infinity
if not isinstance(expt, Number):
# simplify when expt is even
# (-2)**k --> 2**k
if self.is_negative and expt.is_even:
return (-self)**expt
if isinstance(expt, Float):
# Rational knows how to exponentiate by a Float
return super(Integer, self)._eval_power(expt)
if not isinstance(expt, Rational):
return
if expt is S.Half and self.is_negative:
# we extract I for this special case since everyone is doing so
return S.ImaginaryUnit*Pow(-self, expt)
if expt.is_negative:
# invert base and change sign on exponent
ne = -expt
if self.is_negative:
if expt.q != 1:
return -(S.NegativeOne)**((expt.p % expt.q) /
S(expt.q))*Rational(1, -self)**ne
else:
return (S.NegativeOne)**ne*Rational(1, -self)**ne
else:
return Rational(1, self.p)**ne
# see if base is a perfect root, sqrt(4) --> 2
x, xexact = integer_nthroot(abs(self.p), expt.q)
if xexact:
# if it's a perfect root we've finished
result = Integer(x**abs(expt.p))
if self.is_negative:
result *= S.NegativeOne**expt
return result
# The following is an algorithm where we collect perfect roots
# from the factors of base.
# if it's not an nth root, it still might be a perfect power
b_pos = int(abs(self.p))
p = perfect_power(b_pos)
if p is not False:
dict = {p[0]: p[1]}
else:
dict = Integer(self).factors(limit=2**15)
# now process the dict of factors
if self.is_negative:
dict[-1] = 1
out_int = 1 # integer part
out_rad = 1 # extracted radicals
sqr_int = 1
sqr_gcd = 0
sqr_dict = {}
for prime, exponent in dict.items():
exponent *= expt.p
# remove multiples of expt.q: (2**12)**(1/10) -> 2*(2**2)**(1/10)
div_e, div_m = divmod(exponent, expt.q)
if div_e > 0:
out_int *= prime**div_e
if div_m > 0:
# see if the reduced exponent shares a gcd with e.q
# (2**2)**(1/10) -> 2**(1/5)
g = igcd(div_m, expt.q)
if g != 1:
out_rad *= Pow(prime, Rational(div_m//g, expt.q//g))
else:
sqr_dict[prime] = div_m
# identify gcd of remaining powers
for p, ex in sqr_dict.items():
if sqr_gcd == 0:
sqr_gcd = ex
else:
sqr_gcd = igcd(sqr_gcd, ex)
if sqr_gcd == 1:
break
for k, v in sqr_dict.items():
sqr_int *= k**(v//sqr_gcd)
if sqr_int == self and out_int == 1 and out_rad == 1:
result = None
else:
result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q))
return result
def _eval_is_prime(self):
from sympy.ntheory import isprime
return isprime(self)
def _eval_is_composite(self):
if self > 1:
return fuzzy_not(self.is_prime)
else:
return False
def as_numer_denom(self):
return self, S.One
def __floordiv__(self, other):
return Integer(self.p // Integer(other).p)
def __rfloordiv__(self, other):
return Integer(Integer(other).p // self.p)
# Add sympify converters
for i_type in integer_types:
converter[i_type] = Integer
class AlgebraicNumber(Expr):
"""Class for representing algebraic numbers in SymPy. """
__slots__ = ['rep', 'root', 'alias', 'minpoly']
is_AlgebraicNumber = True
is_algebraic = True
is_number = True
def __new__(cls, expr, coeffs=None, alias=None, **args):
"""Construct a new algebraic number. """
from sympy import Poly
from sympy.polys.polyclasses import ANP, DMP
from sympy.polys.numberfields import minimal_polynomial
from sympy.core.symbol import Symbol
expr = sympify(expr)
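        # Accept a (minpoly, root) pair, an existing AlgebraicNumber, or any expression
        # whose minimal polynomial is then computed.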
if isinstance(expr, (tuple, Tuple)):
minpoly, root = expr
if not minpoly.is_Poly:
minpoly = Poly(minpoly)
elif expr.is_AlgebraicNumber:
minpoly, root = expr.minpoly, expr.root
else:
minpoly, root = minimal_polynomial(
expr, args.get('gen'), polys=True), expr
dom = minpoly.get_domain()
if coeffs is not None:
if not isinstance(coeffs, ANP):
rep = DMP.from_sympy_list(sympify(coeffs), 0, dom)
scoeffs = Tuple(*coeffs)
else:
rep = DMP.from_list(coeffs.to_list(), 0, dom)
scoeffs = Tuple(*coeffs.to_list())
if rep.degree() >= minpoly.degree():
rep = rep.rem(minpoly.rep)
else:
rep = DMP.from_list([1, 0], 0, dom)
scoeffs = Tuple(1, 0)
if root.is_negative:
rep = -rep
scoeffs = Tuple(-1, 0)
sargs = (root, scoeffs)
if alias is not None:
if not isinstance(alias, Symbol):
alias = Symbol(alias)
sargs = sargs + (alias,)
obj = Expr.__new__(cls, *sargs)
obj.rep = rep
obj.root = root
obj.alias = alias
obj.minpoly = minpoly
return obj
def __hash__(self):
return super(AlgebraicNumber, self).__hash__()
def _eval_evalf(self, prec):
return self.as_expr()._evalf(prec)
@property
def is_aliased(self):
"""Returns ``True`` if ``alias`` was set. """
return self.alias is not None
def as_poly(self, x=None):
"""Create a Poly instance from ``self``. """
from sympy import Dummy, Poly, PurePoly
if x is not None:
return Poly.new(self.rep, x)
else:
if self.alias is not None:
return Poly.new(self.rep, self.alias)
else:
return PurePoly.new(self.rep, Dummy('x'))
def as_expr(self, x=None):
"""Create a Basic expression from ``self``. """
return self.as_poly(x or self.root).as_expr().expand()
def coeffs(self):
"""Returns all SymPy coefficients of an algebraic number. """
return [ self.rep.dom.to_sympy(c) for c in self.rep.all_coeffs() ]
def native_coeffs(self):
"""Returns all native coefficients of an algebraic number. """
return self.rep.all_coeffs()
def to_algebraic_integer(self):
"""Convert ``self`` to an algebraic integer. """
from sympy import Poly
f = self.minpoly
if f.LC() == 1:
return self
coeff = f.LC()**(f.degree() - 1)
poly = f.compose(Poly(f.gen/f.LC()))
minpoly = poly*coeff
root = f.LC()*self.root
return AlgebraicNumber((minpoly, root), self.coeffs())
def _eval_simplify(self, ratio, measure):
from sympy.polys import CRootOf, minpoly
for r in [r for r in self.minpoly.all_roots() if r.func != CRootOf]:
if minpoly(self.root - r).is_Symbol:
# use the matching root if it's simpler
if measure(r) < ratio*measure(self.root):
return AlgebraicNumber(r)
return self
class RationalConstant(Rational):
"""
Abstract base class for rationals with specific behaviors
Derived classes must define class attributes p and q and should probably all
be singletons.
"""
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class IntegerConstant(Integer):
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class Zero(with_metaclass(Singleton, IntegerConstant)):
"""The number zero.
Zero is a singleton, and can be accessed by ``S.Zero``
Examples
========
>>> from sympy import S, Integer, zoo
>>> Integer(0) is S.Zero
True
>>> 1/S.Zero
zoo
References
==========
.. [1] http://en.wikipedia.org/wiki/Zero
"""
p = 0
q = 1
is_positive = False
is_negative = False
is_zero = True
is_number = True
__slots__ = []
@staticmethod
def __abs__():
return S.Zero
@staticmethod
def __neg__():
return S.Zero
def _eval_power(self, expt):
if expt.is_positive:
return self
if expt.is_negative:
return S.ComplexInfinity
if expt.is_real is False:
return S.NaN
# infinities are already handled with pos and neg
# tests above; now throw away leading numbers on Mul
# exponent
coeff, terms = expt.as_coeff_Mul()
if coeff.is_negative:
return S.ComplexInfinity**terms
if coeff is not S.One: # there is a Number to discard
return self**terms
def _eval_order(self, *symbols):
# Order(0,x) -> 0
return self
def __nonzero__(self):
return False
__bool__ = __nonzero__
def as_coeff_Mul(self, rational=False): # XXX this routine should be deleted
"""Efficiently extract the coefficient of a summation. """
return S.One, self
class One(with_metaclass(Singleton, IntegerConstant)):
"""The number one.
One is a singleton, and can be accessed by ``S.One``.
Examples
========
>>> from sympy import S, Integer
>>> Integer(1) is S.One
True
References
==========
.. [1] http://en.wikipedia.org/wiki/1_%28number%29
"""
is_number = True
p = 1
q = 1
__slots__ = []
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.NegativeOne
def _eval_power(self, expt):
return self
def _eval_order(self, *symbols):
return
@staticmethod
def factors(limit=None, use_trial=True, use_rho=False, use_pm1=False,
verbose=False, visual=False):
if visual:
return S.One
else:
return {}
class NegativeOne(with_metaclass(Singleton, IntegerConstant)):
"""The number negative one.
NegativeOne is a singleton, and can be accessed by ``S.NegativeOne``.
Examples
========
>>> from sympy import S, Integer
>>> Integer(-1) is S.NegativeOne
True
See Also
========
One
References
==========
.. [1] http://en.wikipedia.org/wiki/%E2%88%921_%28number%29
"""
is_number = True
p = -1
q = 1
__slots__ = []
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.One
def _eval_power(self, expt):
if expt.is_odd:
return S.NegativeOne
if expt.is_even:
return S.One
if isinstance(expt, Number):
if isinstance(expt, Float):
return Float(-1.0)**expt
if expt is S.NaN:
return S.NaN
if expt is S.Infinity or expt is S.NegativeInfinity:
return S.NaN
if expt is S.Half:
return S.ImaginaryUnit
if isinstance(expt, Rational):
if expt.q == 2:
return S.ImaginaryUnit**Integer(expt.p)
i, r = divmod(expt.p, expt.q)
if i:
return self**i*self**Rational(r, expt.q)
return
class Half(with_metaclass(Singleton, RationalConstant)):
"""The rational number 1/2.
Half is a singleton, and can be accessed by ``S.Half``.
Examples
========
>>> from sympy import S, Rational
>>> Rational(1, 2) is S.Half
True
References
==========
.. [1] http://en.wikipedia.org/wiki/One_half
"""
is_number = True
p = 1
q = 2
__slots__ = []
@staticmethod
def __abs__():
return S.Half
class Infinity(with_metaclass(Singleton, Number)):
r"""Positive infinite quantity.
In real analysis the symbol `\infty` denotes an unbounded
limit: `x\to\infty` means that `x` grows without bound.
Infinity is often used not only to define a limit but as a value
in the affinely extended real number system. Points labeled `+\infty`
and `-\infty` can be added to the topological space of the real numbers,
producing the two-point compactification of the real numbers. Adding
algebraic properties to this gives us the extended real numbers.
Infinity is a singleton, and can be accessed by ``S.Infinity``,
or can be imported as ``oo``.
Examples
========
>>> from sympy import oo, exp, limit, Symbol
>>> 1 + oo
oo
>>> 42/oo
0
>>> x = Symbol('x')
>>> limit(exp(x), x, oo)
oo
See Also
========
NegativeInfinity, NaN
References
==========
.. [1] http://en.wikipedia.org/wiki/Infinity
"""
is_commutative = True
is_positive = True
is_infinite = True
is_number = True
is_prime = False
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\infty"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.NegativeInfinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf'):
return S.NaN
else:
return Float('inf')
else:
return S.Infinity
return NotImplemented
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.Infinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('inf'):
return S.NaN
else:
return Float('inf')
else:
return S.Infinity
return NotImplemented
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.Zero or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == 0:
return S.NaN
if other > 0:
return Float('inf')
else:
return Float('-inf')
else:
if other > 0:
return S.Infinity
else:
return S.NegativeInfinity
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf') or \
other == Float('inf'):
return S.NaN
elif other.is_nonnegative:
return Float('inf')
else:
return Float('-inf')
else:
if other >= 0:
return S.Infinity
else:
return S.NegativeInfinity
return NotImplemented
__truediv__ = __div__
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.NegativeInfinity
def _eval_power(self, expt):
"""
        ``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression Result Notes
================ ======= ==============================
``oo ** nan`` ``nan``
``oo ** -p`` ``0`` ``p`` is number, ``oo``
================ ======= ==============================
See Also
========
Pow
NaN
NegativeInfinity
"""
from sympy.functions import re
if expt.is_positive:
return S.Infinity
if expt.is_negative:
return S.Zero
if expt is S.NaN:
return S.NaN
if expt is S.ComplexInfinity:
return S.NaN
if expt.is_real is False and expt.is_number:
expt_real = re(expt)
if expt_real.is_positive:
return S.ComplexInfinity
if expt_real.is_negative:
return S.Zero
if expt_real.is_zero:
return S.NaN
return self**expt.evalf()
def _as_mpf_val(self, prec):
return mlib.finf
def _sage_(self):
import sage.all as sage
return sage.oo
def __hash__(self):
return super(Infinity, self).__hash__()
def __eq__(self, other):
return other is S.Infinity
def __ne__(self, other):
return other is not S.Infinity
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if other.is_real:
return S.false
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.NegativeInfinity:
return S.false
elif other.is_nonpositive:
return S.false
elif other.is_infinite and other.is_positive:
return S.true
return Expr.__le__(self, other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.NegativeInfinity:
return S.true
elif other.is_nonpositive:
return S.true
elif other.is_infinite and other.is_positive:
return S.false
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if other.is_real:
return S.true
return Expr.__ge__(self, other)
def __mod__(self, other):
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
oo = S.Infinity
class NegativeInfinity(with_metaclass(Singleton, Number)):
"""Negative infinite quantity.
NegativeInfinity is a singleton, and can be accessed
by ``S.NegativeInfinity``.
See Also
========
Infinity
"""
is_commutative = True
is_negative = True
is_infinite = True
is_number = True
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"-\infty"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.Infinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('inf'):
return Float('nan')
else:
return Float('-inf')
else:
return S.NegativeInfinity
return NotImplemented
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.NegativeInfinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf'):
return Float('nan')
else:
return Float('-inf')
else:
return S.NegativeInfinity
return NotImplemented
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.Zero or other is S.NaN:
return S.NaN
elif other.is_Float:
if other is S.NaN or other.is_zero:
return S.NaN
elif other.is_positive:
return Float('-inf')
else:
return Float('inf')
else:
if other.is_positive:
return S.NegativeInfinity
else:
return S.Infinity
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf') or \
other == Float('inf') or \
other is S.NaN:
return S.NaN
elif other.is_nonnegative:
return Float('-inf')
else:
return Float('inf')
else:
if other >= 0:
return S.NegativeInfinity
else:
return S.Infinity
return NotImplemented
__truediv__ = __div__
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.Infinity
def _eval_power(self, expt):
"""
        ``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression Result Notes
================ ======= ==============================
``(-oo) ** nan`` ``nan``
``(-oo) ** oo`` ``nan``
``(-oo) ** -oo`` ``nan``
``(-oo) ** e`` ``oo`` ``e`` is positive even integer
``(-oo) ** o`` ``-oo`` ``o`` is positive odd integer
================ ======= ==============================
See Also
========
Infinity
Pow
NaN
"""
if expt.is_number:
if expt is S.NaN or \
expt is S.Infinity or \
expt is S.NegativeInfinity:
return S.NaN
if isinstance(expt, Integer) and expt.is_positive:
if expt.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
return S.NegativeOne**expt*S.Infinity**expt
def _as_mpf_val(self, prec):
return mlib.fninf
def _sage_(self):
import sage.all as sage
return -(sage.oo)
def __hash__(self):
return super(NegativeInfinity, self).__hash__()
def __eq__(self, other):
return other is S.NegativeInfinity
def __ne__(self, other):
return other is not S.NegativeInfinity
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.Infinity:
return S.true
elif other.is_nonnegative:
return S.true
elif other.is_infinite and other.is_negative:
return S.false
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if other.is_real:
return S.true
return Expr.__le__(self, other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if other.is_real:
return S.false
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.Infinity:
return S.false
elif other.is_nonnegative:
return S.false
elif other.is_infinite and other.is_negative:
return S.true
return Expr.__ge__(self, other)
def __mod__(self, other):
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
class NaN(with_metaclass(Singleton, Number)):
"""
Not a Number.
This serves as a place holder for numeric values that are indeterminate.
    Most operations on NaN produce another NaN. Most indeterminate forms,
    such as ``0/0`` or ``oo - oo`` produce NaN. Two exceptions are ``0**0``
    and ``oo**0``, which both produce ``1`` (this is consistent with Python's
    float).
NaN is loosely related to floating point nan, which is defined in the
IEEE 754 floating point standard, and corresponds to the Python
``float('nan')``. Differences are noted below.
NaN is mathematically not equal to anything else, even NaN itself. This
explains the initially counter-intuitive results with ``Eq`` and ``==`` in
the examples below.
NaN is not comparable so inequalities raise a TypeError. This is in
    contrast with floating point nan where all inequalities are false.
NaN is a singleton, and can be accessed by ``S.NaN``, or can be imported
as ``nan``.
Examples
========
>>> from sympy import nan, S, oo, Eq
>>> nan is S.NaN
True
>>> oo - oo
nan
>>> nan + 1
nan
>>> Eq(nan, nan) # mathematical equality
False
>>> nan == nan # structural equality
True
References
==========
.. [1] http://en.wikipedia.org/wiki/NaN
"""
is_commutative = True
is_real = None
is_rational = None
is_algebraic = None
is_transcendental = None
is_integer = None
is_comparable = False
is_finite = None
is_zero = None
is_prime = None
is_positive = None
is_negative = None
is_number = True
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\mathrm{NaN}"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __div__(self, other):
return self
__truediv__ = __div__
def floor(self):
return self
def ceiling(self):
return self
def _as_mpf_val(self, prec):
return _mpf_nan
def _sage_(self):
import sage.all as sage
return sage.NaN
def __hash__(self):
return super(NaN, self).__hash__()
def __eq__(self, other):
# NaN is structurally equal to another NaN
return other is S.NaN
def __ne__(self, other):
return other is not S.NaN
def _eval_Eq(self, other):
# NaN is not mathematically equal to anything, even NaN
return S.false
# Expr will _sympify and raise TypeError
__gt__ = Expr.__gt__
__ge__ = Expr.__ge__
__lt__ = Expr.__lt__
__le__ = Expr.__le__
nan = S.NaN
class ComplexInfinity(with_metaclass(Singleton, AtomicExpr)):
r"""Complex infinity.
In complex analysis the symbol `\tilde\infty`, called "complex
infinity", represents a quantity with infinite magnitude, but
undetermined complex phase.
ComplexInfinity is a singleton, and can be accessed by
``S.ComplexInfinity``, or can be imported as ``zoo``.
Examples
========
>>> from sympy import zoo, oo
>>> zoo + 42
zoo
>>> 42/zoo
0
>>> zoo + zoo
nan
>>> zoo*zoo
zoo
See Also
========
Infinity
"""
is_commutative = True
is_infinite = True
is_number = True
is_prime = False
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\tilde{\infty}"
@staticmethod
def __abs__():
return S.Infinity
def floor(self):
return self
def ceiling(self):
return self
@staticmethod
def __neg__():
return S.ComplexInfinity
def _eval_power(self, expt):
if expt is S.ComplexInfinity:
return S.NaN
if isinstance(expt, Number):
if expt is S.Zero:
return S.NaN
else:
if expt.is_positive:
return S.ComplexInfinity
else:
return S.Zero
def _sage_(self):
import sage.all as sage
return sage.UnsignedInfinityRing.gen()
zoo = S.ComplexInfinity
class NumberSymbol(AtomicExpr):
is_commutative = True
is_finite = True
is_number = True
__slots__ = []
is_NumberSymbol = True
def __new__(cls):
return AtomicExpr.__new__(cls)
def approximation(self, number_cls):
""" Return an interval with number_cls endpoints
that contains the value of NumberSymbol.
If not implemented, then return None.
"""
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if self is other:
return True
if isinstance(other, Number) and self.is_irrational:
return False
return False # NumberSymbol != non-(Number|self)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if self is other:
return S.false
if isinstance(other, Number):
approx = self.approximation_interval(other.__class__)
if approx is not None:
l, u = approx
if other < l:
return S.false
if other > u:
return S.true
return _sympify(self.evalf() < other)
if other.is_real and other.is_number:
other = other.evalf()
return _sympify(self.evalf() < other)
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if self is other:
return S.true
if other.is_real and other.is_number:
other = other.evalf()
if isinstance(other, Number):
return _sympify(self.evalf() <= other)
return Expr.__le__(self, other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
r = _sympify((-self) < (-other))
if r in (S.true, S.false):
return r
else:
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
r = _sympify((-self) <= (-other))
if r in (S.true, S.false):
return r
else:
return Expr.__ge__(self, other)
def __int__(self):
# subclass with appropriate return value
raise NotImplementedError
def __long__(self):
return self.__int__()
def __hash__(self):
return super(NumberSymbol, self).__hash__()
class Exp1(with_metaclass(Singleton, NumberSymbol)):
r"""The `e` constant.
The transcendental number `e = 2.718281828\ldots` is the base of the
natural logarithm and of the exponential function, `e = \exp(1)`.
Sometimes called Euler's number or Napier's constant.
Exp1 is a singleton, and can be accessed by ``S.Exp1``,
or can be imported as ``E``.
Examples
========
>>> from sympy import exp, log, E
>>> E is exp(1)
True
>>> log(E)
1
References
==========
.. [1] http://en.wikipedia.org/wiki/E_%28mathematical_constant%29
"""
is_real = True
is_positive = True
is_negative = False # XXX Forces is_negative/is_nonnegative
is_irrational = True
is_number = True
is_algebraic = False
is_transcendental = True
__slots__ = []
def _latex(self, printer):
return r"e"
@staticmethod
def __abs__():
return S.Exp1
def __int__(self):
return 2
def _as_mpf_val(self, prec):
return mpf_e(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(2), Integer(3))
elif issubclass(number_cls, Rational):
pass
def _eval_power(self, expt):
from sympy import exp
return exp(expt)
def _eval_rewrite_as_sin(self):
from sympy import sin
I = S.ImaginaryUnit
return sin(I + S.Pi/2) - I*sin(I)
def _eval_rewrite_as_cos(self):
from sympy import cos
I = S.ImaginaryUnit
return cos(I) + I*cos(I + S.Pi/2)
def _sage_(self):
import sage.all as sage
return sage.e
E = S.Exp1
class Pi(with_metaclass(Singleton, NumberSymbol)):
r"""The `\pi` constant.
The transcendental number `\pi = 3.141592654\ldots` represents the ratio
of a circle's circumference to its diameter, the area of the unit circle,
the half-period of trigonometric functions, and many other things
in mathematics.
Pi is a singleton, and can be accessed by ``S.Pi``, or can
be imported as ``pi``.
Examples
========
>>> from sympy import S, pi, oo, sin, exp, integrate, Symbol
>>> S.Pi
pi
>>> pi > 3
True
>>> pi.is_irrational
True
>>> x = Symbol('x')
>>> sin(x + 2*pi)
sin(x)
>>> integrate(exp(-x**2), (x, -oo, oo))
sqrt(pi)
References
==========
.. [1] http://en.wikipedia.org/wiki/Pi
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = False
is_transcendental = True
__slots__ = []
def _latex(self, printer):
return r"\pi"
@staticmethod
def __abs__():
return S.Pi
def __int__(self):
return 3
def _as_mpf_val(self, prec):
return mpf_pi(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(3), Integer(4))
elif issubclass(number_cls, Rational):
return (Rational(223, 71), Rational(22, 7))
def _sage_(self):
import sage.all as sage
return sage.pi
pi = S.Pi
class GoldenRatio(with_metaclass(Singleton, NumberSymbol)):
r"""The golden ratio, `\phi`.
    `\phi = \frac{1 + \sqrt{5}}{2}` is an algebraic number. Two quantities
are in the golden ratio if their ratio is the same as the ratio of
their sum to the larger of the two quantities, i.e. their maximum.
GoldenRatio is a singleton, and can be accessed by ``S.GoldenRatio``.
Examples
========
>>> from sympy import S
>>> S.GoldenRatio > 1
True
>>> S.GoldenRatio.expand(func=True)
1/2 + sqrt(5)/2
>>> S.GoldenRatio.is_irrational
True
References
==========
.. [1] http://en.wikipedia.org/wiki/Golden_ratio
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = True
is_transcendental = False
__slots__ = []
def _latex(self, printer):
return r"\phi"
def __int__(self):
return 1
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
rv = mlib.from_man_exp(phi_fixed(prec + 10), -prec - 10)
return mpf_norm(rv, prec)
def _eval_expand_func(self, **hints):
from sympy import sqrt
return S.Half + S.Half*sqrt(5)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.One, Rational(2))
elif issubclass(number_cls, Rational):
pass
def _sage_(self):
import sage.all as sage
return sage.golden_ratio
_eval_rewrite_as_sqrt = _eval_expand_func
class EulerGamma(with_metaclass(Singleton, NumberSymbol)):
r"""The Euler-Mascheroni constant.
`\gamma = 0.5772157\ldots` (also called Euler's constant) is a mathematical
constant recurring in analysis and number theory. It is defined as the
limiting difference between the harmonic series and the
natural logarithm:
.. math:: \gamma = \lim\limits_{n\to\infty}
\left(\sum\limits_{k=1}^n\frac{1}{k} - \ln n\right)
EulerGamma is a singleton, and can be accessed by ``S.EulerGamma``.
Examples
========
>>> from sympy import S
>>> S.EulerGamma.is_irrational
>>> S.EulerGamma > 0
True
>>> S.EulerGamma > 1
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = None
is_number = True
__slots__ = []
def _latex(self, printer):
return r"\gamma"
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.libhyper.euler_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (S.Half, Rational(3, 5))
def _sage_(self):
import sage.all as sage
return sage.euler_gamma
class Catalan(with_metaclass(Singleton, NumberSymbol)):
r"""Catalan's constant.
`K = 0.91596559\ldots` is given by the infinite series
.. math:: K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}
Catalan is a singleton, and can be accessed by ``S.Catalan``.
Examples
========
>>> from sympy import S
>>> S.Catalan.is_irrational
>>> S.Catalan > 0
True
>>> S.Catalan > 1
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Catalan%27s_constant
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = None
is_number = True
__slots__ = []
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.catalan_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (Rational(9, 10), S.One)
def _sage_(self):
import sage.all as sage
return sage.catalan
class ImaginaryUnit(with_metaclass(Singleton, AtomicExpr)):
r"""The imaginary unit, `i = \sqrt{-1}`.
I is a singleton, and can be accessed by ``S.I``, or can be
imported as ``I``.
Examples
========
>>> from sympy import I, sqrt
>>> sqrt(-1)
I
>>> I*I
-1
>>> 1/I
-I
References
==========
.. [1] http://en.wikipedia.org/wiki/Imaginary_unit
"""
is_commutative = True
is_imaginary = True
is_finite = True
is_number = True
is_algebraic = True
is_transcendental = False
__slots__ = []
def _latex(self, printer):
return r"i"
@staticmethod
def __abs__():
return S.One
def _eval_evalf(self, prec):
return self
def _eval_conjugate(self):
return -S.ImaginaryUnit
def _eval_power(self, expt):
"""
b is I = sqrt(-1)
        e is a symbolic object but not equal to 0 or 1
I**r -> (-1)**(r/2) -> exp(r/2*Pi*I) -> sin(Pi*r/2) + cos(Pi*r/2)*I, r is decimal
I**0 mod 4 -> 1
I**1 mod 4 -> I
I**2 mod 4 -> -1
I**3 mod 4 -> -I
"""
if isinstance(expt, Number):
if isinstance(expt, Integer):
expt = expt.p % 4
if expt == 0:
return S.One
if expt == 1:
return S.ImaginaryUnit
if expt == 2:
return -S.One
return -S.ImaginaryUnit
return (S.NegativeOne)**(expt*S.Half)
return
def as_base_exp(self):
return S.NegativeOne, S.Half
def _sage_(self):
import sage.all as sage
return sage.I
@property
def _mpc_(self):
return (Float(0)._mpf_, Float(1)._mpf_)
I = S.ImaginaryUnit
def sympify_fractions(f):
return Rational(f.numerator, f.denominator)
converter[fractions.Fraction] = sympify_fractions
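# If gmpy2 or gmpy is available, also register converters for its mpz/mpq types.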
try:
if HAS_GMPY == 2:
import gmpy2 as gmpy
elif HAS_GMPY == 1:
import gmpy
else:
raise ImportError
def sympify_mpz(x):
return Integer(long(x))
def sympify_mpq(x):
return Rational(long(x.numerator), long(x.denominator))
converter[type(gmpy.mpz(1))] = sympify_mpz
converter[type(gmpy.mpq(1, 2))] = sympify_mpq
except ImportError:
pass
def sympify_mpmath(x):
return Expr._from_mpmath(x, x.context.prec)
converter[mpnumeric] = sympify_mpmath
def sympify_complex(a):
real, imag = list(map(sympify, (a.real, a.imag)))
return real + S.ImaginaryUnit*imag
converter[complex] = sympify_complex
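# Seed the Integer cache with the singleton values so 0, 1 and -1 map to S.Zero, S.One and S.NegativeOne.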
_intcache[0] = S.Zero
_intcache[1] = S.One
_intcache[-1] = S.NegativeOne
from .power import Pow, integer_nthroot
from .mul import Mul
Mul.identity = One()
from .add import Add
Add.identity = Zero()
| gpl-3.0 | 5,382,836,522,082,844,000 | 28.597353 | 108 | 0.533299 | false |
pdeesawat/PSIT58_test_01 | Test_Python_code/final_code/Malaysia/epidemic.py | 1 | 2085 | import plotly.plotly as py
import plotly.graph_objs as go
#Get data
data = open('Real_Final_database_02.csv')
alldata = data.readlines()
listdata = []
for i in alldata:
listdata.append(i.strip().split(','))
# Separate information
year = []
affect = []
damage = []
death =[]
for j in listdata:
if j[0] == 'Malaysia' and j[2] == 'Epidemic':
year.append(int(j[1]))
affect.append(int(j[3]))
damage.append(int(j[4]))
death.append(int(j[5]))
# Create and style traces
trace1 = go.Scatter(
x=year,
y=affect,
mode='lines+markers',
name="'Total Affected'",
hoverinfo='Total Affected',
line = dict(
shape='spline',
color = ('00CC00'),
width = 1.5),
)
trace2 = go.Scatter(
x=year,
y=damage,
mode='lines+markers',
name='Total Damage \'000 US',
hoverinfo='Total Damage \'000 US',
line = dict(
shape='spline',
color = ('3399FF'),
width = 1.5),
yaxis='y2'
)
trace3 = go.Scatter(
x=year,
y=death,
mode='lines+markers',
name='Total Death',
hoverinfo='Total Death',
line = dict(
shape='spline',
color = ('FF3300'),
width = 1.5),
yaxis='y3'
)
data = [trace1, trace2, trace3]
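# Three overlaid y-axes let each series keep its own scale: affected and damage on the left, deaths on the right.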
layout = go.Layout(
title='Epidemic in Malaysia',
yaxis=dict(
title='Total affected',
titlefont=dict(
color='00CC00'
),
tickfont=dict(
color='00CC00'
)
),
yaxis2=dict(
title='Total Damage \'000 US',
titlefont=dict(
color='3399FF'
),
tickfont=dict(
color='3399FF'
),
anchor='free',
overlaying='y',
side='left',
position=0.15
),
yaxis3=dict(
title='Total Death',
titlefont=dict(
color='FF3300'
),
tickfont=dict(
color='FF3300'
),
anchor='x',
overlaying='y',
side='right'
)
)
fig = go.Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='bennyy')
| apache-2.0 | -5,695,956,762,500,826,000 | 20.060606 | 49 | 0.52518 | false |
levilucio/SyVOLT | UMLRT2Kiltera_MM/transformation_reduced/Himesis/HState2ProcDef.py | 1 | 9626 |
from core.himesis import Himesis
import cPickle as pickle
from uuid import UUID
class HState2ProcDef(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HState2ProcDef.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HState2ProcDef, self).__init__(name='HState2ProcDef', num_nodes=51, edges=[])
# Add the edges
self.add_edges([(6, 10), (10, 13), (6, 11), (11, 14), (6, 12), (12, 15), (30, 20), (20, 7), (31, 21), (21, 41), (32, 22), (22, 42), (33, 23), (23, 43), (34, 24), (24, 44), (6, 25), (25, 46), (13, 26), (26, 47), (14, 27), (27, 48), (15, 28), (28, 49), (6, 29), (29, 50), (4, 0), (0, 16), (0, 17), (0, 18), (0, 19), (7, 8), (8, 40), (7, 9), (9, 45), (5, 1), (1, 3), (3, 2), (2, 45), (5, 4), (30, 35), (31, 36), (32, 37), (33, 38), (34, 39), (16, 6), (17, 13), (18, 14), (19, 15), (35, 46), (36, 47), (37, 48), (38, 49), (39, 50)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'UMLRT2Kiltera_MM'
p2
a.""")
self["name"] = """State2ProcDef"""
self["GUID__"] = UUID('78502d93-2aa8-4673-8040-443189b75867')
# Set the node attributes
self.vs[0]["mm__"] = """ApplyModel"""
self.vs[0]["GUID__"] = UUID('8add7e58-4f3a-4326-8990-2a01ed85c739')
self.vs[1]["mm__"] = """match_contains"""
self.vs[1]["GUID__"] = UUID('571f8dbb-475e-463e-8887-3f7cf0a0536d')
self.vs[2]["mm__"] = """hasAttribute_S"""
self.vs[2]["GUID__"] = UUID('ecba80aa-2e71-4866-a4b3-83158b720a88')
self.vs[3]["name"] = """state1"""
self.vs[3]["classtype"] = """State"""
self.vs[3]["mm__"] = """State"""
self.vs[3]["cardinality"] = """+"""
self.vs[3]["GUID__"] = UUID('a4696d60-f104-4f32-a6ee-cf3f67b679e0')
self.vs[4]["mm__"] = """paired_with"""
self.vs[4]["GUID__"] = UUID('7e8ebe3c-3991-470f-974b-2315ff9bf9a7')
self.vs[5]["mm__"] = """MatchModel"""
self.vs[5]["GUID__"] = UUID('5c8a6cec-5ca1-4dad-ab5b-bb155f5ca259')
self.vs[6]["name"] = """procdef1"""
self.vs[6]["classtype"] = """ProcDef"""
self.vs[6]["mm__"] = """ProcDef"""
self.vs[6]["cardinality"] = """1"""
self.vs[6]["GUID__"] = UUID('81d844d7-0d79-4799-a44f-c1d43ba7d462')
self.vs[7]["name"] = """concat1"""
self.vs[7]["mm__"] = """Concat"""
self.vs[7]["Type"] = """'String'"""
self.vs[7]["GUID__"] = UUID('1cda59bb-c9dc-46d6-adca-79ed83b12c26')
self.vs[8]["mm__"] = """hasArgs"""
self.vs[8]["GUID__"] = UUID('a34d1b7c-f900-4b28-af8f-a65196dbfae3')
self.vs[9]["mm__"] = """hasArgs"""
self.vs[9]["GUID__"] = UUID('9c701599-0c18-4d01-8cbf-ab7b4610abaa')
self.vs[10]["associationType"] = """channelNames"""
self.vs[10]["mm__"] = """directLink_T"""
self.vs[10]["GUID__"] = UUID('c12b0338-e593-4e2e-9069-f34fe10f872e')
self.vs[11]["associationType"] = """channelNames"""
self.vs[11]["mm__"] = """directLink_T"""
self.vs[11]["GUID__"] = UUID('7d2c8b2e-aeae-4f0a-8f3f-f4c58c0cc508')
self.vs[12]["associationType"] = """channelNames"""
self.vs[12]["mm__"] = """directLink_T"""
self.vs[12]["GUID__"] = UUID('5c74b636-4687-44ac-ac92-b2aefd438283')
self.vs[13]["name"] = """name1"""
self.vs[13]["classtype"] = """Name"""
self.vs[13]["mm__"] = """Name"""
self.vs[13]["cardinality"] = """1"""
self.vs[13]["GUID__"] = UUID('0362c5b2-f9da-4965-b9d7-f1b3cc727a7f')
self.vs[14]["name"] = """name2"""
self.vs[14]["classtype"] = """Name"""
self.vs[14]["mm__"] = """Name"""
self.vs[14]["cardinality"] = """1"""
self.vs[14]["GUID__"] = UUID('588a28b8-ad74-4546-ac72-dd6b6884fc82')
self.vs[15]["name"] = """name3"""
self.vs[15]["classtype"] = """Name"""
self.vs[15]["mm__"] = """Name"""
self.vs[15]["cardinality"] = """1"""
self.vs[15]["GUID__"] = UUID('52123033-ee9c-4e64-9383-affddac62ac0')
self.vs[16]["mm__"] = """apply_contains"""
self.vs[16]["GUID__"] = UUID('d492c643-4c79-4964-844d-147da9b489ae')
self.vs[17]["mm__"] = """apply_contains"""
self.vs[17]["GUID__"] = UUID('50c18e94-e19b-42b3-8b33-1d96a37f08c6')
self.vs[18]["mm__"] = """apply_contains"""
self.vs[18]["GUID__"] = UUID('e3986ffb-9c82-4951-a7e7-e9abfd5fd1ef')
self.vs[19]["mm__"] = """apply_contains"""
self.vs[19]["GUID__"] = UUID('938616b7-fe71-40eb-ad7c-1cc5dc54a50d')
self.vs[20]["mm__"] = """rightExpr"""
self.vs[20]["GUID__"] = UUID('bab200f5-bc28-4dd8-8af6-7e765d53c30c')
self.vs[21]["mm__"] = """rightExpr"""
self.vs[21]["GUID__"] = UUID('12c09355-c612-4dae-b1e2-b2d1f9ab150e')
self.vs[22]["mm__"] = """rightExpr"""
self.vs[22]["GUID__"] = UUID('91c1ceeb-d4dc-4c0b-a717-67e0519b417f')
self.vs[23]["mm__"] = """rightExpr"""
self.vs[23]["GUID__"] = UUID('29b2deda-85e6-4024-bcd7-122e41b64440')
self.vs[24]["mm__"] = """rightExpr"""
self.vs[24]["GUID__"] = UUID('1ea30f45-e180-42d6-a5f2-e87113611af7')
self.vs[25]["mm__"] = """hasAttribute_T"""
self.vs[25]["GUID__"] = UUID('3c5b17b8-cedd-4b1d-b791-fe5c680c7db0')
self.vs[26]["mm__"] = """hasAttribute_T"""
self.vs[26]["GUID__"] = UUID('bcc74f78-d666-48de-8ed4-0343795d79b9')
self.vs[27]["mm__"] = """hasAttribute_T"""
self.vs[27]["GUID__"] = UUID('7e9aaeaa-c6e6-4d5d-b832-9d1df7a2eff3')
self.vs[28]["mm__"] = """hasAttribute_T"""
self.vs[28]["GUID__"] = UUID('f0ba2922-0963-41c8-b5ba-81d2e47db1ca')
self.vs[29]["mm__"] = """hasAttribute_T"""
self.vs[29]["GUID__"] = UUID('a9d1751b-4308-4494-a6a4-01f211c8a62b')
self.vs[30]["name"] = """eq1"""
self.vs[30]["mm__"] = """Equation"""
self.vs[30]["GUID__"] = UUID('67a49252-5855-44d8-9490-0a3dbce5bfed')
self.vs[31]["name"] = """eq2"""
self.vs[31]["mm__"] = """Equation"""
self.vs[31]["GUID__"] = UUID('5c871fcb-179f-48cd-9229-fa803c819574')
self.vs[32]["name"] = """eq3"""
self.vs[32]["mm__"] = """Equation"""
self.vs[32]["GUID__"] = UUID('9a99a013-116f-4d40-a12b-039674f32b93')
self.vs[33]["name"] = """eq4"""
self.vs[33]["mm__"] = """Equation"""
self.vs[33]["GUID__"] = UUID('532c9f20-da83-4d35-953d-7223fcd22907')
self.vs[34]["name"] = """eq5"""
self.vs[34]["mm__"] = """Equation"""
self.vs[34]["GUID__"] = UUID('0bb6282c-5410-4de6-9dc2-911d5aae0067')
self.vs[35]["mm__"] = """leftExpr"""
self.vs[35]["GUID__"] = UUID('1626c837-74d4-4cc4-844e-ce1536bc8fac')
self.vs[36]["mm__"] = """leftExpr"""
self.vs[36]["GUID__"] = UUID('9ffca102-e8fe-4cfc-ab77-26d881a2f076')
self.vs[37]["mm__"] = """leftExpr"""
self.vs[37]["GUID__"] = UUID('de798926-96c9-4f33-83a4-cc114da7a36b')
self.vs[38]["mm__"] = """leftExpr"""
self.vs[38]["GUID__"] = UUID('81eb325a-97f6-48e1-b44a-74011ab6e078')
self.vs[39]["mm__"] = """leftExpr"""
self.vs[39]["GUID__"] = UUID('4b9a4f62-5702-4269-844f-3e5357d0c848')
self.vs[40]["name"] = """S"""
self.vs[40]["mm__"] = """Constant"""
self.vs[40]["Type"] = """'String'"""
self.vs[40]["GUID__"] = UUID('0c6ac74c-b589-4d65-8ad3-3075c90b2f23')
self.vs[41]["name"] = """enp"""
self.vs[41]["mm__"] = """Constant"""
self.vs[41]["Type"] = """'String'"""
self.vs[41]["GUID__"] = UUID('b94a46a1-5232-4363-8d12-c7c6d1d72748')
self.vs[42]["name"] = """exit"""
self.vs[42]["mm__"] = """Constant"""
self.vs[42]["Type"] = """'String'"""
self.vs[42]["GUID__"] = UUID('2a4230cf-b625-4933-9b71-e2ae36698897')
self.vs[43]["name"] = """exack"""
self.vs[43]["mm__"] = """Constant"""
self.vs[43]["Type"] = """'String'"""
self.vs[43]["GUID__"] = UUID('879d6bb5-cb9d-46f9-9100-f05a4177482c')
self.vs[44]["name"] = """procdef"""
self.vs[44]["mm__"] = """Constant"""
self.vs[44]["Type"] = """'String'"""
self.vs[44]["GUID__"] = UUID('cf8854ef-ff6d-4148-8fe4-a5eb17775c95')
self.vs[45]["name"] = """name"""
self.vs[45]["mm__"] = """Attribute"""
self.vs[45]["Type"] = """'String'"""
self.vs[45]["GUID__"] = UUID('afdcec56-a4b5-49eb-a91f-4782236a94fa')
self.vs[46]["name"] = """name"""
self.vs[46]["mm__"] = """Attribute"""
self.vs[46]["Type"] = """'String'"""
self.vs[46]["GUID__"] = UUID('d1ded168-67b3-46b8-9884-6922ff5fe0b4')
self.vs[47]["name"] = """literal"""
self.vs[47]["mm__"] = """Attribute"""
self.vs[47]["Type"] = """'String'"""
self.vs[47]["GUID__"] = UUID('8cb890c3-999d-44c9-9c8a-e14cc1bcbb54')
self.vs[48]["name"] = """literal"""
self.vs[48]["mm__"] = """Attribute"""
self.vs[48]["Type"] = """'String'"""
self.vs[48]["GUID__"] = UUID('cc294ea7-ee66-4076-8d9e-df2684988b60')
self.vs[49]["name"] = """literal"""
self.vs[49]["mm__"] = """Attribute"""
self.vs[49]["Type"] = """'String'"""
self.vs[49]["GUID__"] = UUID('f1adcf7d-4b56-4840-840c-7d83949dd7c9')
self.vs[50]["name"] = """pivotout"""
self.vs[50]["mm__"] = """Attribute"""
self.vs[50]["Type"] = """'String'"""
self.vs[50]["GUID__"] = UUID('8204b7c1-c62e-467f-90e0-6c2931f8d9a5')
| mit | 1,067,458,510,316,786,800 | 53.384181 | 536 | 0.511947 | false |
nyu-dl/WebNav | convert2emb.py | 1 | 2769 | '''
Convert article's text in the dataset to word embeddings using a pretrained word2vec dictionary.
'''
import h5py
import numpy as np
from nltk.tokenize import wordpunct_tokenize
import nltk
import utils
import cPickle as pkl
import os
import parameters as prm
import time
def compute_emb(pages_path_in, pages_path_out, vocab):
wemb = pkl.load(open(prm.wordemb_path, 'rb'))
dim_emb = wemb[wemb.keys()[0]].shape[0]
W = 0.01 * np.random.randn(len(vocab), dim_emb).astype(np.float32)
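    # Embedding matrix aligned with the vocabulary; rows for words missing from word2vec keep the small random initialization.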
for word, pos in vocab.items():
if word in wemb:
W[pos,:] = wemb[word]
f = h5py.File(pages_path_in, 'r')
if prm.att_doc and prm.att_segment_type == 'sentence':
nltk.download('punkt')
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
os.remove(pages_path_out) if os.path.exists(pages_path_out) else None
# Save to HDF5
fout = h5py.File(pages_path_out,'a')
if prm.att_doc:
shape = (f['text'].shape[0],prm.max_segs_doc,prm.dim_emb)
else:
shape=(f['text'].shape[0],prm.dim_emb)
embs = fout.create_dataset('emb', shape=shape, dtype=np.float32)
mask = fout.create_dataset('mask', shape=(f['text'].shape[0],), dtype=np.float32)
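    # With document attention, one embedding per segment is stored and 'mask' records the segment count;
    # otherwise a single weighted bag-of-words embedding is stored per article.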
i = 0
for text in f['text']:
st = time.time()
if prm.att_doc:
if prm.att_segment_type.lower() == 'section' or prm.att_segment_type.lower() == 'subsection':
segs = ['']
for line in text.split('\n'):
if prm.att_segment_type == 'section':
line = line.replace('===', '')
if line.strip().startswith('==') and line.strip().endswith('=='):
segs.append('')
segs[-1] += line + '\n'
elif prm.att_segment_type.lower() == 'sentence':
segs = tokenizer.tokenize(text.decode('ascii', 'ignore'))
elif prm.att_segment_type.lower() == 'word':
segs = wordpunct_tokenize(text.decode('ascii', 'ignore'))
else:
raise ValueError('Not a valid value for the attention segment type (att_segment_type) parameter. Valid options are "section", "subsection", "sentence" or "word".')
segs = segs[:prm.max_segs_doc]
emb_ = utils.Word2Vec_encode(segs, wemb)
embs[i,:len(emb_),:] = emb_
mask[i] = len(emb_)
else:
bow0, bow1 = utils.BOW(wordpunct_tokenize(text.lower()), vocab)
emb = (W[bow0] * bow1[:,None]).sum(0)
embs[i,:] = emb
i += 1
#if i > 3000:
# break
if i % prm.dispFreq == 0:
print 'processing article', i, 'time', time.time()-st
f.close()
fout.close()
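# Editor's sketch, not part of the original module: a minimal driver for
# compute_emb. The attribute names prm.vocab_path, prm.pages_path and
# prm.pages_emb_path are assumptions for illustration; only the prm.* names
# used above are known to exist in the real parameters module.
if __name__ == '__main__':
    # vocabulary maps words to row indices of the embedding matrix W
    vocab = pkl.load(open(prm.vocab_path, 'rb'))
    compute_emb(prm.pages_path, prm.pages_emb_path, vocab)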
| bsd-3-clause | -5,497,543,181,166,937,000 | 34.5 | 179 | 0.558685 | false |
SysTheron/adhocracy | src/adhocracy/lib/democracy/delegation_node.py | 1 | 13057 | from datetime import datetime
import logging
from sqlalchemy import or_
from adhocracy import model
from adhocracy.model import Delegation
log = logging.getLogger(__name__)
# REFACT: Rename: DelegationTraverser? Maybe thats also a new object
# that does the traversing in different ways
# That may become a Strategy object on how the delegation should be traversed
# REFACT: Extract: DelegationUserGraph, DelegationTopicGraph,
# perhaps DelegationGraph as common superclass
# This object should represent a whole delegation graph from different
# points of view
# One Goal would be to be able to load the whole subgraph from the db
# in as few queries as possible
# Optimally just one...
# Maybe there will just be one DelegationGraph with different Strategies
# of how create/ traverse/ filter it attached
# Also it needs to be possible to ask the graph for the status at a
# specific time (all objects have a deleted_at property or it can be
# determined when they are overridden by a later choice)
# Some problems with this class:
# - many of the methods work across multiple nodes and layers of the graph
# - many of the methods take additional parameters to determine when to
# filter this graph for different criteria
# - there are methods that work on the whole graph (some as class-methods)
# - Forward and backward traversal are in the same object
# - it is oblivious as to why the graph is traversed, either to allow
# all agents to place their vote
# or to find out what the delegation wheight of a specific user
# is in a specific context
class DelegationNode(object):
"""
A ``DelegationNode`` describes a part of the voting delegation graph
    surrounding a ``Delegateable`` (i.e. a ``Category``, ``Issue`` or
``Proposal``) and a ``User``.
    Right now the delegation graph is a graph of incoming and outgoing
    delegations on multiple levels - one level per scope.
    Each DelegationNode represents the incoming and outgoing delegations
of one user on one level (scope/delegateable) in this graph.
**TODO:** Developing a good caching strategy for this class would be
useful in order to cache the delegation graph to memcached.
:param user: The ``User`` at the center of this ``DelegationNode``.
:param delegateable: A ``Delegateable``.
"""
def __init__(self, user, delegateable):
self.user = user
self.delegateable = delegateable
def _query_traverse(self, querymod, recurse, at_time=None):
if not at_time: # shouldn't this be if at_time is None: ?
at_time = datetime.utcnow()
query = model.meta.Session.query(Delegation)
query = query.filter(Delegation.scope == self.delegateable)
query = query.filter(Delegation.create_time <= at_time)
query = query.filter(or_(Delegation.revoke_time == None, # noqa
Delegation.revoke_time > at_time))
query = querymod(query)
delegations = query.all()
if recurse:
for parent in self.delegateable.parents:
node = DelegationNode(self.user, parent)
delegations += node._query_traverse(querymod, recurse, at_time)
return delegations
def inbound(self, recurse=True, at_time=None,
is_counting_delegations=False):
"""
Retrieve all inbound delegations (i.e. those that the user has received
from other users in order to vote on their behalf) that apply to the
``Delegateable``.
:param recurse: if ``True``, search will include delegations on parent
``Delegateables`` in breadth-first traversal order.
:param at_time: return the delegation graph at the given time, defaults
to the current time.
"""
delegations = self._query_traverse(
lambda q: q.filter(Delegation.agent == self.user),
recurse, at_time)
delegations = self._filter_out_overriden_delegations(delegations)
if is_counting_delegations:
delegations = self._filter_out_delegations_where_a_more_specific_delegation_exists(delegations)
delegations = self._filter_out_delegations_that_are_overriden_by_direct_votes(delegations)
return delegations
def transitive_inbound(self, recurse=True, at_time=None, _path=None,
is_counting_delegations=False):
"""
Retrieve inbound delegations recursing through the delegation graph
as well as through the category tree.
:param recurse: if ``True``, search will include delegations on parent
``Delegateables`` in breadth-first traversal order.
:param at_time: return the delegation graph at the given time, defaults
to the current time.
:returns: list of ``Delegation``
"""
if _path is None:
_path = []
elif self.user in _path:
return [] # we already visited this node
# circle detection uses this path of visited nodes
_path.append(self.user)
delegations = self.inbound(
recurse=recurse, at_time=at_time,
is_counting_delegations=is_counting_delegations)
for delegation in list(delegations):
ddnode = DelegationNode(delegation.principal, self.delegateable)
additional_delegations = ddnode.transitive_inbound(
recurse=recurse, at_time=at_time, _path=_path,
is_counting_delegations=is_counting_delegations)
for additional_delegation in additional_delegations:
if additional_delegation.principal in _path:
continue # this is a delegation from a node we
# already visited
else:
delegations.append(additional_delegation)
# _path is used as a stack in the recursion - so we need to remove
# what we added in going into the recursion
_path.remove(self.user)
return delegations
def outbound(self, recurse=True, at_time=None, filter=True):
"""
Retrieve all outbound delegations (i.e. those that the user has given
        to other users in order to allow them to vote on his/her behalf) that
apply to the ``Delegateable``.
:param recurse: if ``True``, search will include delegations on parent
``Delegateables`` in breadth-first traversal order.
:param at_time: return the delegation graph at the given time, defaults
to the current time.
:returns: list of ``Delegation``
"""
delegations = self._query_traverse(
lambda q: q.filter(Delegation.principal == self.user),
recurse, at_time)
if filter:
by_agent = dict()
for delegation in set(delegations):
by_agent[delegation.agent] = (
by_agent.get(delegation.agent, []) + [delegation])
delegations = [self.filter_less_specific_delegations(ds)[0] for
ds in by_agent.values()]
return delegations
# TODO: consider to add a transitive-outbound to know where the vote
# will end up for a specific issue
# REFACT: rename propagate_vote_to_delegators?
def propagate(self, callable, _edge=None, _propagation_path=None):
"""
Propagate a given action along the delegation graph *against*
its direction, i.e. from the agent node towards its principal.
This is the natural direction to propagate actions along this
network since it allows principals to reproduce the actions of
their agents.
Propagation will abort on circular dependencies but has no
recursion depth limit.
:param callable: A callable that is to be called on each node.
It must take three arguments, a ``User``, a
``Delegateable`` and the ``Delegation``
which served as a transitory edge during the
last step of the propagation.
:returns: a list of all results produced by the callable.
"""
if not _propagation_path:
_propagation_path = [self]
elif self in _propagation_path:
return []
else:
_propagation_path.append(self)
result = [callable(self.user, self.delegateable, _edge)]
if not self.delegateable.instance.allow_delegate:
return result
for delegation in self.inbound():
node = DelegationNode(delegation.principal, self.delegateable)
result += node.propagate(callable,
_edge=delegation,
_propagation_path=_propagation_path)
return result
def number_of_delegations(self):
return len(self.transitive_inbound(is_counting_delegations=True))
def __repr__(self):
return "<DelegationNode(%s,%s)>" % (self.user.user_name,
self.delegateable.id)
def __eq__(self, other):
return self.user == other.user and \
self.delegateable == other.delegateable
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def create_delegation(cls, from_user, to_user, scope):
delegation = model.Delegation(from_user, to_user, scope)
# dwt: why do I need to add the delegation to the session here?
# it should just be added via the relation it has to the user and
# either not be in the session at all or automatically via the
# user object
model.meta.Session.add(delegation)
# dwt: Why is the flush here neccessary? - supplies the id
# of course - but is that needed?
model.meta.Session.flush()
return delegation
@classmethod
def filter_less_specific_delegations(cls, delegations):
"""
        Given a set of delegations, remove those that are overridden by others.
A delegation is overridden whenever there is another delegation with a
narrower scope that still applies.
        :param delegations: The list of delegations that are to be filtered.
                            They all need to be from the same principal.
:returns: A filtered list of delegations.
"""
matches = list(delegations)
for d in delegations:
for m in matches:
if m.scope.is_super(d.scope):
matches.remove(m)
return matches
def _filter_out_overriden_delegations(self, delegations):
# return delegations
by_principal = dict()
for delegation in set(delegations):
by_principal[delegation.principal] = by_principal.get(
delegation.principal, []) + [delegation]
return [self.filter_less_specific_delegations(ds)[0] for
ds in by_principal.values()]
def _filter_out_delegations_that_are_overriden_by_direct_votes(
self, delegations):
from adhocracy.lib.democracy.decision import Decision
def is_overriden_by_own_decision(delegation):
if not hasattr(delegation.scope, 'poll'):
return True # scope doesn't have polls -> can't self decide
if delegation.scope.poll is None:
                return True # currently no poll in this scope -> can't
# self decide
decision = Decision(delegation.principal, delegation.scope.poll)
return not decision.is_self_decided()
return filter(is_overriden_by_own_decision, delegations)
# REFACT: this method apears to do the same as
# filter_less_specific_delegations (modulo the pre-work
# that happens before it is called)
def _filter_out_delegations_where_a_more_specific_delegation_exists(
self, delegations):
def is_overriden_by_other_delegation(delegation):
node = DelegationNode(delegation.principal, self.delegateable)
outbound_delegations = node.outbound()
if 1 == len(outbound_delegations):
# If this returns false, the data model is invalid!
return outbound_delegations[0].agent == self.user
elif len(outbound_delegations) > 1:
smallest_delegations = [outbound_delegations[0]]
for delegation in outbound_delegations:
scope = smallest_delegations[0].scope
if scope.is_super(delegation.scope):
smallest_delegations = [delegation]
elif scope == delegation.scope:
smallest_delegations.append(delegation)
for delegation in smallest_delegations:
if delegation.agent == self.user:
return True
return False
return filter(is_overriden_by_other_delegation, delegations)
| agpl-3.0 | -6,489,310,189,579,756,000 | 43.111486 | 107 | 0.630543 | false |
uwcirg/true_nth_usa_portal | portal/models/coredata.py | 1 | 15330 | """Coredata Module
Core is a rather ambiguous term - includes upfront questions such
as DOB and patient / staff role. Basic diagnosis and procedure
questions.
Interventions will sometimes require their own set of data, for which the
`/api/coredata/*` endpoints exist.
"""
from abc import ABCMeta, abstractmethod
import sys
from flask import current_app
from .audit import Audit
from .clinical_constants import CC
from .intervention import INTERVENTION, UserIntervention
from .organization import Organization, OrgTree
from .procedure_codes import (
known_treatment_not_started,
known_treatment_started,
)
from .role import ROLE
from .tou import ToU
class Coredata(object):
"""Singleton managing coredata **model** logic, mostly shortcuts"""
class __singleton(object):
"""Hidden inner class defines all the public methods
Outer class accessors wrap, so any calls hit the single
instance and appear at outer class scope.
"""
def __init__(self):
self._registered = []
def register_class(self, cls):
if cls not in self._registered:
self._registered.append(cls)
def required(self, user, **kwargs):
# Returns list of datapoints required for user
items = []
for cls in self._registered:
instance = cls()
if instance.required(user, **kwargs):
items.append(instance.id)
return items
def optional(self, user, **kwargs):
# Returns list of optional datapoints for user
items = []
for cls in self._registered:
instance = cls()
if instance.optional(user, **kwargs):
items.append(instance.id)
return items
def initial_obtained(self, user, **kwargs):
# Check if all registered methods have data
for cls in self._registered:
instance = cls()
if not instance.required(user, **kwargs):
continue
if instance.hasdata(user, **kwargs):
continue
current_app.logger.debug(
                    'initial NOT obtained for at least {}'.format(cls.__name__))
return False
return True
def still_needed(self, user, **kwargs):
# Returns list of {field, collection_method} still needed
needed = []
for cls in self._registered:
instance = cls()
if not instance.required(user, **kwargs):
continue
if not instance.hasdata(user, **kwargs):
d = {'field': instance.id}
method = instance.collection_method(user, **kwargs)
if method:
d['collection_method'] = method
needed.append(d)
if needed:
current_app.logger.debug(
'initial still needed for {}'.format(
[i['field'] for i in needed]))
return needed
instance = None
def __new__(cls):
if not Coredata.instance:
Coredata.instance = Coredata.__singleton()
return Coredata.instance
@staticmethod
def reset():
del Coredata.instance
Coredata.instance = None
def __getattr__(self, name):
"""Delegate to hidden inner class"""
return getattr(self.instance, name)
def __setattr__(self, name, value):
"""Delegate to hidden inner class"""
return setattr(self.instance, name, value)
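# Editor's sketch of typical use (the `user` object is an assumption for
# illustration; DobData is one of the datapoint classes defined below):
#   coredata = Coredata()
#   coredata.register_class(DobData)      # once per datapoint class
#   coredata.required(user)               # e.g. ['dob', ...]
#   coredata.still_needed(user)           # e.g. [{'field': 'dob'}, ...]
#   coredata.initial_obtained(user)       # True once all required data exist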
class CoredataPoint(object, metaclass=ABCMeta):
"""Abstract base class - defining methods each datapoint needs"""
@abstractmethod
def required(self, user, **kwargs):
"""Returns true if required for user, false otherwise
Applications are configured to request a set of core data points.
This method returns True if the active configuration includes the
datapoint for the user, regardless of whether or not a value has
been acquired. i.e., should the user ever be asked for this point,
or should the control be hidden regardless of the presence of data.
NB - the user's state is frequently considered. For example,
belonging to an intervention or organization may imply the datapoint
should never be an available option for the user to set.
Optional and required are mutually exclusive - an item may not be in
either for a user, but it shouldn't be in both.
"""
        raise NotImplementedError
@abstractmethod
def optional(self, user, **kwargs):
"""Returns true if optional for user, false otherwise
Applications are configured to request a set of core data points.
This method returns True if the active configuration includes the
datapoint for the user, regardless of whether or not a value has
been acquired. i.e., should the user ever be asked for this point,
or should the control be hidden regardless of the presence of data.
NB - the user's state is frequently considered. For example,
belonging to an intervention or organization may imply the datapoint
should never be an available option for the user to set.
Optional and required are mutually exclusive - an item may not be in
either for a user, but it shouldn't be in both.
"""
        raise NotImplementedError
@abstractmethod
def hasdata(self, user, **kwargs):
"""Returns true if the data has been obtained, false otherwise"""
        raise NotImplementedError
@property
def id(self):
"""Returns identifier for class - namely lowercase w/o Data suffix"""
name = self.__class__.__name__
return name[:-4].lower()
def collection_method(self, user, **kwargs):
"""Returns None unless the item has a specialized method"""
return None
def CP_user(user):
"""helper to determine if the user has Care Plan access"""
return UserIntervention.user_access_granted(
user_id=user.id,
intervention_id=INTERVENTION.CARE_PLAN.id)
def SR_user(user):
"""helper to determine if the user has Sexual Recovery access"""
return UserIntervention.user_access_granted(
user_id=user.id,
intervention_id=INTERVENTION.SEXUAL_RECOVERY.id)
def IRONMAN_user(user):
"""helper to determine if user is associated with the IRONMAN org"""
# NB - not all systems have this organization!
iron_org = Organization.query.filter_by(name='IRONMAN').first()
if iron_org:
OT = OrgTree()
for org_id in (o.id for o in user.organizations if o.id):
top_of_org = OT.find(org_id).top_level()
if top_of_org == iron_org.id:
return True
return False
def enter_manually_interview_assisted(user, **kwargs):
"""helper to determine if we're in `enter manually - interview assisted`
Looks for 'entry_method' in kwargs - returns true if it has value
'interview assisted', false otherwise.
"""
return kwargs.get('entry_method') == 'interview assisted'
def enter_manually_paper(user, **kwargs):
"""helper to determine if we're in `enter manually - paper`
Looks for 'entry_method' in kwargs - returns true if it has value
'paper', false otherwise.
"""
return kwargs.get('entry_method') == 'paper'
# Series of "datapoint" collection classes follow
class DobData(CoredataPoint):
def required(self, user, **kwargs):
# DOB is only required for patient
if user.has_role(ROLE.PATIENT.value):
return True
return False
def optional(self, user, **kwargs):
# Optional for anyone, for whom it isn't required
return not self.required(user)
def hasdata(self, user, **kwargs):
return user.birthdate is not None
class RaceData(CoredataPoint):
def required(self, user, **kwargs):
return False
def optional(self, user, **kwargs):
if SR_user(user):
return False
if IRONMAN_user(user):
return False
if user.has_role(ROLE.PATIENT.value):
return True
return False
def hasdata(self, user, **kwargs):
return user.races.count() > 0
class EthnicityData(CoredataPoint):
def required(self, user, **kwargs):
return False
def optional(self, user, **kwargs):
if SR_user(user):
return False
if IRONMAN_user(user):
return False
if user.has_role(ROLE.PATIENT.value):
return True
return False
def hasdata(self, user, **kwargs):
return user.ethnicities.count() > 0
class IndigenousData(CoredataPoint):
def required(self, user, **kwargs):
return False
def optional(self, user, **kwargs):
if SR_user(user):
return False
if IRONMAN_user(user):
return False
if user.has_role(ROLE.PATIENT.value):
return True
return False
def hasdata(self, user, **kwargs):
return user.indigenous.count() > 0
class RoleData(CoredataPoint):
def required(self, user, **kwargs):
return not SR_user(user)
def optional(self, user, **kwargs):
return False
def hasdata(self, user, **kwargs):
if len(user.roles) > 0:
return True
class OrgData(CoredataPoint):
def required(self, user, **kwargs):
if SR_user(user) or CP_user(user):
return False
if user.has_role(
ROLE.PATIENT.value, ROLE.STAFF.value,
ROLE.STAFF_ADMIN.value):
return True
return False
def optional(self, user, **kwargs):
return False
def hasdata(self, user, **kwargs):
return len(user.organizations) > 0
class ClinicalData(CoredataPoint):
def required(self, user, **kwargs):
if SR_user(user):
return False
return user.has_role(ROLE.PATIENT.value)
def optional(self, user, **kwargs):
return False
def hasdata(self, user, **kwargs):
required = {item: False for item in (
CC.BIOPSY, CC.PCaDIAG)}
for obs in user.observations:
if obs.codeable_concept in required:
required[obs.codeable_concept] = True
return all(required.values())
class TreatmentData(CoredataPoint):
def required(self, user, **kwargs):
if SR_user(user):
return False
return user.has_role(ROLE.PATIENT.value)
def optional(self, user, **kwargs):
return False
def hasdata(self, user, **kwargs):
# procedure known to have started or not started by the user
return known_treatment_not_started(user) or \
known_treatment_started(user)
class LocalizedData(CoredataPoint):
def required(self, user, **kwargs):
if SR_user(user):
return False
if current_app.config.get('LOCALIZED_AFFILIATE_ORG'):
# Some systems use organization affiliation to denote localized
# on these systems, we don't ask about localized - let
# the org check worry about that
return False
return user.has_role(ROLE.PATIENT.value)
def optional(self, user, **kwargs):
return False
def hasdata(self, user, **kwargs):
for obs in user.observations:
if obs.codeable_concept == CC.PCaLocalized:
return True
return False
class NameData(CoredataPoint):
def required(self, user, **kwargs):
return not SR_user(user)
def optional(self, user, **kwargs):
return not self.required(user)
def hasdata(self, user, **kwargs):
return user.first_name and user.last_name
class TOU_core(CoredataPoint):
"""The flavors of Terms Of Use inherit from here to define the 'type'"""
def required(self, user, **kwargs):
return not SR_user(user)
def optional(self, user, **kwargs):
return False
def hasdata(self, user, **kwargs):
return ToU.query.join(Audit).filter(
Audit.subject_id == user.id,
ToU.type == self.tou_type,
ToU.active.is_(True)).count() > 0
def collection_method(self, user, **kwargs):
"""TOU collection may be specialized"""
# if the user's top_level_org is associated with
# ACCEPT_TERMS_ON_NEXT_ORG - the collection method
# is "ACCEPT_ON_NEXT"
org = current_app.config.get('ACCEPT_TERMS_ON_NEXT_ORG')
if org:
org = Organization.query.filter_by(name=org).one()
if org and user.first_top_organization() == org:
return "ACCEPT_ON_NEXT"
return None
class Website_Terms_Of_UseData(TOU_core):
tou_type = 'website terms of use'
def required(self, user, **kwargs):
if (not super(self.__class__, self).required(user, **kwargs) or
enter_manually_paper(user, **kwargs) or
enter_manually_interview_assisted(user, **kwargs)):
return False
return True
class Subject_Website_ConsentData(TOU_core):
tou_type = 'subject website consent'
def required(self, user, **kwargs):
if not super(self.__class__, self).required(user, **kwargs):
return False
return user.has_role(ROLE.PATIENT.value)
class Stored_Website_Consent_FormData(TOU_core):
tou_type = 'stored website consent form'
def required(self, user, **kwargs):
if (not super(self.__class__, self).required(user, **kwargs) or
not enter_manually_interview_assisted(user, **kwargs)):
return False
return user.has_role(ROLE.PATIENT.value)
class Privacy_PolicyData(TOU_core):
tou_type = 'privacy policy'
def required(self, user, **kwargs):
if (not super(self.__class__, self).required(user, **kwargs) or
enter_manually_interview_assisted(user, **kwargs) or
enter_manually_paper(user, **kwargs)):
return False
return True
def configure_coredata(app):
"""Configure app for coredata checks"""
coredata = Coredata()
# Add static list of "configured" datapoints
config_datapoints = app.config.get(
'REQUIRED_CORE_DATA', [
'name', 'dob', 'role', 'org', 'clinical', 'localized',
'treatment', 'race', 'ethnicity', 'indigenous',
'website_terms_of_use', 'subject_website_consent',
'stored_website_consent_form', 'privacy_policy',
])
for name in config_datapoints:
# Camel case with 'Data' suffix - expect to find class in local
# scope or raise exception
cls_name = name.title() + 'Data'
try:
# limit class loading to this module - die if not found
cls = getattr(sys.modules[__name__], cls_name)
except AttributeError as e:
app.logger.error(
"Configuration for REQUIRED_CORE_DATA includes "
"unknown element '{}' - can't continue".format(name))
raise e
coredata.register_class(cls)
| bsd-3-clause | -2,850,778,395,077,922,000 | 30.285714 | 79 | 0.607502 | false |
ctengiz/MkUtils | mkutils/mkupload.py | 1 | 5316 | # -*- coding: utf-8 -*-
"""
Helper functions to assist file upload to db using boottle, sqlachemy
Requires specific class definition as :
class Upload(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_upload'), primary_key=True)
uuid = Column(String(40), unique=True)
file_name = Column(String(200))
ext = Column(String(20))
cnt = deferred(Column(BLOB))
cnt_th = deferred(Column(BLOB))
notes = Column(TEXT)
mimetype = Column(String(100))
__author__ : Çağatay Tengiz
__date__ : 10.01.2014
"""
from uuid import uuid1
import mimetypes
import os
from PIL import Image
from sqlalchemy import Sequence
import io
def mime_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def normalize_filename(afilename):
return afilename.replace(' ', '_').\
replace('ç', 'c').replace('Ç', 'c').\
replace('ı', 'i').replace('İ', 'i').\
replace('ğ', 'g').replace('Ğ', 'g').\
replace('ü', 'u').replace('Ü', 'u').\
replace('ş', 's').replace('Ş', 's').\
replace('ö', 'o').replace('Ö', 'o').\
lower().\
decode('utf-8')
def resize_image(img, basewidth, thumb=False):
if type(img) == str:
        # put the image (raw byte string) into a file-like in-memory object
        fx = io.BytesIO(img)
else:
img.seek(0)
fx = img
    # then read the image back from that in-memory file object
temp_img = Image.open(fx)
    # get the image type (jpeg, png, gif etc.) #imghdr.what(fx)
file_type = temp_img.format
if thumb:
size = basewidth, basewidth
temp_img.thumbnail(size, Image.ANTIALIAS)
else:
if temp_img.size[0] >= basewidth:
wpercent = (basewidth / float(temp_img.size[0]) )
hsize = int((float(temp_img.size[1]) * float(wpercent)))
temp_img = temp_img.resize((basewidth, hsize), Image.ANTIALIAS)
    # dump the image object into a buffer and convert it to a byte string
buf = io.BytesIO()
temp_img.save(buf, file_type)
#tempImg.save(buf, fileType, optimize=True, quality=80)
return buf.getvalue()
def upload_file(db,
upload_class,
afile,
defi,
old_id=None,
upload_path="",
thumb_size=200,
image_size=1024,
store_in_db=False):
if not afile:
return 0
if old_id:
arw = db.query(upload_class).filter(upload_class.id == old_id).first()
uuid = arw.uuid
else:
arw = upload_class()
db.add(arw)
arw.id = db.execute(Sequence('gn_upload'))
uuid = uuid1()
#split and chooses the last part (the filename with extension)
_filename = afile.filename.replace('\\', '/').split('/')[-1]
_ext = os.path.splitext(_filename)[1].lower()
_is_image = _ext in ['.jpg', '.jpeg', '.png', '.bmp', '.gif']
arw.uuid = uuid
arw.file_name = _filename
arw.ext = _ext
arw.mimetype = mime_type(_filename)
arw.defi = defi
if _is_image:
try:
thumb_size = int(thumb_size)
image_size = int(image_size)
except:
thumb_size=200
image_size=1024
cnt_thb = resize_image(afile.file, thumb_size, True) #thumbnail
        cnt_bin = resize_image(afile.file, image_size) # original size
else:
afile.file.seek(0)
cnt_bin = afile.file.read()
cnt_thb = None
#write the file to disk
if store_in_db:
arw.cnt = cnt_bin
arw.cnt_th = cnt_thb
else:
save_path = upload_path
file_path = "{path}/{file}".format(path=save_path, file=uuid)
thumb_path = "{path}/th_{file}".format(path=save_path, file=uuid)
fout = open(file_path, 'wb')
fout.write(cnt_bin)
fout.close()
#Write thumbnail to disk
if cnt_thb:
fout = open(thumb_path, 'wb')
fout.write(cnt_thb)
fout.close()
db.commit()
return arw.id, uuid
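# Editor's sketch of a typical call site (assumed names: a bottle `request`,
# a SQLAlchemy session `db` and the Upload model described in the module
# docstring; none of these are defined in this file):
#   afile = request.files.get('upload')
#   new_id, new_uuid = upload_file(db, Upload, afile, defi=u'avatar',
#                                  upload_path='/srv/uploads',
#                                  thumb_size=200, image_size=1024)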
def convert_db_images():
from sqlalchemy.orm import scoped_session, sessionmaker
dbsession = scoped_session(sessionmaker(bind=dbcon.engine))
imgs = dbsession.query(dbcon.CmsUpload).all()
for x in imgs:
if x.cnt:
cnt_thb = resize_image(x.cnt, 200, True)
#write the file to disk
fout = open(config.BASE_PATH + '/uploads/th_' + x.uuid, 'wb')
fout.write(cnt_thb)
fout.close()
cnt2 = resize_image(x.cnt, 1024, True)
#write the file to disk
fout = open(config.BASE_PATH + '/uploads/' + x.uuid, 'wb')
fout.write(cnt2)
fout.close()
def convert_dir_images():
imgs = os.listdir(config.BASE_PATH + '/uploads')
for x in imgs:
if x[0] != 't':
fl = open(config.BASE_PATH + '/uploads/' + x, 'rb')
cnt_thb = resize_image(fl, 200, True)
#write the file to disk
fout = open(config.BASE_PATH + '/uploads/th_' + x, 'wb')
fout.write(cnt_thb)
fout.close()
cnt2 = resize_image(fl, 1024, True)
#write the file to disk
fout = open(config.BASE_PATH + '/uploads/' + x, 'wb')
fout.write(cnt2)
fout.close()
| mit | 3,934,317,726,200,146,400 | 27.467742 | 78 | 0.555619 | false |
theodi/signin-web | RFIDiot_Bits/odi-multiselect.py | 1 | 1846 | #!/usr/bin/python
# odi-multiselect.py - continuously read cards and post numbers to defined REST endpoint
#
# Adapted from multiselect.py, packaged as part of the RFIDiot library. (c) Adam Laurie
#
# This code is copyright (c) David Tarrant, 2013, All rights reserved.
# The following terms apply:
#
# This code is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import rfidiot
import sys
import os
import time
import string
import httplib
import urllib
import subprocess
from subprocess import Popen
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
try:
card= rfidiot.card
except:
os._exit(True)
args= rfidiot.args
card.info('multiselect v0.1n')
# force card type if specified
if len(args) == 1:
if not card.settagtype(args[0]):
print 'Could not set tag type'
os._exit(True)
else:
card.settagtype(card.ALL)
while 42:
if card.select():
print ' Tag ID: ' + card.uid,
params = urllib.urlencode({'action': "keycard", 'keycard_id': card.uid})
conn = httplib.HTTPConnection("signin.office.theodi.org")
conn.request("POST", "/staff/staff_action.php", params, headers)
response = conn.getresponse()
print response.status, response.reason
data = response.read()
conn.close()
p = Popen(["afplay", str(response.status) + ".mp3"])
else:
print ' No card present\r',
sys.stdout.flush()
| mit | 8,844,806,863,099,645,000 | 28.301587 | 89 | 0.705309 | false |
jprchlik/cms2_python_helpers | create_fit_plots_wrapper.py | 1 | 2144 | import sys, getopt
import create_fit_plots as cfp
def main(argv):
inargs1 = 'ht:c:o:f:l:'
snargs1 = inargs1[1:].split(':')
inargs2 = ['time','cmsdir','outdir','fitfile','loopfile']
helpinfo = "create_model_files_wrapper.py is a command line utility which calls the class create_model_files\n"
helpinfo = helpinfo+"The command takes only a few arguments and if you stick to a common theme you should only have to change the time between run\n"
helpinfo = helpinfo+"python create_model_files_wrapper.py"
for i in range(len(inargs2)): helpinfo=helpinfo+' -'+snargs1[i]+' <--'+inargs2[i]+'>'
helpinfo=helpinfo+'\n'
#Descriptive information about each keyword
argsdes=["A string time in the format of YYYY/MM/DD HH:MM:SS",
"The directory containing the CMS2 (default = read 'cms2_dir' file)",
"The directory format for the sigmoid (assumes a subdirectory of cmsdir (default = YYYY/MM/DD/HHMM/",
"The fit file (default = fit_information.dat)",
"The loop file (default = fit_loops.dat)"]
for i in range(len(inargs2)): helpinfo = helpinfo+' -'+snargs1[i]+' <--'+inargs2[i]+'> : '+argsdes[i]+'\n'
#load user values
try:
opts,args = getopt.getopt(argv,inargs1,inargs2)
    except getopt.GetoptError:
print(helpinfo)
sys.exit(2)
#default for directory structure
sigd = '%Y/%m/%d/%H%M/'
#default for the fit file
fitf = 'fit_information.dat'
#default for the loop file
lpsf = 'fit_loops.dat'
#default for cms2 directory
cmsd = ''
for opt, arg in opts:
if opt == '-h':
print(helpinfo)
sys.exit(0)
elif opt in ("-t","--time"):
time = arg
elif opt in ("-c","--cmsdir"):
cmsd = arg
elif opt in ("-o","--outdir"):
sigd = arg
elif opt in ("-f","--fitfile"):
fitf = arg
elif opt in ("-l","--loopfile"):
lpsf = arg
plot = cfp.cms2_plot(time,cmsdir=cmsd,outdir=sigd,fit_file=fitf,lop_file=lpsf)
plot.create_plots()
if __name__ == "__main__":
main(sys.argv[1:])
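# Editor's note - example invocation (time and paths are made up for
# illustration):
#   python create_fit_plots_wrapper.py -t "2013/05/04 12:00:00" \
#       -c /data/cms2/ -o %Y/%m/%d/%H%M/ -f fit_information.dat -l fit_loops.dat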
| mit | 7,184,129,804,112,462,000 | 30.529412 | 153 | 0.596082 | false |
Qwaz/solved-hacking-problem | PlaidCTF/2020/dyrpto/message_pb2.py | 1 | 2230 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='message.proto',
package='dyrpto',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\rmessage.proto\x12\x06\x64yrpto\"\"\n\x07Message\x12\n\n\x02id\x18\x01 \x02(\x05\x12\x0b\n\x03msg\x18\x02 \x02(\t')
)
_MESSAGE = _descriptor.Descriptor(
name='Message',
full_name='dyrpto.Message',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='dyrpto.Message.id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msg', full_name='dyrpto.Message.msg', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=25,
serialized_end=59,
)
DESCRIPTOR.message_types_by_name['Message'] = _MESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Message = _reflection.GeneratedProtocolMessageType('Message', (_message.Message,), dict(
DESCRIPTOR = _MESSAGE,
__module__ = 'message_pb2'
# @@protoc_insertion_point(class_scope:dyrpto.Message)
))
_sym_db.RegisterMessage(Message)
# @@protoc_insertion_point(module_scope)
| gpl-2.0 | 79,200,460,740,941,090 | 27.961039 | 138 | 0.702691 | false |
art-of-dom/hash-it | test/test_cli.py | 1 | 8528 | '''Tests for the cli interface'''
from __future__ import absolute_import
import unittest
import sys
from nose.tools import assert_equals
from hashit.cli.cli import cli_main
from hashit.cli.cli_status import CliStatus
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
# pylint: disable=no-self-use
# pylint: disable=bad-continuation
class TestCLI(unittest.TestCase):
def setUp(self):
self.args = {
'--hash-type': None,
'--generate': None,
'--verify': None,
'-r': False,
'-f': False,
'-a': False,
'-x': False,
'-b': False,
'<input>': None
}
def tearDown(self):
pass
# arg checks
def test_cil_retruns_error_if_no_args(self):
assert_equals(CliStatus.ARG_INVALID.value, cli_main(None))
def test_cil_retruns_success_no_vaild_args(self):
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
# arg checks hash-type check
def test_cil_retruns_success_known_hash_uppercase(self):
self.args['--hash-type'] = 'CRC32'
self.args['-x'] = True
self.args['<input>'] = '010203040506070809'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_retruns_success_known_hash_lowercase(self):
self.args['--hash-type'] = 'crc32'
self.args['-x'] = True
self.args['<input>'] = '010203040506070809'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_retruns_success_known_hash_mixedcase(self):
self.args['--hash-type'] = 'cRc32'
self.args['-x'] = True
self.args['<input>'] = '010203040506070809'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_retruns_error_unknown_hash(self):
self.args['--hash-type'] = 'foobar'
assert_equals(CliStatus.ARG_INVALID.value, cli_main(self.args))
self.assertEqual("Unknown hash type foobar",
sys.stdout.getvalue().strip()
)
# base hash / base hash-type
def test_cil_uses_default_hash_on_file(self):
self.args['-f'] = True
self.args['<input>'] = 'test/support/example.bin'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
self.assertEqual("input: test/support/example.bin | hash: BAD3",
sys.stdout.getvalue().strip()
)
def test_cil_uses_default_hash_on_ascii(self):
self.args['-a'] = True
self.args['<input>'] = '123456789'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
self.assertEqual("input: 123456789 | hash: BB3D",
sys.stdout.getvalue().strip()
)
def test_cil_uses_default_hash_on_hex(self):
self.args['-x'] = True
self.args['<input>'] = '010203040506070809'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
self.assertEqual("input: 010203040506070809 | hash: 4204",
sys.stdout.getvalue().strip()
)
def test_cil_uses_default_hash_on_file_reverse(self):
self.args['-f'] = True
self.args['-r'] = True
self.args['<input>'] = 'test/support/example.bin'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
self.assertEqual("input: test/support/example.bin | hash: EE93",
sys.stdout.getvalue().strip()
)
def test_cil_uses_default_hash_on_ascii_reverse(self):
self.args['-a'] = True
self.args['-r'] = True
self.args['<input>'] = '123456789'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
self.assertEqual("input: 123456789 | hash: 39D9",
sys.stdout.getvalue().strip()
)
def test_cil_uses_default_hash_on_hex_reverse(self):
self.args['-x'] = True
self.args['-r'] = True
self.args['<input>'] = '010203040506070809'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
self.assertEqual("input: 010203040506070809 | hash: C0E0",
sys.stdout.getvalue().strip()
)
# verify hash
def test_cil_verify_bad_hash_size(self):
self.args['-f'] = True
self.args['<input>'] = 'test/support/example.bin'
self.args['--verify'] = '0BAD3'
assert_equals(CliStatus.ARG_INVALID.value, cli_main(self.args))
def test_cil_verify_good_result_returns_zero_file(self):
self.args['-f'] = True
self.args['<input>'] = 'test/support/example.bin'
self.args['--verify'] = 'BAD3'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_verify_bad_result_returns_error_file(self):
self.args['-f'] = True
self.args['<input>'] = 'test/support/example.bin'
self.args['--verify'] = 'F00D'
assert_equals(CliStatus.VALIDATION_ERROR.value, cli_main(self.args))
def test_cil_verify_good_result_returns_zero_ascii(self):
self.args['-a'] = True
self.args['<input>'] = '123456789'
self.args['--verify'] = 'BB3D'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_verify_bad_result_returns_error_ascii(self):
self.args['-a'] = True
self.args['<input>'] = '123456789'
self.args['--verify'] = 'F00D'
assert_equals(CliStatus.VALIDATION_ERROR.value, cli_main(self.args))
def test_cil_verify_good_result_returns_zero_hex(self):
self.args['-x'] = True
self.args['<input>'] = '010203040506070809'
self.args['--verify'] = '4204'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_verify_bad_result_returns_error_hex(self):
self.args['-x'] = True
self.args['<input>'] = '010203040506070809'
self.args['--verify'] = 'F00D'
assert_equals(CliStatus.VALIDATION_ERROR.value, cli_main(self.args))
# verify hash brute force
def test_cil_verify_brute_force_good_result_returns_zero_file(self):
self.args['-f'] = True
self.args['-b'] = True
self.args['<input>'] = 'test/support/example.bin'
self.args['--verify'] = 'BAD3'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_verify_brute_force_bad_result_returns_error_file(self):
self.args['-f'] = True
self.args['-b'] = True
self.args['<input>'] = 'test/support/example.bin'
self.args['--verify'] = '000D'
assert_equals(CliStatus.VALIDATION_ERROR.value, cli_main(self.args))
def test_cil_verify_brute_force_good_result_returns_zero_ascii(self):
self.args['-a'] = True
self.args['-b'] = True
self.args['<input>'] = '123456789'
self.args['--verify'] = 'BB3D'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_verify_brute_force_bad_result_returns_error_ascii(self):
self.args['-a'] = True
self.args['-b'] = True
self.args['<input>'] = '123456789'
self.args['--verify'] = 'F00D'
assert_equals(CliStatus.VALIDATION_ERROR.value, cli_main(self.args))
def test_cil_verify_brute_force_good_result_returns_zero_hex(self):
self.args['-x'] = True
self.args['-b'] = True
self.args['<input>'] = '010203040506070809'
self.args['--verify'] = '4204'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_verify_brute_force_bad_result_returns_error_hex(self):
self.args['-x'] = True
self.args['-b'] = True
self.args['<input>'] = '010203040506070809'
self.args['--verify'] = 'F00D'
assert_equals(CliStatus.VALIDATION_ERROR.value, cli_main(self.args))
# generate hash
def test_cil_generate_bad_hash(self):
self.args['--generate'] = '0BAD3'
assert_equals(CliStatus.ARG_INVALID.value, cli_main(self.args))
def test_cil_generate_good_hash_returns_success(self):
self.args['--generate'] = 'BAD3'
assert_equals(CliStatus.SUCCESS.value, cli_main(self.args))
def test_cil_generate_unhandled_hash_generation_error(self):
self.args['--hash-type'] = 'CRC32'
self.args['--generate'] = 'BAD3BAD3'
assert_equals(CliStatus.GENERATION_ERROR.value, cli_main(self.args))
| mit | -9,095,046,861,608,438,000 | 38.299539 | 76 | 0.593574 | false |
ContinuumIO/ashiba | enaml/enaml/qt/qt_frame.py | 1 | 2010 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed
from enaml.widgets.frame import ProxyFrame
from .QtGui import QFrame
from .qt_constraints_widget import QtConstraintsWidget
STYLE = {
'box': QFrame.Box,
'panel': QFrame.Panel,
'styled_panel': QFrame.StyledPanel,
}
LINE_STYLE = {
'plain': QFrame.Plain,
'sunken': QFrame.Sunken,
'raised': QFrame.Raised,
}
class QtFrame(QtConstraintsWidget, ProxyFrame):
""" A Qt implementation of an Enaml ProxyFrame.
"""
#: A reference to the toolkit widget created by the proxy.
widget = Typed(QFrame)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Creates the QContainer widget.
"""
self.widget = QFrame(self.parent_widget())
def init_widget(self):
""" Initialize the widget.
"""
super(QtFrame, self).init_widget()
self.set_border(self.declaration.border)
#--------------------------------------------------------------------------
# ProxyFrame API
#--------------------------------------------------------------------------
def set_border(self, border):
""" Set the border for the widget.
"""
widget = self.widget
if border is None:
widget.setFrameShape(QFrame.NoFrame)
return
widget.setFrameShape(STYLE[border.style])
widget.setFrameShadow(LINE_STYLE[border.line_style])
widget.setLineWidth(border.line_width)
widget.setMidLineWidth(border.midline_width)
| bsd-3-clause | -79,735,812,177,728,620 | 28.558824 | 79 | 0.498507 | false |
ukuleleplayer/pureples | setup.py | 1 | 1291 | from setuptools import setup
setup(
name='pureples',
version='0.0',
author='adrian, simon',
author_email='[email protected]',
maintainer='simon, adrian',
maintainer_email='[email protected]',
url='https://github.com/ukuleleplayer/pureples',
license="MIT",
description='HyperNEAT and ES-HyperNEAT implemented in pure Python',
long_description='Python implementation of HyperNEAT and ES-HyperNEAT ' +
'developed by Adrian Westh and Simon Krabbe Munck for evolving arbitrary neural networks. ' +
'HyperNEAT and ES-HyperNEAT is originally developed by Kenneth O. Stanley and Sebastian Risi',
packages=['pureples', 'pureples/hyperneat', 'pureples/es_hyperneat', 'pureples/shared'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.x',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Scientific/Engineering'
],
install_requires=['numpy', 'neat-python', 'graphviz', 'matplotlib', 'gym']
)
| mit | 1,199,683,710,172,937,200 | 43.517241 | 115 | 0.650658 | false |
theanalyst/cinder | cinder/volume/drivers/vmware/error_util.py | 1 | 2271 | # Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception classes and SOAP response error checking module.
"""
from cinder import exception
NOT_AUTHENTICATED = 'NotAuthenticated'
class VimException(exception.CinderException):
"""The VIM Exception class."""
def __init__(self, msg):
exception.CinderException.__init__(self, msg)
class SessionOverLoadException(VimException):
"""Session Overload Exception."""
pass
class VimAttributeException(VimException):
"""VI Attribute Error."""
pass
class VimConnectionException(VimException):
"""Thrown when there is a connection problem."""
pass
class VimFaultException(VimException):
"""Exception thrown when there are faults during VIM API calls."""
def __init__(self, fault_list, msg):
super(VimFaultException, self).__init__(msg)
self.fault_list = fault_list
class VMwareDriverException(exception.CinderException):
"""Base class for all exceptions raised by the VMDK driver.
All exceptions raised by the vmdk driver should raise an exception
descended from this class as a root. This will allow the driver to
potentially trap problems related to its own internal configuration
before halting the cinder-volume node.
"""
message = _("VMware VMDK driver exception.")
class VMwaredriverConfigurationException(VMwareDriverException):
"""Base class for all configuration exceptions.
"""
message = _("VMware VMDK driver configuration error.")
class InvalidAdapterTypeException(VMwareDriverException):
"""Thrown when the disk adapter type is invalid."""
message = _("Invalid disk adapter type: %(invalid_type)s.")
| apache-2.0 | 6,477,092,069,620,219,000 | 29.689189 | 78 | 0.721268 | false |
parksandwildlife/wastd | wastd/contrib/sites/migrations/0001_initial.py | 1 | 1068 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.sites.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Site',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('domain', models.CharField(verbose_name='domain name', max_length=100,
validators=[django.contrib.sites.models._simple_domain_name_validator])),
('name', models.CharField(verbose_name='display name', max_length=50)),
],
options={
'verbose_name_plural': 'sites',
'verbose_name': 'site',
'db_table': 'django_site',
'ordering': ('domain',),
},
managers=[
('objects', django.contrib.sites.models.SiteManager()),
],
),
]
| mit | 6,190,446,289,729,111,000 | 32.375 | 117 | 0.522472 | false |
gamernetwork/gn-django | tests/gn_django_tests/test_video.py | 1 | 6595 | import unittest
from itertools import chain
from gn_django.video import facebook, twitch, youtube
class TestFacebook(unittest.TestCase):
"""
Test cases for Facebook helper functions
"""
valid_urls = (
'https://www.facebook.com/watch/live/?v=1844067005728897',
'http://www.facebook.com/watch/live/?v=1844067005728897',
'https://facebook.com/watch/live/?v=1844067005728897',
'https://facebook.com/watch/live/?v=1844067005728897',
'http://facebook.com/watch/live/?v=1844067005728897&ref=external',
'https://facebook.com/watch/live?v=1844067005728897',
'https://www.facebook.com/watch/live/?v=1844067005728897',
'https://www.facebook.com/watch/live?v=1844067005728897',
'https://www.facebook.com/watch/live/?v=1844067005728897&test=123',
'https://www.facebook.com/watch/live/?hello=there&v=1844067005728897&such',
'https://www.facebook.com/watch/live/?test=123&v=1844067005728897',
'https://www.facebook.com/newyorkcomiccon/videos/1844067005728897',
'http://www.facebook.com/newyorkcomiccon/videos/1844067005728897',
'https://facebook.com/newyorkcomiccon/videos/1844067005728897',
'https://www.facebook.com/newyorkcomiccon/videos/1844067005728897/',
'https://www.facebook.com/newyorkcomiccon/videos/1844067005728897/?test=123&example=testing',
'https://www.facebook.com/video/embed?video_id=1844067005728897',
'http://www.facebook.com/video/embed?video_id=1844067005728897',
'https://facebook.com/video/embed?video_id=1844067005728897',
'https://www.facebook.com/video/embed?video_id=1844067005728897&test=123',
'https://www.facebook.com/video/embed?test=123&video_id=1844067005728897',
'https://www.facebook.com/video/embed?video_id=1844067005728897',
'https://www.facebook.com/video/embed?test=123&example=&video_id=1844067005728897',
'https://www.facebook.com/video/embed?video_id=1844067005728897&testvideo_id=123',
)
invalid_urls = (
'https://www.example.com/1844067005728897',
'https://www.vimeo.com/1844067005728897',
'https://www.youtube.com',
'https://www.twitch.tv',
'https://www.facebook.com/watch/live/?test_v=1844067005728897',
'https://www.facebook.com/watch/live/?v_test=1844067005728897',
'https://www.facebook.com/video/embed?testvideo_id=123',
'https://www.facebook.com/video/embed?video_id_test=123',
)
def test_get_id(self):
"""
Test that all Facebook URL formats return the correct video ID
"""
expected = '1844067005728897'
for url in self.valid_urls:
vid = facebook.get_id(url)
self.assertEqual(expected, vid)
def test_get_id_invalid(self):
"""
Test that invalid Facebook video URLs return `None`
"""
for url in self.invalid_urls:
self.assertEqual(None, facebook.get_id(url))
class TestTwitch(unittest.TestCase):
valid_channel_urls = {
'zeldaspeedruns': 'https://www.twitch.tv/zeldaspeedruns',
'argick': 'https://www.twitch.tv/argick?autoplay=true',
'z0wrr3rwDWxG': 'https://www.twitch.tv/z0wrr3rwDWxG',
}
valid_vod_urls = {
'1': 'https://www.twitch.tv/videos/1',
'1337': 'https://www.twitch.tv/videos/1337/?utm_source=google',
'467234': 'https://www.twitch.tv/videos/467234',
}
invalid_urls = (
'https://www.twitch.tv',
'https://www.twitch.tv/zeldaspeedruns/123456',
'https://www.twitch.tv/watchthischannelitsnicebuttoolong',
'https://www.twitch.tv/tv',
'https://www.twitch.tv/videos/zeldaspeedruns',
'https://www.twitch.tv/videos/-1',
'https://www.example.com/tjrIMKo-1Ds',
'https://www.vimeo.com/tjrIMKo-1Ds',
'https://www.youtube.com/watch?v=tjrIMKo-1Ds',
'https://youtu.be/tjrIMKo-1Ds',
)
def test_get_channel(self):
for expected, valid_url in self.valid_channel_urls.items():
self.assertEqual(expected, twitch.get_channel(valid_url))
for url in chain(self.valid_vod_urls.values(), self.invalid_urls):
self.assertIsNone(twitch.get_channel(url))
def test_get_vod(self):
for expected, valid_url in self.valid_vod_urls.items():
self.assertEqual(expected, twitch.get_vod(valid_url))
for invalid_url in chain(self.valid_channel_urls.values(), self.invalid_urls):
self.assertIsNone(twitch.get_vod(invalid_url))
class TestYoutube(unittest.TestCase):
"""
Test cases for Youtube helper functions
"""
valid_urls = (
'https://www.youtube.com/watch?v=tjrIMKo-1Ds',
'http://www.youtube.com/watch?v=tjrIMKo-1Ds',
'https://youtu.be/tjrIMKo-1Ds',
'http://youtu.be/tjrIMKo-1Ds',
'https://www.youtube.com/embed/tjrIMKo-1Ds',
'http://www.youtube.com/embed/tjrIMKo-1Ds',
'https://www.youtube.com/v/tjrIMKo-1Ds?version=3&autohide=1',
'http://www.youtube.com/v/tjrIMKo-1Ds?version=3&autohide=1',
)
invalid_urls = (
'https://www.example.com/tjrIMKo-1Ds',
'https://www.vimeo.com/tjrIMKo-1Ds',
'https://www.youtube.com'
)
img_formats = (
'default',
'mqdefault',
'maxresdefault',
'0',
'1',
'2',
'3',
)
def test_get_id(self):
"""
Test that all Youtube URL formats return the correct video ID
"""
expected = 'tjrIMKo-1Ds'
for url in self.valid_urls:
vid = youtube.get_id(url)
self.assertEqual(expected, vid)
def test_get_id_invalid(self):
"""
Test that invalid Youtube video URLs return `None`
"""
for url in self.invalid_urls:
self.assertEqual(None, youtube.get_id(url))
def test_get_thumb(self):
"""
Test that all Youtube URL format return all Youtube thumbnail formats
"""
expected = 'http://i3.ytimg.com/vi/tjrIMKo-1Ds/%s.jpg'
for url in self.valid_urls:
for f in self.img_formats:
thumb = youtube.get_thumb(url, f)
self.assertEqual(expected % f, thumb)
def test_get_thumb_invalid(self):
"""
Test that invalid Youtube video URLs return `None`
"""
for url in self.invalid_urls:
for f in self.img_formats:
thumb = youtube.get_thumb(url, f)
self.assertEqual(None, thumb)
| mit | -7,827,708,248,405,796,000 | 37.343023 | 101 | 0.621228 | false |
Yukarumya/Yukarum-Redfoxes | python/mozbuild/mozbuild/test/configure/test_configure.py | 1 | 43731 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
from StringIO import StringIO
import os
import sys
import textwrap
import unittest
from mozunit import (
main,
MockedOpen,
)
from mozbuild.configure.options import (
InvalidOptionError,
NegativeOptionValue,
PositiveOptionValue,
)
from mozbuild.configure import (
ConfigureError,
ConfigureSandbox,
)
from mozbuild.util import exec_
import mozpack.path as mozpath
test_data_path = mozpath.abspath(mozpath.dirname(__file__))
test_data_path = mozpath.join(test_data_path, 'data')
class TestConfigure(unittest.TestCase):
def get_config(self, options=[], env={}, configure='moz.configure',
prog='/bin/configure'):
config = {}
out = StringIO()
sandbox = ConfigureSandbox(config, env, [prog] + options, out, out)
sandbox.run(mozpath.join(test_data_path, configure))
if '--help' in options:
return out.getvalue(), config
self.assertEquals('', out.getvalue())
return config
def moz_configure(self, source):
return MockedOpen({
os.path.join(test_data_path,
'moz.configure'): textwrap.dedent(source)
})
def test_defaults(self):
config = self.get_config()
self.maxDiff = None
self.assertEquals({
'CHOICES': NegativeOptionValue(),
'DEFAULTED': PositiveOptionValue(('not-simple',)),
'IS_GCC': NegativeOptionValue(),
'REMAINDER': (PositiveOptionValue(), NegativeOptionValue(),
NegativeOptionValue(), NegativeOptionValue()),
'SIMPLE': NegativeOptionValue(),
'VALUES': NegativeOptionValue(),
'VALUES2': NegativeOptionValue(),
'VALUES3': NegativeOptionValue(),
'WITH_ENV': NegativeOptionValue(),
}, config)
def test_help(self):
help, config = self.get_config(['--help'], prog='configure')
self.assertEquals({}, config)
self.maxDiff = None
self.assertEquals(
'Usage: configure [options]\n'
'\n'
'Options: [defaults in brackets after descriptions]\n'
' --help print this message\n'
' --enable-simple Enable simple\n'
' --enable-with-env Enable with env\n'
' --enable-values Enable values\n'
' --without-thing Build without thing\n'
' --with-stuff Build with stuff\n'
' --option Option\n'
' --with-returned-default Returned default [not-simple]\n'
' --returned-choices Choices\n'
' --enable-imports-in-template\n'
' Imports in template\n'
' --enable-include Include\n'
' --with-imports Imports\n'
'\n'
'Environment variables:\n'
' CC C Compiler\n',
help
)
def test_unknown(self):
with self.assertRaises(InvalidOptionError):
self.get_config(['--unknown'])
def test_simple(self):
for config in (
self.get_config(),
self.get_config(['--disable-simple']),
# Last option wins.
self.get_config(['--enable-simple', '--disable-simple']),
):
self.assertNotIn('ENABLED_SIMPLE', config)
self.assertIn('SIMPLE', config)
self.assertEquals(NegativeOptionValue(), config['SIMPLE'])
for config in (
self.get_config(['--enable-simple']),
self.get_config(['--disable-simple', '--enable-simple']),
):
self.assertIn('ENABLED_SIMPLE', config)
self.assertIn('SIMPLE', config)
self.assertEquals(PositiveOptionValue(), config['SIMPLE'])
self.assertIs(config['SIMPLE'], config['ENABLED_SIMPLE'])
# --enable-simple doesn't take values.
with self.assertRaises(InvalidOptionError):
self.get_config(['--enable-simple=value'])
def test_with_env(self):
for config in (
self.get_config(),
self.get_config(['--disable-with-env']),
self.get_config(['--enable-with-env', '--disable-with-env']),
self.get_config(env={'MOZ_WITH_ENV': ''}),
# Options win over environment
self.get_config(['--disable-with-env'],
env={'MOZ_WITH_ENV': '1'}),
):
self.assertIn('WITH_ENV', config)
self.assertEquals(NegativeOptionValue(), config['WITH_ENV'])
for config in (
self.get_config(['--enable-with-env']),
self.get_config(['--disable-with-env', '--enable-with-env']),
self.get_config(env={'MOZ_WITH_ENV': '1'}),
self.get_config(['--enable-with-env'],
env={'MOZ_WITH_ENV': ''}),
):
self.assertIn('WITH_ENV', config)
self.assertEquals(PositiveOptionValue(), config['WITH_ENV'])
with self.assertRaises(InvalidOptionError):
self.get_config(['--enable-with-env=value'])
with self.assertRaises(InvalidOptionError):
self.get_config(env={'MOZ_WITH_ENV': 'value'})
def test_values(self, name='VALUES'):
for config in (
self.get_config(),
self.get_config(['--disable-values']),
self.get_config(['--enable-values', '--disable-values']),
):
self.assertIn(name, config)
self.assertEquals(NegativeOptionValue(), config[name])
for config in (
self.get_config(['--enable-values']),
self.get_config(['--disable-values', '--enable-values']),
):
self.assertIn(name, config)
self.assertEquals(PositiveOptionValue(), config[name])
config = self.get_config(['--enable-values=foo'])
self.assertIn(name, config)
self.assertEquals(PositiveOptionValue(('foo',)), config[name])
config = self.get_config(['--enable-values=foo,bar'])
self.assertIn(name, config)
self.assertTrue(config[name])
self.assertEquals(PositiveOptionValue(('foo', 'bar')), config[name])
def test_values2(self):
self.test_values('VALUES2')
def test_values3(self):
self.test_values('VALUES3')
def test_returned_default(self):
config = self.get_config(['--enable-simple'])
self.assertIn('DEFAULTED', config)
self.assertEquals(
PositiveOptionValue(('simple',)), config['DEFAULTED'])
config = self.get_config(['--disable-simple'])
self.assertIn('DEFAULTED', config)
self.assertEquals(
PositiveOptionValue(('not-simple',)), config['DEFAULTED'])
def test_returned_choices(self):
for val in ('a', 'b', 'c'):
config = self.get_config(
['--enable-values=alpha', '--returned-choices=%s' % val])
self.assertIn('CHOICES', config)
self.assertEquals(PositiveOptionValue((val,)), config['CHOICES'])
for val in ('0', '1', '2'):
config = self.get_config(
['--enable-values=numeric', '--returned-choices=%s' % val])
self.assertIn('CHOICES', config)
self.assertEquals(PositiveOptionValue((val,)), config['CHOICES'])
with self.assertRaises(InvalidOptionError):
self.get_config(['--enable-values=numeric',
'--returned-choices=a'])
with self.assertRaises(InvalidOptionError):
self.get_config(['--enable-values=alpha', '--returned-choices=0'])
def test_included(self):
config = self.get_config(env={'CC': 'gcc'})
self.assertIn('IS_GCC', config)
self.assertEquals(config['IS_GCC'], True)
config = self.get_config(
['--enable-include=extra.configure', '--extra'])
self.assertIn('EXTRA', config)
self.assertEquals(PositiveOptionValue(), config['EXTRA'])
with self.assertRaises(InvalidOptionError):
self.get_config(['--extra'])
def test_template(self):
config = self.get_config(env={'CC': 'gcc'})
self.assertIn('CFLAGS', config)
self.assertEquals(config['CFLAGS'], ['-Werror=foobar'])
config = self.get_config(env={'CC': 'clang'})
self.assertNotIn('CFLAGS', config)
def test_imports(self):
config = {}
out = StringIO()
sandbox = ConfigureSandbox(config, {}, [], out, out)
with self.assertRaises(ImportError):
exec_(textwrap.dedent('''
@template
def foo():
import sys
foo()'''),
sandbox
)
exec_(textwrap.dedent('''
@template
@imports('sys')
def foo():
return sys'''),
sandbox
)
self.assertIs(sandbox['foo'](), sys)
exec_(textwrap.dedent('''
@template
@imports(_from='os', _import='path')
def foo():
return path'''),
sandbox
)
self.assertIs(sandbox['foo'](), os.path)
exec_(textwrap.dedent('''
@template
@imports(_from='os', _import='path', _as='os_path')
def foo():
return os_path'''),
sandbox
)
self.assertIs(sandbox['foo'](), os.path)
exec_(textwrap.dedent('''
@template
@imports('__builtin__')
def foo():
return __builtin__'''),
sandbox
)
import __builtin__
self.assertIs(sandbox['foo'](), __builtin__)
exec_(textwrap.dedent('''
@template
@imports(_from='__builtin__', _import='open')
def foo():
return open('%s')''' % os.devnull),
sandbox
)
f = sandbox['foo']()
self.assertEquals(f.name, os.devnull)
f.close()
# This unlocks the sandbox
exec_(textwrap.dedent('''
@template
@imports(_import='__builtin__', _as='__builtins__')
def foo():
import sys
return sys'''),
sandbox
)
self.assertIs(sandbox['foo'](), sys)
exec_(textwrap.dedent('''
@template
@imports('__sandbox__')
def foo():
return __sandbox__'''),
sandbox
)
self.assertIs(sandbox['foo'](), sandbox)
exec_(textwrap.dedent('''
@template
@imports(_import='__sandbox__', _as='s')
def foo():
return s'''),
sandbox
)
self.assertIs(sandbox['foo'](), sandbox)
# Nothing leaked from the function being executed
self.assertEquals(sandbox.keys(), ['__builtins__', 'foo'])
self.assertEquals(sandbox['__builtins__'], ConfigureSandbox.BUILTINS)
exec_(textwrap.dedent('''
@template
@imports('sys')
def foo():
@depends(when=True)
def bar():
return sys
return bar
bar = foo()'''),
sandbox
)
with self.assertRaises(NameError) as e:
sandbox._depends[sandbox['bar']].result
self.assertEquals(e.exception.message,
"global name 'sys' is not defined")
def test_apply_imports(self):
imports = []
class CountApplyImportsSandbox(ConfigureSandbox):
def _apply_imports(self, *args, **kwargs):
imports.append((args, kwargs))
super(CountApplyImportsSandbox, self)._apply_imports(
*args, **kwargs)
config = {}
out = StringIO()
sandbox = CountApplyImportsSandbox(config, {}, [], out, out)
exec_(textwrap.dedent('''
@template
@imports('sys')
def foo():
return sys
foo()
foo()'''),
sandbox
)
self.assertEquals(len(imports), 1)
def test_os_path(self):
config = self.get_config(['--with-imports=%s' % __file__])
self.assertIn('HAS_ABSPATH', config)
self.assertEquals(config['HAS_ABSPATH'], True)
self.assertIn('HAS_GETATIME', config)
self.assertEquals(config['HAS_GETATIME'], True)
self.assertIn('HAS_GETATIME2', config)
self.assertEquals(config['HAS_GETATIME2'], False)
def test_template_call(self):
config = self.get_config(env={'CC': 'gcc'})
self.assertIn('TEMPLATE_VALUE', config)
self.assertEquals(config['TEMPLATE_VALUE'], 42)
self.assertIn('TEMPLATE_VALUE_2', config)
self.assertEquals(config['TEMPLATE_VALUE_2'], 21)
def test_template_imports(self):
config = self.get_config(['--enable-imports-in-template'])
self.assertIn('PLATFORM', config)
self.assertEquals(config['PLATFORM'], sys.platform)
def test_decorators(self):
config = {}
out = StringIO()
sandbox = ConfigureSandbox(config, {}, [], out, out)
sandbox.include_file(mozpath.join(test_data_path, 'decorators.configure'))
self.assertNotIn('FOO', sandbox)
self.assertNotIn('BAR', sandbox)
self.assertNotIn('QUX', sandbox)
def test_set_config(self):
def get_config(*args):
return self.get_config(*args, configure='set_config.configure')
help, config = get_config(['--help'])
self.assertEquals(config, {})
config = get_config(['--set-foo'])
self.assertIn('FOO', config)
self.assertEquals(config['FOO'], True)
config = get_config(['--set-bar'])
self.assertNotIn('FOO', config)
self.assertIn('BAR', config)
self.assertEquals(config['BAR'], True)
config = get_config(['--set-value=qux'])
self.assertIn('VALUE', config)
self.assertEquals(config['VALUE'], 'qux')
config = get_config(['--set-name=hoge'])
self.assertIn('hoge', config)
self.assertEquals(config['hoge'], True)
config = get_config([])
self.assertEquals(config, {'BAR': False})
with self.assertRaises(ConfigureError):
# Both --set-foo and --set-name=FOO are going to try to
# set_config('FOO'...)
get_config(['--set-foo', '--set-name=FOO'])
def test_set_config_when(self):
with self.moz_configure('''
option('--with-qux', help='qux')
set_config('FOO', 'foo', when=True)
set_config('BAR', 'bar', when=False)
set_config('QUX', 'qux', when='--with-qux')
'''):
config = self.get_config()
self.assertEquals(config, {
'FOO': 'foo',
})
config = self.get_config(['--with-qux'])
self.assertEquals(config, {
'FOO': 'foo',
'QUX': 'qux',
})
def test_set_define(self):
def get_config(*args):
return self.get_config(*args, configure='set_define.configure')
help, config = get_config(['--help'])
self.assertEquals(config, {'DEFINES': {}})
config = get_config(['--set-foo'])
self.assertIn('FOO', config['DEFINES'])
self.assertEquals(config['DEFINES']['FOO'], True)
config = get_config(['--set-bar'])
self.assertNotIn('FOO', config['DEFINES'])
self.assertIn('BAR', config['DEFINES'])
self.assertEquals(config['DEFINES']['BAR'], True)
config = get_config(['--set-value=qux'])
self.assertIn('VALUE', config['DEFINES'])
self.assertEquals(config['DEFINES']['VALUE'], 'qux')
config = get_config(['--set-name=hoge'])
self.assertIn('hoge', config['DEFINES'])
self.assertEquals(config['DEFINES']['hoge'], True)
config = get_config([])
self.assertEquals(config['DEFINES'], {'BAR': False})
with self.assertRaises(ConfigureError):
# Both --set-foo and --set-name=FOO are going to try to
# set_define('FOO'...)
get_config(['--set-foo', '--set-name=FOO'])
def test_set_define_when(self):
with self.moz_configure('''
option('--with-qux', help='qux')
set_define('FOO', 'foo', when=True)
set_define('BAR', 'bar', when=False)
set_define('QUX', 'qux', when='--with-qux')
'''):
config = self.get_config()
self.assertEquals(config['DEFINES'], {
'FOO': 'foo',
})
config = self.get_config(['--with-qux'])
self.assertEquals(config['DEFINES'], {
'FOO': 'foo',
'QUX': 'qux',
})
def test_imply_option_simple(self):
def get_config(*args):
return self.get_config(
*args, configure='imply_option/simple.configure')
help, config = get_config(['--help'])
self.assertEquals(config, {})
config = get_config([])
self.assertEquals(config, {})
config = get_config(['--enable-foo'])
self.assertIn('BAR', config)
self.assertEquals(config['BAR'], PositiveOptionValue())
with self.assertRaises(InvalidOptionError) as e:
get_config(['--enable-foo', '--disable-bar'])
self.assertEquals(
e.exception.message,
"'--enable-bar' implied by '--enable-foo' conflicts with "
"'--disable-bar' from the command-line")
def test_imply_option_negative(self):
def get_config(*args):
return self.get_config(
*args, configure='imply_option/negative.configure')
help, config = get_config(['--help'])
self.assertEquals(config, {})
config = get_config([])
self.assertEquals(config, {})
config = get_config(['--enable-foo'])
self.assertIn('BAR', config)
self.assertEquals(config['BAR'], NegativeOptionValue())
with self.assertRaises(InvalidOptionError) as e:
get_config(['--enable-foo', '--enable-bar'])
self.assertEquals(
e.exception.message,
"'--disable-bar' implied by '--enable-foo' conflicts with "
"'--enable-bar' from the command-line")
config = get_config(['--disable-hoge'])
self.assertIn('BAR', config)
self.assertEquals(config['BAR'], NegativeOptionValue())
with self.assertRaises(InvalidOptionError) as e:
get_config(['--disable-hoge', '--enable-bar'])
self.assertEquals(
e.exception.message,
"'--disable-bar' implied by '--disable-hoge' conflicts with "
"'--enable-bar' from the command-line")
def test_imply_option_values(self):
def get_config(*args):
return self.get_config(
*args, configure='imply_option/values.configure')
help, config = get_config(['--help'])
self.assertEquals(config, {})
config = get_config([])
self.assertEquals(config, {})
config = get_config(['--enable-foo=a'])
self.assertIn('BAR', config)
self.assertEquals(config['BAR'], PositiveOptionValue(('a',)))
config = get_config(['--enable-foo=a,b'])
self.assertIn('BAR', config)
self.assertEquals(config['BAR'], PositiveOptionValue(('a','b')))
with self.assertRaises(InvalidOptionError) as e:
get_config(['--enable-foo=a,b', '--disable-bar'])
self.assertEquals(
e.exception.message,
"'--enable-bar=a,b' implied by '--enable-foo' conflicts with "
"'--disable-bar' from the command-line")
def test_imply_option_infer(self):
def get_config(*args):
return self.get_config(
*args, configure='imply_option/infer.configure')
help, config = get_config(['--help'])
self.assertEquals(config, {})
config = get_config([])
self.assertEquals(config, {})
with self.assertRaises(InvalidOptionError) as e:
get_config(['--enable-foo', '--disable-bar'])
self.assertEquals(
e.exception.message,
"'--enable-bar' implied by '--enable-foo' conflicts with "
"'--disable-bar' from the command-line")
with self.assertRaises(ConfigureError) as e:
self.get_config([], configure='imply_option/infer_ko.configure')
self.assertEquals(
e.exception.message,
"Cannot infer what implies '--enable-bar'. Please add a `reason` "
"to the `imply_option` call.")
def test_imply_option_immediate_value(self):
def get_config(*args):
return self.get_config(
*args, configure='imply_option/imm.configure')
help, config = get_config(['--help'])
self.assertEquals(config, {})
config = get_config([])
self.assertEquals(config, {})
config_path = mozpath.abspath(
mozpath.join(test_data_path, 'imply_option', 'imm.configure'))
with self.assertRaisesRegexp(InvalidOptionError,
"--enable-foo' implied by 'imply_option at %s:7' conflicts with "
"'--disable-foo' from the command-line" % config_path):
get_config(['--disable-foo'])
with self.assertRaisesRegexp(InvalidOptionError,
"--enable-bar=foo,bar' implied by 'imply_option at %s:16' conflicts"
" with '--enable-bar=a,b,c' from the command-line" % config_path):
get_config(['--enable-bar=a,b,c'])
with self.assertRaisesRegexp(InvalidOptionError,
"--enable-baz=BAZ' implied by 'imply_option at %s:25' conflicts"
" with '--enable-baz=QUUX' from the command-line" % config_path):
get_config(['--enable-baz=QUUX'])
def test_imply_option_failures(self):
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
imply_option('--with-foo', ('a',), 'bar')
'''):
self.get_config()
self.assertEquals(e.exception.message,
"`--with-foo`, emitted from `%s` line 2, is unknown."
% mozpath.join(test_data_path, 'moz.configure'))
with self.assertRaises(TypeError) as e:
with self.moz_configure('''
imply_option('--with-foo', 42, 'bar')
option('--with-foo', help='foo')
@depends('--with-foo')
def foo(value):
return value
'''):
self.get_config()
self.assertEquals(e.exception.message,
"Unexpected type: 'int'")
def test_imply_option_when(self):
with self.moz_configure('''
option('--with-foo', help='foo')
imply_option('--with-qux', True, when='--with-foo')
option('--with-qux', help='qux')
set_config('QUX', depends('--with-qux')(lambda x: x))
'''):
config = self.get_config()
self.assertEquals(config, {
'QUX': NegativeOptionValue(),
})
config = self.get_config(['--with-foo'])
self.assertEquals(config, {
'QUX': PositiveOptionValue(),
})
def test_option_failures(self):
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('option("--with-foo", help="foo")'):
self.get_config()
self.assertEquals(
e.exception.message,
'Option `--with-foo` is not handled ; reference it with a @depends'
)
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
option("--with-foo", help="foo")
option("--with-foo", help="foo")
'''):
self.get_config()
self.assertEquals(
e.exception.message,
'Option `--with-foo` already defined'
)
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
option(env="MOZ_FOO", help="foo")
option(env="MOZ_FOO", help="foo")
'''):
self.get_config()
self.assertEquals(
e.exception.message,
'Option `MOZ_FOO` already defined'
)
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
option('--with-foo', env="MOZ_FOO", help="foo")
option(env="MOZ_FOO", help="foo")
'''):
self.get_config()
self.assertEquals(
e.exception.message,
'Option `MOZ_FOO` already defined'
)
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
option(env="MOZ_FOO", help="foo")
option('--with-foo', env="MOZ_FOO", help="foo")
'''):
self.get_config()
self.assertEquals(
e.exception.message,
'Option `MOZ_FOO` already defined'
)
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
option('--with-foo', env="MOZ_FOO", help="foo")
option('--with-foo', help="foo")
'''):
self.get_config()
self.assertEquals(
e.exception.message,
'Option `--with-foo` already defined'
)
def test_option_when(self):
with self.moz_configure('''
option('--with-foo', help='foo', when=True)
option('--with-bar', help='bar', when=False)
option('--with-qux', env="QUX", help='qux', when='--with-foo')
set_config('FOO', depends('--with-foo', when=True)(lambda x: x))
set_config('BAR', depends('--with-bar', when=False)(lambda x: x))
set_config('QUX', depends('--with-qux', when='--with-foo')(lambda x: x))
'''):
config = self.get_config()
self.assertEquals(config, {
'FOO': NegativeOptionValue(),
})
config = self.get_config(['--with-foo'])
self.assertEquals(config, {
'FOO': PositiveOptionValue(),
'QUX': NegativeOptionValue(),
})
config = self.get_config(['--with-foo', '--with-qux'])
self.assertEquals(config, {
'FOO': PositiveOptionValue(),
'QUX': PositiveOptionValue(),
})
with self.assertRaises(InvalidOptionError) as e:
self.get_config(['--with-bar'])
self.assertEquals(
e.exception.message,
'--with-bar is not available in this configuration'
)
with self.assertRaises(InvalidOptionError) as e:
self.get_config(['--with-qux'])
self.assertEquals(
e.exception.message,
'--with-qux is not available in this configuration'
)
with self.assertRaises(InvalidOptionError) as e:
self.get_config(['QUX=1'])
self.assertEquals(
e.exception.message,
'QUX is not available in this configuration'
)
config = self.get_config(env={'QUX': '1'})
self.assertEquals(config, {
'FOO': NegativeOptionValue(),
})
help, config = self.get_config(['--help'])
self.assertEquals(help, textwrap.dedent('''\
Usage: configure [options]
Options: [defaults in brackets after descriptions]
--help print this message
--with-foo foo
Environment variables:
'''))
help, config = self.get_config(['--help', '--with-foo'])
self.assertEquals(help, textwrap.dedent('''\
Usage: configure [options]
Options: [defaults in brackets after descriptions]
--help print this message
--with-foo foo
--with-qux qux
Environment variables:
'''))
with self.moz_configure('''
option('--with-foo', help='foo', when=True)
set_config('FOO', depends('--with-foo')(lambda x: x))
'''):
with self.assertRaises(ConfigureError) as e:
self.get_config()
self.assertEquals(e.exception.message,
'@depends function needs the same `when` as '
'options it depends on')
with self.moz_configure('''
@depends(when=True)
def always():
return True
@depends(when=True)
def always2():
return True
option('--with-foo', help='foo', when=always)
set_config('FOO', depends('--with-foo', when=always2)(lambda x: x))
'''):
with self.assertRaises(ConfigureError) as e:
self.get_config()
self.assertEquals(e.exception.message,
'@depends function needs the same `when` as '
'options it depends on')
def test_include_failures(self):
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('include("../foo.configure")'):
self.get_config()
self.assertEquals(
e.exception.message,
'Cannot include `%s` because it is not in a subdirectory of `%s`'
% (mozpath.normpath(mozpath.join(test_data_path, '..',
'foo.configure')),
mozpath.normsep(test_data_path))
)
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
include('extra.configure')
include('extra.configure')
'''):
self.get_config()
self.assertEquals(
e.exception.message,
'Cannot include `%s` because it was included already.'
% mozpath.normpath(mozpath.join(test_data_path,
'extra.configure'))
)
with self.assertRaises(TypeError) as e:
with self.moz_configure('''
include(42)
'''):
self.get_config()
self.assertEquals(e.exception.message, "Unexpected type: 'int'")
def test_include_when(self):
with MockedOpen({
os.path.join(test_data_path, 'moz.configure'): textwrap.dedent('''
option('--with-foo', help='foo')
include('always.configure', when=True)
include('never.configure', when=False)
include('foo.configure', when='--with-foo')
set_config('FOO', foo)
set_config('BAR', bar)
set_config('QUX', qux)
'''),
os.path.join(test_data_path, 'always.configure'): textwrap.dedent('''
option('--with-bar', help='bar')
@depends('--with-bar')
def bar(x):
if x:
return 'bar'
'''),
os.path.join(test_data_path, 'never.configure'): textwrap.dedent('''
option('--with-qux', help='qux')
@depends('--with-qux')
def qux(x):
if x:
return 'qux'
'''),
os.path.join(test_data_path, 'foo.configure'): textwrap.dedent('''
option('--with-foo-really', help='really foo')
@depends('--with-foo-really')
def foo(x):
if x:
return 'foo'
include('foo2.configure', when='--with-foo-really')
'''),
os.path.join(test_data_path, 'foo2.configure'): textwrap.dedent('''
set_config('FOO2', True)
'''),
}):
config = self.get_config()
self.assertEquals(config, {})
config = self.get_config(['--with-foo'])
self.assertEquals(config, {})
config = self.get_config(['--with-bar'])
self.assertEquals(config, {
'BAR': 'bar',
})
with self.assertRaises(InvalidOptionError) as e:
self.get_config(['--with-qux'])
self.assertEquals(
e.exception.message,
'--with-qux is not available in this configuration'
)
config = self.get_config(['--with-foo', '--with-foo-really'])
self.assertEquals(config, {
'FOO': 'foo',
'FOO2': True,
})
def test_sandbox_failures(self):
with self.assertRaises(KeyError) as e:
with self.moz_configure('''
include = 42
'''):
self.get_config()
self.assertEquals(e.exception.message, 'Cannot reassign builtins')
with self.assertRaises(KeyError) as e:
with self.moz_configure('''
foo = 42
'''):
self.get_config()
self.assertEquals(e.exception.message,
'Cannot assign `foo` because it is neither a '
'@depends nor a @template')
def test_depends_failures(self):
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
@depends()
def foo():
return
'''):
self.get_config()
self.assertEquals(e.exception.message,
"@depends needs at least one argument")
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
@depends('--with-foo')
def foo(value):
return value
'''):
self.get_config()
self.assertEquals(e.exception.message,
"'--with-foo' is not a known option. Maybe it's "
"declared too late?")
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
@depends('--with-foo=42')
def foo(value):
return value
'''):
self.get_config()
self.assertEquals(e.exception.message,
"Option must not contain an '='")
with self.assertRaises(TypeError) as e:
with self.moz_configure('''
@depends(42)
def foo(value):
return value
'''):
self.get_config()
self.assertEquals(e.exception.message,
"Cannot use object of type 'int' as argument "
"to @depends")
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
@depends('--help')
def foo(value):
yield
'''):
self.get_config()
self.assertEquals(e.exception.message,
"Cannot decorate generator functions with @depends")
with self.assertRaises(TypeError) as e:
with self.moz_configure('''
depends('--help')(42)
'''):
self.get_config()
self.assertEquals(e.exception.message,
"Unexpected type: 'int'")
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
option('--foo', help='foo')
@depends('--foo')
def foo(value):
return value
foo()
'''):
self.get_config()
self.assertEquals(e.exception.message,
"The `foo` function may not be called")
with self.assertRaises(TypeError) as e:
with self.moz_configure('''
@depends('--help', foo=42)
def foo(_):
return
'''):
self.get_config()
self.assertEquals(e.exception.message,
"depends_impl() got an unexpected keyword argument 'foo'")
def test_depends_when(self):
with self.moz_configure('''
@depends(when=True)
def foo():
return 'foo'
set_config('FOO', foo)
@depends(when=False)
def bar():
return 'bar'
set_config('BAR', bar)
option('--with-qux', help='qux')
@depends(when='--with-qux')
def qux():
return 'qux'
set_config('QUX', qux)
'''):
config = self.get_config()
self.assertEquals(config, {
'FOO': 'foo',
})
config = self.get_config(['--with-qux'])
self.assertEquals(config, {
'FOO': 'foo',
'QUX': 'qux',
})
def test_imports_failures(self):
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
@imports('os')
@template
def foo(value):
return value
'''):
self.get_config()
self.assertEquals(e.exception.message,
'@imports must appear after @template')
with self.assertRaises(ConfigureError) as e:
with self.moz_configure('''
option('--foo', help='foo')
@imports('os')
@depends('--foo')
def foo(value):
return value
'''):
self.get_config()
self.assertEquals(e.exception.message,
'@imports must appear after @depends')
for import_ in (
"42",
"_from=42, _import='os'",
"_from='os', _import='path', _as=42",
):
with self.assertRaises(TypeError) as e:
with self.moz_configure('''
@imports(%s)
@template
def foo(value):
return value
''' % import_):
self.get_config()
self.assertEquals(e.exception.message, "Unexpected type: 'int'")
with self.assertRaises(TypeError) as e:
with self.moz_configure('''
@imports('os', 42)
@template
def foo(value):
return value
'''):
self.get_config()
self.assertEquals(e.exception.message, "Unexpected type: 'int'")
with self.assertRaises(ValueError) as e:
with self.moz_configure('''
@imports('os*')
def foo(value):
return value
'''):
self.get_config()
self.assertEquals(e.exception.message,
"Invalid argument to @imports: 'os*'")
def test_only_when(self):
moz_configure = '''
option('--enable-when', help='when')
@depends('--enable-when', '--help')
def when(value, _):
return bool(value)
with only_when(when):
option('--foo', nargs='*', help='foo')
@depends('--foo')
def foo(value):
return value
set_config('FOO', foo)
set_define('FOO', foo)
# It is possible to depend on a function defined in a only_when
# block. It then resolves to `None`.
set_config('BAR', depends(foo)(lambda x: x))
set_define('BAR', depends(foo)(lambda x: x))
'''
with self.moz_configure(moz_configure):
config = self.get_config()
self.assertEqual(config, {
'DEFINES': {},
})
config = self.get_config(['--enable-when'])
self.assertEqual(config, {
'BAR': NegativeOptionValue(),
'FOO': NegativeOptionValue(),
'DEFINES': {
'BAR': NegativeOptionValue(),
'FOO': NegativeOptionValue(),
},
})
config = self.get_config(['--enable-when', '--foo=bar'])
self.assertEqual(config, {
'BAR': PositiveOptionValue(['bar']),
'FOO': PositiveOptionValue(['bar']),
'DEFINES': {
'BAR': PositiveOptionValue(['bar']),
'FOO': PositiveOptionValue(['bar']),
},
})
# The --foo option doesn't exist when --enable-when is not given.
with self.assertRaises(InvalidOptionError) as e:
self.get_config(['--foo'])
self.assertEquals(e.exception.message,
'--foo is not available in this configuration')
# Cannot depend on an option defined in a only_when block, because we
# don't know what OptionValue would make sense.
with self.moz_configure(moz_configure + '''
set_config('QUX', depends('--foo')(lambda x: x))
'''):
with self.assertRaises(ConfigureError) as e:
self.get_config()
self.assertEquals(e.exception.message,
'@depends function needs the same `when` as '
'options it depends on')
with self.moz_configure(moz_configure + '''
set_config('QUX', depends('--foo', when=when)(lambda x: x))
'''):
self.get_config(['--enable-when'])
# Using imply_option for an option defined in a only_when block fails
# similarly if the imply_option happens outside the block.
with self.moz_configure('''
imply_option('--foo', True)
''' + moz_configure):
with self.assertRaises(InvalidOptionError) as e:
self.get_config()
self.assertEquals(e.exception.message,
'--foo is not available in this configuration')
# And similarly doesn't fail when the condition is true.
with self.moz_configure('''
imply_option('--foo', True)
''' + moz_configure):
self.get_config(['--enable-when'])
if __name__ == '__main__':
main()
| mpl-2.0 | 2,962,910,890,875,734,000 | 33.35271 | 84 | 0.503213 | false |
Kozea/Pynuts | docs/conf.py | 1 | 8611 | # -*- coding: utf-8 -*-
#
# Pynuts documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 24 10:37:37 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pynuts'
copyright = u'2012, Kozea'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
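# The version string is read from pynuts/__init__.py so the documentation
# always matches the package version.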
VERSION = re.search("__version__ = '([^']+)'", open(
os.path.join(os.path.dirname(__file__), '..', 'pynuts', '__init__.py')
).read().strip()).group(1)
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'index_logo': 'logo.png',
'index_logo_height': '120px'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = "logo-pynuts.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon-pynuts.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pynutsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Pynuts.tex', u'Pynuts Documentation',
u'Kozea', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pynuts', u'Pynuts Documentation',
[u'Kozea'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pynuts', u'Pynuts Documentation',
u'Kozea', 'Pynuts', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
autodoc_member_order = 'bysource'
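# Use the bundled Flask Sphinx theme shipped in the local _sphinx-themes
# directory; the path is added to sys.path and html_theme_path below.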
themes_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '_sphinx-themes'))
sys.path.append(themes_path)
html_theme_path = [themes_path]
html_theme = 'flask'
html_sidebars = {
'index': ['sidebarintro.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
| bsd-3-clause | -5,100,894,083,876,475,000 | 31.617424 | 103 | 0.69748 | false |
mupen64plus/mupen64plus-ui-python | src/m64py/frontend/settings.py | 1 | 14083 | # -*- coding: utf-8 -*-
# Author: Milan Nikolic <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from PyQt5.QtCore import Qt, QSettings
from PyQt5.QtWidgets import QDialog, QFileDialog, QRadioButton, QVBoxLayout
from m64py.core.defs import *
from m64py.loader import find_library
from m64py.core.vidext import MODES
from m64py.platform import DLL_FILTER
from m64py.frontend.plugin import Plugin
from m64py.frontend.input import Input
from m64py.ui.settings_ui import Ui_Settings
class Settings(QDialog, Ui_Settings):
"""Settings dialog"""
def __init__(self, parent):
QDialog.__init__(self, parent)
self.parent = parent
self.setupUi(self)
self.core = None
self.plugins = []
self.emumode = []
self.combomap = {}
self.qset = QSettings("m64py", "m64py")
self.qset.setDefaultFormat(QSettings.IniFormat)
self.add_items()
self.connect_signals()
def showEvent(self, event):
self.set_config()
def closeEvent(self, event):
self.save_config()
def add_items(self):
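        # Build the plugin-type -> (combo box, settings button, dialog) map and
        # the emulation-mode radio buttons added to the groupEmuMode box.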
self.combomap = {
M64PLUGIN_RSP: (
self.comboRSP, self.pushButtonRSP,
Plugin(self.parent)),
M64PLUGIN_GFX: (
self.comboVideo, self.pushButtonVideo,
Plugin(self.parent)),
M64PLUGIN_AUDIO: (
self.comboAudio, self.pushButtonAudio,
Plugin(self.parent)),
M64PLUGIN_INPUT: (
self.comboInput, self.pushButtonInput,
Input(self.parent))
}
self.emumode = [
QRadioButton(self.tr("Pure Interpreter")),
QRadioButton(self.tr("Cached Interpreter")),
QRadioButton(self.tr("Dynamic Recompiler"))
]
vbox = QVBoxLayout(self.groupEmuMode)
for widget in self.emumode:
vbox.addWidget(widget)
def show_page(self, index=0):
self.tabWidget.setCurrentIndex(index)
self.show()
def save_config(self):
self.save_paths()
self.save_plugins()
if self.core and self.core.get_handle():
self.save_video()
self.save_core()
self.core.config.save_file()
self.qset.sync()
def set_config(self):
if self.core and self.core.get_handle():
self.set_paths()
self.set_plugins()
self.set_video()
self.set_core()
def on_vidext_changed(self, state):
self.parent.vidext = state
self.comboResolution.setEnabled(not self.parent.vidext)
self.checkFullscreen.setEnabled(not self.parent.vidext)
self.parent.worker.quit()
self.parent.worker.init()
def connect_signals(self):
self.browseLibrary.clicked.connect(lambda: self.browse_dialog(
(self.pathLibrary, self.groupLibrary, False)))
self.browsePlugins.clicked.connect(lambda: self.browse_dialog(
(self.pathPlugins, self.groupPlugins, True)))
self.browseData.clicked.connect(lambda: self.browse_dialog(
(self.pathData, self.groupData, True)))
self.browseROM.clicked.connect(lambda: self.browse_dialog(
(self.pathROM, self.groupROM, True)))
self.checkEnableVidExt.clicked.connect(self.on_vidext_changed)
for plugin_type in self.combomap:
self.connect_combo_signals(self.combomap[plugin_type])
def connect_combo_signals(self, combomap):
combo, button, settings = combomap
if settings is not None:
if combo != self.comboInput:
combo.activated.connect(
lambda: self.set_section(combo, button, settings))
button.clicked.connect(settings.show_dialog)
def browse_dialog(self, args):
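        # Show a file or directory picker for the given (widget, group box,
        # is_directory) tuple; picking a new core library or plugins path also
        # re-initializes the worker so the selection takes effect.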
widget, groupbox, directory = args
dialog = QFileDialog()
if directory:
dialog.setFileMode(QFileDialog.Directory)
path = dialog.getExistingDirectory(
self, groupbox.title(), widget.text(), QFileDialog.ShowDirsOnly)
else:
dialog.setFileMode(QFileDialog.ExistingFile)
path, _ = dialog.getOpenFileName(
self, groupbox.title(), widget.text(),
"%s (*%s);;All files (*)" % (groupbox.title(), DLL_FILTER))
if not path:
return
widget.setText(path)
if widget == self.pathLibrary:
self.parent.worker.quit()
if not self.parent.worker.core.get_handle():
self.parent.worker.init(path)
if self.parent.worker.core.get_handle():
self.core = self.parent.worker.core
self.set_core()
self.set_video()
self.parent.window_size_triggered(self.get_size_safe())
self.parent.state_changed.emit((True, False, False, False))
elif widget == self.pathPlugins:
self.parent.worker.plugins_shutdown()
self.parent.worker.plugins_unload()
self.parent.worker.plugins_load(path)
self.parent.worker.plugins_startup()
self.set_plugins()
def get_int_safe(self, key, default):
try:
return int(self.qset.value(key, default))
except ValueError:
return default
def get_size_safe(self):
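        # Read the persisted window size from QSettings, falling back to
        # SIZE_1X whenever the stored value is missing or malformed.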
try:
size = self.qset.value("size", SIZE_1X)
except TypeError:
size = SIZE_1X
if not type(size) == tuple:
size = SIZE_1X
if len(size) != 2:
size = SIZE_1X
if type(size[0]) != int or type(size[1]) != int:
size = SIZE_1X
if size[0] <= 0 or size[1] <= 0:
size = SIZE_1X
return size
def get_parameter_help_safe(self, parameter):
        help = self.core.config.get_parameter_help(parameter)
if help is not None:
return help.decode()
return ""
def get_section(self, combo):
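        # Derive the mupen64plus config section name from the selected plugin
        # file name, e.g. "mupen64plus-video-rice" becomes "Video-Rice".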
plugin = combo.currentText()
index = combo.findText(plugin)
desc = combo.itemData(index)
name = os.path.splitext(plugin)[0][12:]
section = "-".join([n.capitalize() for n in name.split("-")[0:2]])
return section, desc
def set_section(self, combo, button, settings):
if settings:
if combo != self.comboInput:
section, desc = self.get_section(combo)
settings.set_section(section, desc)
self.core.config.open_section(section)
items = self.core.config.parameters[
self.core.config.section].items()
if items:
button.setEnabled(True)
else:
button.setEnabled(False)
else:
button.setEnabled(True)
else:
button.setEnabled(False)
def set_paths(self):
path_library = self.qset.value(
"Paths/Library", find_library(CORE_NAME))
path_data = self.qset.value(
"Paths/Data", self.core.config.get_path("SharedData"))
path_roms = self.qset.value("Paths/ROM")
try:
path_plugins = self.qset.value("Paths/Plugins", os.path.realpath(
os.path.dirname(self.parent.worker.plugin_files[0])))
except IndexError:
path_plugins = ""
try:
self.pathROM.setText(path_roms)
except TypeError:
pass
self.pathLibrary.setText(path_library)
self.pathPlugins.setText(path_plugins)
self.pathData.setText(path_data)
def set_video(self):
self.core.config.open_section("Video-General")
self.set_resolution()
self.checkEnableVidExt.setChecked(
bool(self.get_int_safe("enable_vidext", 1)))
self.checkFullscreen.setChecked(
bool(self.core.config.get_parameter("Fullscreen")))
self.checkFullscreen.setEnabled(not self.parent.vidext)
self.checkVsync.setChecked(
bool(self.core.config.get_parameter("VerticalSync")))
self.checkVsync.setToolTip(
self.get_parameter_help_safe("VerticalSync"))
if sys.platform == "win32":
self.checkKeepAspect.setChecked(False)
self.checkKeepAspect.setEnabled(False)
else:
keep_aspect = bool(self.get_int_safe("keep_aspect", 1))
self.checkKeepAspect.setChecked(keep_aspect)
disable_screensaver = bool(self.get_int_safe("disable_screensaver", 1))
self.checkDisableScreenSaver.setChecked(disable_screensaver)
def set_core(self):
self.core.config.open_section("Core")
mode = self.core.config.get_parameter("R4300Emulator")
self.emumode[mode].setChecked(True)
self.checkOSD.setChecked(
self.core.config.get_parameter("OnScreenDisplay"))
self.checkOSD.setToolTip(
self.get_parameter_help_safe("OnScreenDisplay"))
self.checkNoCompiledJump.setChecked(
self.core.config.get_parameter("NoCompiledJump"))
self.checkNoCompiledJump.setToolTip(
self.get_parameter_help_safe("NoCompiledJump"))
self.checkDisableExtraMem.setChecked(
self.core.config.get_parameter("DisableExtraMem"))
self.checkDisableExtraMem.setToolTip(
self.get_parameter_help_safe("DisableExtraMem"))
count_per_op = self.core.config.get_parameter("CountPerOp")
if count_per_op is not None:
self.comboCountPerOp.setCurrentIndex(count_per_op)
else:
self.comboCountPerOp.setEnabled(False)
self.comboCountPerOp.setToolTip(
self.get_parameter_help_safe("CountPerOp"))
def set_plugins(self):
plugin_map = self.core.plugin_map
for plugin_type in self.combomap:
combo, button, settings = self.combomap[plugin_type]
combo.clear()
for plugin in plugin_map[plugin_type].values():
(plugin_handle, plugin_path, plugin_name,
plugin_desc, plugin_version) = plugin
name = os.path.basename(plugin_path)
combo.addItem(name)
index = combo.findText(str(name))
combo.setItemData(index, plugin_desc)
combo.setItemData(index, plugin_desc, Qt.ToolTipRole)
current = self.qset.value("Plugins/%s" % (
PLUGIN_NAME[plugin_type]), PLUGIN_DEFAULT[plugin_type])
index = combo.findText(current)
if index == -1:
index = 0
combo.setCurrentIndex(index)
self.set_section(combo, button, settings)
def set_resolution(self):
width = self.core.config.get_parameter("ScreenWidth")
height = self.core.config.get_parameter("ScreenHeight")
if (width, height) not in MODES:
MODES.append((width, height))
self.comboResolution.clear()
for mode in MODES:
w, h = mode
self.comboResolution.addItem(
"%sx%s" % (w, h), (w, h))
index = self.comboResolution.findText(
"%sx%s" % (width, height), Qt.MatchExactly)
        if index == -1:
            index = 0
self.comboResolution.setCurrentIndex(index)
self.comboResolution.setEnabled(not self.parent.vidext)
def save_paths(self):
self.qset.setValue("Paths/Library", self.pathLibrary.text())
self.qset.setValue("Paths/Plugins", self.pathPlugins.text())
self.qset.setValue("Paths/Data", self.pathData.text())
self.qset.setValue("Paths/ROM", self.pathROM.text())
def save_video(self):
self.core.config.open_section("Video-General")
if self.parent.vidext:
width, height = self.get_size_safe()
else:
width, height = self.comboResolution.currentText().split("x")
self.core.config.set_parameter("ScreenWidth", int(width))
self.core.config.set_parameter("ScreenHeight", int(height))
self.core.config.set_parameter("Fullscreen", self.checkFullscreen.isChecked())
self.core.config.set_parameter("VerticalSync", self.checkVsync.isChecked())
self.qset.setValue("keep_aspect", int(self.checkKeepAspect.isChecked()))
self.qset.setValue("disable_screensaver", int(self.checkDisableScreenSaver.isChecked()))
self.qset.setValue("enable_vidext", int(self.checkEnableVidExt.isChecked()))
def save_core(self):
self.core.config.open_section("Core")
        emumode = [n for n, m in enumerate(self.emumode) if m.isChecked()][0]
self.core.config.set_parameter("R4300Emulator", emumode)
self.core.config.set_parameter("OnScreenDisplay", self.checkOSD.isChecked())
self.core.config.set_parameter("NoCompiledJump", self.checkNoCompiledJump.isChecked())
self.core.config.set_parameter("DisableExtraMem", self.checkDisableExtraMem.isChecked())
self.core.config.set_parameter("CountPerOp", self.comboCountPerOp.currentIndex())
self.core.config.set_parameter("SharedDataPath", self.pathData.text().encode())
def save_plugins(self):
for plugin_type in self.combomap:
combo, button, settings = self.combomap[plugin_type]
self.qset.setValue("Plugins/%s" % PLUGIN_NAME[plugin_type], combo.currentText())
| gpl-3.0 | 4,558,125,903,167,661,000 | 37.903315 | 96 | 0.609813 | false |
kawamuray/ganeti | qa/qa_rapi.py | 1 | 34486 | #
#
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Remote API QA tests.
"""
import functools
import itertools
import random
import re
import tempfile
from ganeti import cli
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import qlang
from ganeti import query
from ganeti import rapi
from ganeti import utils
from ganeti.http.auth import ParsePasswordFile
import ganeti.rapi.client # pylint: disable=W0611
import ganeti.rapi.client_utils
import qa_config
import qa_error
import qa_logging
import qa_utils
from qa_instance import IsDiskReplacingSupported
from qa_instance import IsFailoverSupported
from qa_instance import IsMigrationSupported
from qa_job_utils import RunWithLocks
from qa_utils import (AssertEqual, AssertIn, AssertMatch, StartLocalCommand)
from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG
_rapi_ca = None
_rapi_client = None
_rapi_username = None
_rapi_password = None
def Setup(username, password):
"""Configures the RAPI client.
"""
# pylint: disable=W0603
# due to global usage
global _rapi_ca
global _rapi_client
global _rapi_username
global _rapi_password
_rapi_username = username
_rapi_password = password
master = qa_config.GetMasterNode()
# Load RAPI certificate from master node
cmd = ["cat", qa_utils.MakeNodePath(master, pathutils.RAPI_CERT_FILE)]
# Write to temporary file
_rapi_ca = tempfile.NamedTemporaryFile()
_rapi_ca.write(qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd)))
_rapi_ca.flush()
port = qa_config.get("rapi-port", default=constants.DEFAULT_RAPI_PORT)
cfg_curl = rapi.client.GenericCurlConfig(cafile=_rapi_ca.name,
proxy="")
if qa_config.UseVirtualCluster():
# TODO: Implement full support for RAPI on virtual clusters
print qa_logging.FormatWarning("RAPI tests are not yet supported on"
" virtual clusters and will be disabled")
assert _rapi_client is None
else:
_rapi_client = rapi.client.GanetiRapiClient(master.primary, port=port,
username=username,
password=password,
curl_config_fn=cfg_curl)
print "RAPI protocol version: %s" % _rapi_client.GetVersion()
return _rapi_client
def LookupRapiSecret(rapi_user):
"""Find the RAPI secret for the given user.
@param rapi_user: Login user
@return: Login secret for the user
"""
CTEXT = "{CLEARTEXT}"
master = qa_config.GetMasterNode()
cmd = ["cat", qa_utils.MakeNodePath(master, pathutils.RAPI_USERS_FILE)]
file_content = qa_utils.GetCommandOutput(master.primary,
utils.ShellQuoteArgs(cmd))
users = ParsePasswordFile(file_content)
entry = users.get(rapi_user)
if not entry:
raise qa_error.Error("User %s not found in RAPI users file" % rapi_user)
secret = entry.password
if secret.upper().startswith(CTEXT):
secret = secret[len(CTEXT):]
elif secret.startswith("{"):
raise qa_error.Error("Unsupported password schema for RAPI user %s:"
" not a clear text password" % rapi_user)
return secret
INSTANCE_FIELDS = ("name", "os", "pnode", "snodes",
"admin_state",
"disk_template", "disk.sizes", "disk.spindles",
"nic.ips", "nic.macs", "nic.modes", "nic.links",
"beparams", "hvparams",
"oper_state", "oper_ram", "oper_vcpus", "status", "tags")
NODE_FIELDS = ("name", "dtotal", "dfree", "sptotal", "spfree",
"mtotal", "mnode", "mfree",
"pinst_cnt", "sinst_cnt", "tags")
GROUP_FIELDS = compat.UniqueFrozenset([
"name", "uuid",
"alloc_policy",
"node_cnt", "node_list",
])
JOB_FIELDS = compat.UniqueFrozenset([
"id", "ops", "status", "summary",
"opstatus", "opresult", "oplog",
"received_ts", "start_ts", "end_ts",
])
LIST_FIELDS = ("id", "uri")
def Enabled():
"""Return whether remote API tests should be run.
"""
# TODO: Implement RAPI tests for virtual clusters
return (qa_config.TestEnabled("rapi") and
not qa_config.UseVirtualCluster())
def _DoTests(uris):
# pylint: disable=W0212
# due to _SendRequest usage
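  # Each entry in "uris" is a (uri, verify, method, body) tuple; "verify" may
  # be None (no check), a callable invoked with the response data, or a
  # literal value compared for equality.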
results = []
for uri, verify, method, body in uris:
assert uri.startswith("/")
print "%s %s" % (method, uri)
data = _rapi_client._SendRequest(method, uri, None, body)
if verify is not None:
if callable(verify):
verify(data)
else:
AssertEqual(data, verify)
results.append(data)
return results
# pylint: disable=W0212
# Due to _SendRequest usage
def _DoGetPutTests(get_uri, modify_uri, opcode_params, rapi_only_aliases=None,
modify_method="PUT", exceptions=None, set_exceptions=None):
""" Test if all params of an object can be retrieved, and set as well.
@type get_uri: string
@param get_uri: The URI from which information about the object can be
retrieved.
@type modify_uri: string
@param modify_uri: The URI which can be used to modify the object.
@type opcode_params: list of tuple
@param opcode_params: The parameters of the underlying opcode, used to
determine which parameters are actually present.
@type rapi_only_aliases: list of string or None
@param rapi_only_aliases: Aliases for parameters which differ from the opcode,
and become renamed before opcode submission.
@type modify_method: string
@param modify_method: The method to be used in the modification.
@type exceptions: list of string or None
@param exceptions: The parameters which have not been exposed and should not
be tested at all.
@type set_exceptions: list of string or None
@param set_exceptions: The parameters whose setting should not be tested as a
part of this test.
"""
assert get_uri.startswith("/")
assert modify_uri.startswith("/")
if exceptions is None:
exceptions = []
if set_exceptions is None:
set_exceptions = []
print "Testing get/modify symmetry of %s and %s" % (get_uri, modify_uri)
# First we see if all parameters of the opcode are returned through RAPI
params_of_interest = map(lambda x: x[0], opcode_params)
# The RAPI-specific aliases are to be checked as well
if rapi_only_aliases is not None:
params_of_interest.extend(rapi_only_aliases)
info = _rapi_client._SendRequest("GET", get_uri, None, {})
missing_params = filter(lambda x: x not in info and x not in exceptions,
params_of_interest)
if missing_params:
raise qa_error.Error("The parameters %s which can be set through the "
"appropriate opcode are not present in the response "
"from %s" % (','.join(missing_params), get_uri))
print "GET successful at %s" % get_uri
# Then if we can perform a set with the same values as received
put_payload = {}
for param in params_of_interest:
if param not in exceptions and param not in set_exceptions:
put_payload[param] = info[param]
_rapi_client._SendRequest(modify_method, modify_uri, None, put_payload)
print "%s successful at %s" % (modify_method, modify_uri)
# pylint: enable=W0212
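# Illustrative sketch only (not part of the QA flow): a GET/PUT symmetry check
# for the default node group could look roughly like the commented call below.
# The URIs and opcode parameters are assumptions made for the example.
#   _DoGetPutTests("/2/groups/default", "/2/groups/default/modify",
#                  opcodes.OpGroupSetParams.OP_PARAMS)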
def _VerifyReturnsJob(data):
if not isinstance(data, int):
AssertMatch(data, r"^\d+$")
def TestVersion():
"""Testing remote API version.
"""
_DoTests([
("/version", constants.RAPI_VERSION, "GET", None),
])
def TestEmptyCluster():
"""Testing remote API on an empty cluster.
"""
master = qa_config.GetMasterNode()
master_full = qa_utils.ResolveNodeName(master)
def _VerifyInfo(data):
AssertIn("name", data)
AssertIn("master", data)
AssertEqual(data["master"], master_full)
def _VerifyNodes(data):
master_entry = {
"id": master_full,
"uri": "/2/nodes/%s" % master_full,
}
AssertIn(master_entry, data)
def _VerifyNodesBulk(data):
for node in data:
for entry in NODE_FIELDS:
AssertIn(entry, node)
def _VerifyGroups(data):
default_group = {
"name": constants.INITIAL_NODE_GROUP_NAME,
"uri": "/2/groups/" + constants.INITIAL_NODE_GROUP_NAME,
}
AssertIn(default_group, data)
def _VerifyGroupsBulk(data):
for group in data:
for field in GROUP_FIELDS:
AssertIn(field, group)
_DoTests([
("/", None, "GET", None),
("/2/info", _VerifyInfo, "GET", None),
("/2/tags", None, "GET", None),
("/2/nodes", _VerifyNodes, "GET", None),
("/2/nodes?bulk=1", _VerifyNodesBulk, "GET", None),
("/2/groups", _VerifyGroups, "GET", None),
("/2/groups?bulk=1", _VerifyGroupsBulk, "GET", None),
("/2/instances", [], "GET", None),
("/2/instances?bulk=1", [], "GET", None),
("/2/os", None, "GET", None),
])
# Test HTTP Not Found
for method in ["GET", "PUT", "POST", "DELETE"]:
try:
_DoTests([("/99/resource/not/here/99", None, method, None)])
except rapi.client.GanetiApiError, err:
AssertEqual(err.code, 404)
else:
raise qa_error.Error("Non-existent resource didn't return HTTP 404")
# Test HTTP Not Implemented
for method in ["PUT", "POST", "DELETE"]:
try:
_DoTests([("/version", None, method, None)])
except rapi.client.GanetiApiError, err:
AssertEqual(err.code, 501)
else:
raise qa_error.Error("Non-implemented method didn't fail")
# Test GET/PUT symmetry
LEGITIMATELY_MISSING = [
"force", # Standard option
"add_uids", # Modifies UID pool, is not a param itself
"remove_uids", # Same as above
"osparams_private_cluster", # Should not be returned
]
NOT_EXPOSED_YET = ["hv_state", "disk_state", "modify_etc_hosts"]
# The nicparams are returned under the default entry, yet accepted as they
# are - this is a TODO to fix!
DEFAULT_ISSUES = ["nicparams"]
_DoGetPutTests("/2/info", "/2/modify", opcodes.OpClusterSetParams.OP_PARAMS,
exceptions=(LEGITIMATELY_MISSING + NOT_EXPOSED_YET),
set_exceptions=DEFAULT_ISSUES)
def TestRapiQuery():
"""Testing resource queries via remote API.
"""
# FIXME: the tests are failing if no LVM is enabled, investigate
# if it is a bug in the QA or in the code
if not qa_config.IsStorageTypeSupported(constants.ST_LVM_VG):
return
master_name = qa_utils.ResolveNodeName(qa_config.GetMasterNode())
rnd = random.Random(7818)
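  # fixed seed so the shuffled field order is reproducible between QA runs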
for what in constants.QR_VIA_RAPI:
if what == constants.QR_JOB:
namefield = "id"
trivial_filter = [qlang.OP_GE, namefield, 0]
elif what == constants.QR_EXPORT:
namefield = "export"
trivial_filter = [qlang.OP_REGEXP, ".*", namefield]
else:
namefield = "name"
trivial_filter = [qlang.OP_REGEXP, ".*", namefield]
all_fields = query.ALL_FIELDS[what].keys()
rnd.shuffle(all_fields)
# No fields, should return everything
result = _rapi_client.QueryFields(what)
qresult = objects.QueryFieldsResponse.FromDict(result)
AssertEqual(len(qresult.fields), len(all_fields))
# One field
result = _rapi_client.QueryFields(what, fields=[namefield])
qresult = objects.QueryFieldsResponse.FromDict(result)
AssertEqual(len(qresult.fields), 1)
# Specify all fields, order must be correct
result = _rapi_client.QueryFields(what, fields=all_fields)
qresult = objects.QueryFieldsResponse.FromDict(result)
AssertEqual(len(qresult.fields), len(all_fields))
AssertEqual([fdef.name for fdef in qresult.fields], all_fields)
# Unknown field
result = _rapi_client.QueryFields(what, fields=["_unknown!"])
qresult = objects.QueryFieldsResponse.FromDict(result)
AssertEqual(len(qresult.fields), 1)
AssertEqual(qresult.fields[0].name, "_unknown!")
AssertEqual(qresult.fields[0].kind, constants.QFT_UNKNOWN)
# Try once more, this time without the client
_DoTests([
("/2/query/%s/fields" % what, None, "GET", None),
("/2/query/%s/fields?fields=name,name,%s" % (what, all_fields[0]),
None, "GET", None),
])
# Try missing query argument
try:
_DoTests([
("/2/query/%s" % what, None, "GET", None),
])
except rapi.client.GanetiApiError, err:
AssertEqual(err.code, 400)
else:
raise qa_error.Error("Request missing 'fields' parameter didn't fail")
def _Check(exp_fields, data):
qresult = objects.QueryResponse.FromDict(data)
AssertEqual([fdef.name for fdef in qresult.fields], exp_fields)
if not isinstance(qresult.data, list):
raise qa_error.Error("Query did not return a list")
_DoTests([
# Specify fields in query
("/2/query/%s?fields=%s" % (what, ",".join(all_fields)),
compat.partial(_Check, all_fields), "GET", None),
("/2/query/%s?fields=%s" % (what, namefield),
compat.partial(_Check, [namefield]), "GET", None),
# Note the spaces
("/2/query/%s?fields=%s,%%20%s%%09,%s%%20" %
(what, namefield, namefield, namefield),
compat.partial(_Check, [namefield] * 3), "GET", None)])
if what in constants.QR_VIA_RAPI_PUT:
_DoTests([
# PUT with fields in query
("/2/query/%s?fields=%s" % (what, namefield),
compat.partial(_Check, [namefield]), "PUT", {}),
("/2/query/%s" % what, compat.partial(_Check, [namefield] * 4), "PUT", {
"fields": [namefield] * 4,
}),
("/2/query/%s" % what, compat.partial(_Check, all_fields), "PUT", {
"fields": all_fields,
}),
("/2/query/%s" % what, compat.partial(_Check, [namefield] * 4), "PUT", {
"fields": [namefield] * 4
})])
def _CheckFilter():
_DoTests([
# With filter
("/2/query/%s" % what, compat.partial(_Check, all_fields), "PUT", {
"fields": all_fields,
"filter": trivial_filter
}),
])
if what == constants.QR_LOCK:
# Locks can't be filtered
try:
_CheckFilter()
except rapi.client.GanetiApiError, err:
AssertEqual(err.code, 500)
else:
raise qa_error.Error("Filtering locks didn't fail")
else:
if what in constants.QR_VIA_RAPI_PUT:
_CheckFilter()
if what == constants.QR_NODE:
# Test with filter
(nodes, ) = _DoTests(
[("/2/query/%s" % what,
compat.partial(_Check, ["name", "master"]), "PUT",
{"fields": ["name", "master"],
"filter": [qlang.OP_TRUE, "master"],
})])
qresult = objects.QueryResponse.FromDict(nodes)
AssertEqual(qresult.data, [
[[constants.RS_NORMAL, master_name], [constants.RS_NORMAL, True]],
])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstance(instance):
"""Testing getting instance(s) info via remote API.
"""
def _VerifyInstance(data):
for entry in INSTANCE_FIELDS:
AssertIn(entry, data)
def _VerifyInstancesList(data):
for instance in data:
for entry in LIST_FIELDS:
AssertIn(entry, instance)
def _VerifyInstancesBulk(data):
for instance_data in data:
_VerifyInstance(instance_data)
_DoTests([
("/2/instances/%s" % instance.name, _VerifyInstance, "GET", None),
("/2/instances", _VerifyInstancesList, "GET", None),
("/2/instances?bulk=1", _VerifyInstancesBulk, "GET", None),
("/2/instances/%s/activate-disks" % instance.name,
_VerifyReturnsJob, "PUT", None),
("/2/instances/%s/deactivate-disks" % instance.name,
_VerifyReturnsJob, "PUT", None),
])
# Test OpBackupPrepare
(job_id, ) = _DoTests([
("/2/instances/%s/prepare-export?mode=%s" %
(instance.name, constants.EXPORT_MODE_REMOTE),
_VerifyReturnsJob, "PUT", None),
])
result = _WaitForRapiJob(job_id)[0]
AssertEqual(len(result["handshake"]), 3)
AssertEqual(result["handshake"][0], constants.RIE_VERSION)
AssertEqual(len(result["x509_key_name"]), 3)
AssertIn("-----BEGIN CERTIFICATE-----", result["x509_ca"])
def TestNode(node):
"""Testing getting node(s) info via remote API.
"""
def _VerifyNode(data):
for entry in NODE_FIELDS:
AssertIn(entry, data)
def _VerifyNodesList(data):
for node in data:
for entry in LIST_FIELDS:
AssertIn(entry, node)
def _VerifyNodesBulk(data):
for node_data in data:
_VerifyNode(node_data)
_DoTests([
("/2/nodes/%s" % node.primary, _VerifyNode, "GET", None),
("/2/nodes", _VerifyNodesList, "GET", None),
("/2/nodes?bulk=1", _VerifyNodesBulk, "GET", None),
])
# Not parameters of the node, but controlling opcode behavior
LEGITIMATELY_MISSING = ["force", "powered"]
# Identifying the node - RAPI provides these itself
IDENTIFIERS = ["node_name", "node_uuid"]
# As the name states, these can be set but not retrieved yet
NOT_EXPOSED_YET = ["hv_state", "disk_state", "auto_promote"]
_DoGetPutTests("/2/nodes/%s" % node.primary,
"/2/nodes/%s/modify" % node.primary,
opcodes.OpNodeSetParams.OP_PARAMS,
modify_method="POST",
exceptions=(LEGITIMATELY_MISSING + NOT_EXPOSED_YET +
IDENTIFIERS))
def _FilterTags(seq):
"""Removes unwanted tags from a sequence.
"""
ignore_re = qa_config.get("ignore-tags-re", None)
if ignore_re:
return itertools.ifilterfalse(re.compile(ignore_re).match, seq)
else:
return seq
def TestTags(kind, name, tags):
"""Tests .../tags resources.
"""
if kind == constants.TAG_CLUSTER:
uri = "/2/tags"
elif kind == constants.TAG_NODE:
uri = "/2/nodes/%s/tags" % name
elif kind == constants.TAG_INSTANCE:
uri = "/2/instances/%s/tags" % name
elif kind == constants.TAG_NODEGROUP:
uri = "/2/groups/%s/tags" % name
elif kind == constants.TAG_NETWORK:
uri = "/2/networks/%s/tags" % name
else:
raise errors.ProgrammerError("Unknown tag kind")
def _VerifyTags(data):
AssertEqual(sorted(tags), sorted(_FilterTags(data)))
queryargs = "&".join("tag=%s" % i for i in tags)
# Add tags
(job_id, ) = _DoTests([
("%s?%s" % (uri, queryargs), _VerifyReturnsJob, "PUT", None),
])
_WaitForRapiJob(job_id)
# Retrieve tags
_DoTests([
(uri, _VerifyTags, "GET", None),
])
# Remove tags
(job_id, ) = _DoTests([
("%s?%s" % (uri, queryargs), _VerifyReturnsJob, "DELETE", None),
])
_WaitForRapiJob(job_id)
def _WaitForRapiJob(job_id):
"""Waits for a job to finish.
"""
def _VerifyJob(data):
AssertEqual(data["id"], job_id)
for field in JOB_FIELDS:
AssertIn(field, data)
_DoTests([
("/2/jobs/%s" % job_id, _VerifyJob, "GET", None),
])
return rapi.client_utils.PollJob(_rapi_client, job_id,
cli.StdioJobPollReportCb())
def TestRapiNodeGroups():
"""Test several node group operations using RAPI.
"""
(group1, group2, group3) = qa_utils.GetNonexistentGroups(3)
# Create a group with no attributes
body = {
"name": group1,
}
(job_id, ) = _DoTests([
("/2/groups", _VerifyReturnsJob, "POST", body),
])
_WaitForRapiJob(job_id)
# Create a group specifying alloc_policy
body = {
"name": group2,
"alloc_policy": constants.ALLOC_POLICY_UNALLOCABLE,
}
(job_id, ) = _DoTests([
("/2/groups", _VerifyReturnsJob, "POST", body),
])
_WaitForRapiJob(job_id)
# Modify alloc_policy
body = {
"alloc_policy": constants.ALLOC_POLICY_UNALLOCABLE,
}
(job_id, ) = _DoTests([
("/2/groups/%s/modify" % group1, _VerifyReturnsJob, "PUT", body),
])
_WaitForRapiJob(job_id)
# Rename a group
body = {
"new_name": group3,
}
(job_id, ) = _DoTests([
("/2/groups/%s/rename" % group2, _VerifyReturnsJob, "PUT", body),
])
_WaitForRapiJob(job_id)
# Test for get/set symmetry
# Identifying the node - RAPI provides these itself
IDENTIFIERS = ["group_name"]
# As the name states, not exposed yet
NOT_EXPOSED_YET = ["hv_state", "disk_state"]
# The parameters we do not want to get and set (as that sets the
# group-specific params to the filled ones)
FILLED_PARAMS = ["ndparams", "ipolicy", "diskparams"]
# The aliases that we can use to perform this test with the group-specific
# params
CUSTOM_PARAMS = ["custom_ndparams", "custom_ipolicy", "custom_diskparams"]
_DoGetPutTests("/2/groups/%s" % group3, "/2/groups/%s/modify" % group3,
opcodes.OpGroupSetParams.OP_PARAMS,
rapi_only_aliases=CUSTOM_PARAMS,
exceptions=(IDENTIFIERS + NOT_EXPOSED_YET),
set_exceptions=FILLED_PARAMS)
# Delete groups
for group in [group1, group3]:
(job_id, ) = _DoTests([
("/2/groups/%s" % group, _VerifyReturnsJob, "DELETE", None),
])
_WaitForRapiJob(job_id)
def TestRapiInstanceAdd(node, use_client):
"""Test adding a new instance via RAPI"""
if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
return
instance = qa_config.AcquireInstance()
instance.SetDiskTemplate(constants.DT_PLAIN)
try:
disks = [{"size": utils.ParseUnit(d.get("size")),
"name": str(d.get("name"))}
for d in qa_config.GetDiskOptions()]
nic0_mac = instance.GetNicMacAddr(0, constants.VALUE_GENERATE)
nics = [{
constants.INIC_MAC: nic0_mac,
}]
beparams = {
constants.BE_MAXMEM: utils.ParseUnit(qa_config.get(constants.BE_MAXMEM)),
constants.BE_MINMEM: utils.ParseUnit(qa_config.get(constants.BE_MINMEM)),
}
if use_client:
job_id = _rapi_client.CreateInstance(constants.INSTANCE_CREATE,
instance.name,
constants.DT_PLAIN,
disks, nics,
os=qa_config.get("os"),
pnode=node.primary,
beparams=beparams)
else:
body = {
"__version__": 1,
"mode": constants.INSTANCE_CREATE,
"name": instance.name,
"os_type": qa_config.get("os"),
"disk_template": constants.DT_PLAIN,
"pnode": node.primary,
"beparams": beparams,
"disks": disks,
"nics": nics,
}
(job_id, ) = _DoTests([
("/2/instances", _VerifyReturnsJob, "POST", body),
])
_WaitForRapiJob(job_id)
return instance
except:
instance.Release()
raise
def _GenInstanceAllocationDict(node, instance):
"""Creates an instance allocation dict to be used with the RAPI"""
instance.SetDiskTemplate(constants.DT_PLAIN)
disks = [{"size": utils.ParseUnit(d.get("size")),
"name": str(d.get("name"))}
for d in qa_config.GetDiskOptions()]
nic0_mac = instance.GetNicMacAddr(0, constants.VALUE_GENERATE)
nics = [{
constants.INIC_MAC: nic0_mac,
}]
beparams = {
constants.BE_MAXMEM: utils.ParseUnit(qa_config.get(constants.BE_MAXMEM)),
constants.BE_MINMEM: utils.ParseUnit(qa_config.get(constants.BE_MINMEM)),
}
return _rapi_client.InstanceAllocation(constants.INSTANCE_CREATE,
instance.name,
constants.DT_PLAIN,
disks, nics,
os=qa_config.get("os"),
pnode=node.primary,
beparams=beparams)
def TestRapiInstanceMultiAlloc(node):
"""Test adding two new instances via the RAPI instance-multi-alloc method"""
if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
return
JOBS_KEY = "jobs"
instance_one = qa_config.AcquireInstance()
instance_two = qa_config.AcquireInstance()
instance_list = [instance_one, instance_two]
try:
rapi_dicts = map(functools.partial(_GenInstanceAllocationDict, node),
instance_list)
job_id = _rapi_client.InstancesMultiAlloc(rapi_dicts)
results, = _WaitForRapiJob(job_id)
if JOBS_KEY not in results:
raise qa_error.Error("RAPI instance-multi-alloc did not deliver "
"information about created jobs")
if len(results[JOBS_KEY]) != len(instance_list):
raise qa_error.Error("RAPI instance-multi-alloc failed to return the "
"desired number of jobs!")
for success, job in results[JOBS_KEY]:
if success:
_WaitForRapiJob(job)
else:
raise qa_error.Error("Failed to create instance in "
"instance-multi-alloc call")
except:
# Note that although released, it may be that some of the instance creations
# have in fact succeeded. Handling this in a better way may be possible, but
# is not necessary as the QA has already failed at this point.
for instance in instance_list:
instance.Release()
raise
return (instance_one, instance_two)
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestRapiInstanceRemove(instance, use_client):
"""Test removing instance via RAPI"""
# FIXME: this does not work if LVM is not enabled. Find out if this is a bug
# in RAPI or in the test
if not qa_config.IsStorageTypeSupported(constants.ST_LVM_VG):
return
if use_client:
job_id = _rapi_client.DeleteInstance(instance.name)
else:
(job_id, ) = _DoTests([
("/2/instances/%s" % instance.name, _VerifyReturnsJob, "DELETE", None),
])
_WaitForRapiJob(job_id)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRapiInstanceMigrate(instance):
"""Test migrating instance via RAPI"""
if not IsMigrationSupported(instance):
print qa_logging.FormatInfo("Instance doesn't support migration, skipping"
" test")
return
# Move to secondary node
_WaitForRapiJob(_rapi_client.MigrateInstance(instance.name))
qa_utils.RunInstanceCheck(instance, True)
# And back to previous primary
_WaitForRapiJob(_rapi_client.MigrateInstance(instance.name))
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRapiInstanceFailover(instance):
"""Test failing over instance via RAPI"""
if not IsFailoverSupported(instance):
print qa_logging.FormatInfo("Instance doesn't support failover, skipping"
" test")
return
# Move to secondary node
_WaitForRapiJob(_rapi_client.FailoverInstance(instance.name))
qa_utils.RunInstanceCheck(instance, True)
# And back to previous primary
_WaitForRapiJob(_rapi_client.FailoverInstance(instance.name))
@InstanceCheck(INST_UP, INST_DOWN, FIRST_ARG)
def TestRapiInstanceShutdown(instance):
"""Test stopping an instance via RAPI"""
_WaitForRapiJob(_rapi_client.ShutdownInstance(instance.name))
@InstanceCheck(INST_DOWN, INST_UP, FIRST_ARG)
def TestRapiInstanceStartup(instance):
"""Test starting an instance via RAPI"""
_WaitForRapiJob(_rapi_client.StartupInstance(instance.name))
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestRapiInstanceRenameAndBack(rename_source, rename_target):
"""Test renaming instance via RAPI
This must leave the instance with the original name (in the
non-failure case).
"""
_WaitForRapiJob(_rapi_client.RenameInstance(rename_source, rename_target))
qa_utils.RunInstanceCheck(rename_source, False)
qa_utils.RunInstanceCheck(rename_target, False)
_WaitForRapiJob(_rapi_client.RenameInstance(rename_target, rename_source))
qa_utils.RunInstanceCheck(rename_target, False)
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestRapiInstanceReinstall(instance):
"""Test reinstalling an instance via RAPI"""
if instance.disk_template == constants.DT_DISKLESS:
print qa_logging.FormatInfo("Test not supported for diskless instances")
return
_WaitForRapiJob(_rapi_client.ReinstallInstance(instance.name))
# By default, the instance is started again
qa_utils.RunInstanceCheck(instance, True)
# Reinstall again without starting
_WaitForRapiJob(_rapi_client.ReinstallInstance(instance.name,
no_startup=True))
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRapiInstanceReplaceDisks(instance):
"""Test replacing instance disks via RAPI"""
if not IsDiskReplacingSupported(instance):
print qa_logging.FormatInfo("Instance doesn't support disk replacing,"
" skipping test")
return
fn = _rapi_client.ReplaceInstanceDisks
_WaitForRapiJob(fn(instance.name,
mode=constants.REPLACE_DISK_AUTO, disks=[]))
_WaitForRapiJob(fn(instance.name,
mode=constants.REPLACE_DISK_SEC, disks="0"))
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRapiInstanceModify(instance):
"""Test modifying instance via RAPI"""
default_hv = qa_config.GetDefaultHypervisor()
def _ModifyInstance(**kwargs):
_WaitForRapiJob(_rapi_client.ModifyInstance(instance.name, **kwargs))
_ModifyInstance(beparams={
constants.BE_VCPUS: 3,
})
_ModifyInstance(beparams={
constants.BE_VCPUS: constants.VALUE_DEFAULT,
})
if default_hv == constants.HT_XEN_PVM:
_ModifyInstance(hvparams={
constants.HV_KERNEL_ARGS: "single",
})
_ModifyInstance(hvparams={
constants.HV_KERNEL_ARGS: constants.VALUE_DEFAULT,
})
elif default_hv == constants.HT_XEN_HVM:
_ModifyInstance(hvparams={
constants.HV_BOOT_ORDER: "acn",
})
_ModifyInstance(hvparams={
constants.HV_BOOT_ORDER: constants.VALUE_DEFAULT,
})
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRapiInstanceConsole(instance):
"""Test getting instance console information via RAPI"""
result = _rapi_client.GetInstanceConsole(instance.name)
console = objects.InstanceConsole.FromDict(result)
AssertEqual(console.Validate(), None)
AssertEqual(console.instance, qa_utils.ResolveInstanceName(instance.name))
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestRapiStoppedInstanceConsole(instance):
"""Test getting stopped instance's console information via RAPI"""
try:
_rapi_client.GetInstanceConsole(instance.name)
except rapi.client.GanetiApiError, err:
AssertEqual(err.code, 503)
else:
raise qa_error.Error("Getting console for stopped instance didn't"
" return HTTP 503")
def GetOperatingSystems():
"""Retrieves a list of all available operating systems.
"""
return _rapi_client.GetOperatingSystems()
def _InvokeMoveInstance(current_dest_inst, current_src_inst, rapi_pw_filename,
joint_master, perform_checks, target_nodes=None):
""" Invokes the move-instance tool for testing purposes.
"""
# Some uses of this test might require that RAPI-only commands are used,
# and the checks are command-line based.
if perform_checks:
qa_utils.RunInstanceCheck(current_dest_inst, False)
cmd = [
"../tools/move-instance",
"--verbose",
"--src-ca-file=%s" % _rapi_ca.name,
"--src-username=%s" % _rapi_username,
"--src-password-file=%s" % rapi_pw_filename,
"--dest-instance-name=%s" % current_dest_inst,
]
if target_nodes:
pnode, snode = target_nodes
cmd.extend([
"--dest-primary-node=%s" % pnode,
"--dest-secondary-node=%s" % snode,
])
else:
cmd.extend([
"--iallocator=%s" % constants.IALLOC_HAIL,
"--opportunistic-tries=1",
])
cmd.extend([
"--net=0:mac=%s" % constants.VALUE_GENERATE,
joint_master,
joint_master,
current_src_inst,
])
AssertEqual(StartLocalCommand(cmd).wait(), 0)
if perform_checks:
qa_utils.RunInstanceCheck(current_src_inst, False)
qa_utils.RunInstanceCheck(current_dest_inst, True)
def TestInterClusterInstanceMove(src_instance, dest_instance,
inodes, tnode, perform_checks=True):
"""Test tools/move-instance"""
master = qa_config.GetMasterNode()
rapi_pw_file = tempfile.NamedTemporaryFile()
rapi_pw_file.write(_rapi_password)
rapi_pw_file.flush()
# Needed only if checks are to be performed
if perform_checks:
dest_instance.SetDiskTemplate(src_instance.disk_template)
# TODO: Run some instance tests before moving back
if len(inodes) > 1:
# No disk template currently requires more than 1 secondary node. If this
# changes, either this test must be skipped or the script must be updated.
assert len(inodes) == 2
snode = inodes[1]
else:
# Instance is not redundant, but we still need to pass a node
# (which will be ignored)
snode = tnode
pnode = inodes[0]
# pnode:snode are the *current* nodes, and the first move is an
# iallocator-guided move outside of pnode. The node lock for the pnode
# assures that this happens, and while we cannot be sure where the instance
# will land, it is a real move.
locks = {locking.LEVEL_NODE: [pnode.primary]}
RunWithLocks(_InvokeMoveInstance, locks, 600.0, False,
dest_instance.name, src_instance.name, rapi_pw_file.name,
master.primary, perform_checks)
# And then back to pnode:snode
_InvokeMoveInstance(src_instance.name, dest_instance.name, rapi_pw_file.name,
master.primary, perform_checks,
target_nodes=(pnode.primary, snode.primary))
| gpl-2.0 | 6,929,556,656,399,017,000 | 30.379436 | 80 | 0.638462 | false |
sdu14SoftwareEngineering/GameOfLife_WEB | GameOfLife/urls.py | 1 | 1435 | from django.conf.urls import url
from django.contrib import admin
from game import views
from game.method import in_room
from game.method import user, in_game, ready_game
urlpatterns = [
    url(r'^admin', admin.site.urls), # Django built-in admin
    url(r'^$', views.room_select), # home page
    url(r'^room', views.room), # game page
    url(r'^test', in_room.test),
    # user
    url(r'^login$', user.login), # log in
    url(r'^login_by_cookie', user.login_by_cookie), # log in via cookie
    # in_room
    url(r'^get_rooms$', in_room.get_rooms), # get paginated room info
    url(r'^new_room$', in_room.new_room), # create a new room
    url(r'^join_room_by_id$', in_room.join_room_by_id), # join the specified room
    url(r'^join_room_random$', in_room.join_room_random), # join a random room
    url(r'^exit_room_by_id$', in_room.exit_room_by_id), # leave the specified room
    # ready_game
    url(r'^get_room_info$', ready_game.get_room_info), # get the room's users and their ready status
    url(r'^change_user_status$', ready_game.change_user_status), # user marks ready
    url(r'^begin_game$', ready_game.begin_game), # room owner starts the game
    # in_game
    url(r'^get_field$', in_game.get_field), # get the board layout and game progress
    url(r'^change_field$', in_game.change_field), # player modifies the layout
    url(r'^get_game_result$', in_game.get_game_result), # return the game result
    url(r'^exit_game$', in_game.exit_game), # exit the game
]
| apache-2.0 | 8,273,772,036,118,305,000 | 38.15625 | 72 | 0.628093 | false |
MathYourLife/TSatPy-thesis | tex/sample_scripts/Controllers_05.py | 1 | 3623 |
import time
import numpy as np
import matplotlib.pyplot as plt
from TSatPy import Controller, State
from TSatPy import StateOperator as SO
from TSatPy.Clock import Metronome
from GradientDescent import GradientDescent
print("Test P - Attitude and Body Rate Control")
run_time = 60
speed = 20
c = Metronome()
c.set_speed(speed)
dt = 0.5
x_d = State.State()
def run_test(Kq, Kpwx, Kpwy, Kpwz, plot=False):
ts, Ms, ws, theta = test(Kq, Kpwx, Kpwy, Kpwz)
if plot:
graph_it(ts, Ms, ws, theta)
def test(Kq, Kpwx, Kpwy, Kpwz):
# Randomize the initial condition of the plant
x_est_ic = State.State(
State.Quaternion(np.random.rand(3,1),radians=np.random.rand()),
        State.BodyRate(np.random.rand(3,1)))
I = [[4, 0, 0], [0, 4, 0], [0, 0, 2]]
plant_est = State.Plant(I, x_est_ic, c)
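    # proportional gain: map quaternion error and body-rate error to control moments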
Kp = SO.StateToMoment(
SO.QuaternionToMoment(Kq),
SO.BodyRateToMoment([[Kpwx,0,0],[0,Kpwy,0],[0,0,Kpwz]]))
pid = Controller.PID(c)
pid.set_Kp(Kp)
pid.set_desired_state(x_d)
M = State.Moment()
ts = []
Ms = []
ws = []
theta = []
start_time = c.tick()
end_time = c.tick() + run_time
while c.tick() < end_time:
time.sleep(dt / float(speed))
plant_est.propagate(M)
x_plant = plant_est.x
M = pid.update(x_plant)
ts.append(c.tick() - start_time)
Ms.append((M.M[0,0],M.M[1,0],M.M[2,0]))
e, r = x_plant.q.to_rotation()
theta.append(r)
ws.append((x_plant.w.w[0,0],x_plant.w.w[1,0],x_plant.w.w[2,0]))
return ts, Ms, ws, theta
def grid_me(ax):
ax.grid(color='0.75', linestyle='--', linewidth=1)
def graph_it(ts, Ms, ws, theta):
fig = plt.figure(dpi=80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(3,1,1)
ax.plot(ts, [M[0] for M in Ms], c='b', label=r'$M_x$', lw=2)
ax.plot(ts, [M[1] for M in Ms], c='r', label=r'$M_y$', lw=2)
ax.plot(ts, [M[2] for M in Ms], c='g', label=r'$M_z$', lw=2)
ax.set_ylabel(r'Moment (Nm)')
grid_me(ax)
plt.legend(prop={'size':10})
ax = fig.add_subplot(3,1,2)
ax.plot(ts, theta, c='b', label=r'$\theta$', lw=2)
ax.set_ylabel(r'Quaternion Angle (rad)')
grid_me(ax)
plt.legend(prop={'size':10})
ax = fig.add_subplot(3,1,3)
ax.plot(ts, [w[0] for w in ws], c='b', label=r'$\omega_x$', lw=2)
ax.plot(ts, [w[1] for w in ws], c='r', label=r'$\omega_y$', lw=2)
ax.plot(ts, [w[2] for w in ws], c='g', label=r'$\omega_z$', lw=2)
ax.set_ylabel(r'Body Rate (rad/sec)')
grid_me(ax)
plt.legend(prop={'size':10})
ax.set_xlabel('$t(k)$ seconds')
plt.tight_layout()
plt.show()
def calc_err(ts, Ms, ws, theta):
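    """Gradient-descent cost: mean absolute moment per axis, summed over the axes (control effort)."""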
M = np.array(Ms)
theta = np.array(theta)
cost = np.abs(M).mean(axis=0).sum()
return cost
def main():
domains = [
['Kq', 0.001, 0.9],
['Kpwx', 0.001, 0.9],
['Kpwy', 0.001, 0.9],
['Kpwz', 0.001, 0.9],
]
kwargs = {
# Number of iterations to run
'N': 200,
# Definition of parameter search domain
'domains': domains,
# Function that will run a test
'run_test': test,
# Function that will take the return of run_test and determine
# how well the parameters worked.
'calc_cost': calc_err,
}
print(GradientDescent.descend(**kwargs))
return 0
if __name__ == '__main__':
kwargs = {'Kpwx': 0.494, 'Kpwy': 0.583, 'Kq': 0.0912, 'Kpwz': 0.624}
# kwargs = None
if kwargs is not None:
kwargs['plot'] = True
run_test(**kwargs)
else:
exit(main())
| mit | -975,508,984,488,423,700 | 23.815068 | 72 | 0.556169 | false |
mcr/ietfdb | ietf/person/models.py | 1 | 6025 | # Copyright The IETF Trust 2007, All Rights Reserved
import datetime
from django.db import models
from django.contrib.auth.models import User
from ietf.person.name import name_parts, initials
class PersonInfo(models.Model):
time = models.DateTimeField(default=datetime.datetime.now) # When this Person record entered the system
name = models.CharField(max_length=255, db_index=True) # The normal unicode form of the name. This must be
# set to the same value as the ascii-form if equal.
ascii = models.CharField(max_length=255) # The normal ascii-form of the name.
ascii_short = models.CharField(max_length=32, null=True, blank=True) # The short ascii-form of the name. Also in alias table if non-null
address = models.TextField(max_length=255, blank=True)
affiliation = models.CharField(max_length=255, blank=True)
def __unicode__(self):
return self.plain_name()
def name_parts(self):
return name_parts(self.name)
def ascii_parts(self):
return name_parts(self.ascii)
def short(self):
if self.ascii_short:
return self.ascii_short
else:
prefix, first, middle, last, suffix = self.ascii_parts()
return (first and first[0]+"." or "")+(middle or "")+" "+last+(suffix and " "+suffix or "")
def plain_name(self):
if self.ascii_short:
return self.ascii_short
prefix, first, middle, last, suffix = name_parts(self.name)
return u" ".join([first, last])
def initials(self):
return initials(self.ascii or self.name)
def last_name(self):
return name_parts(self.name)[3]
def role_email(self, role_name, group=None):
"""Lookup email for role for person, optionally on group which
may be an object or the group acronym."""
        if group:
            # imported here to avoid a circular import with ietf.group.models
            from ietf.group.models import Group
            if isinstance(group, str) or isinstance(group, unicode):
                group = Group.objects.get(acronym=group)
e = Email.objects.filter(person=self, role__group=group, role__name=role_name)
else:
e = Email.objects.filter(person=self, role__group__state="active", role__name=role_name)
if e:
return e[0]
# no cigar, try the complete set before giving up
e = self.email_set.order_by("-active", "-time")
if e:
return e[0]
return None
def email_address(self):
e = self.email_set.filter(active=True).order_by("-time")
if e:
return e[0].address
else:
return ""
def formatted_email(self):
e = self.email_set.order_by("-active", "-time")
if e:
return e[0].formatted_email()
else:
return ""
def full_name_as_key(self):
# this is mostly a remnant from the old views, needed in the menu
return self.plain_name().lower().replace(" ", ".")
class Meta:
abstract = True
class PersonManager(models.Manager):
def by_email(self, email):
results = self.get_query_set().filter(user__email = email)
if len(results)>0:
return results[0]
else:
return None
def by_username(self, username):
results = self.get_query_set().filter(user__username = username)
if len(results)>0:
return results[0]
else:
return None
class Person(PersonInfo):
objects = PersonManager()
user = models.OneToOneField(User, blank=True, null=True)
def person(self): # little temporary wrapper to help porting to new schema
return self
def url(self, sitefqdn):
return "%s/people/%s.json" % (sitefqdn, self.id)
# person json not yet implemented
#def json_dict(self, sitefqdn):
# ct1 = dict()
# ct1['person_id'] = self.id
# ct1['href'] = self.url(sitefqdn)
# ct1['name'] = self.name
# ct1['ascii'] = self.ascii
    # ct1['affiliation']= self.affiliation
# return ct1
class PersonHistory(PersonInfo):
person = models.ForeignKey(Person, related_name="history_set")
user = models.ForeignKey(User, blank=True, null=True)
class Alias(models.Model):
"""This is used for alternative forms of a name. This is the
primary lookup point for names, and should always contain the
unicode form (and ascii form, if different) of a name which is
recorded in the Person record.
"""
person = models.ForeignKey(Person)
name = models.CharField(max_length=255, db_index=True)
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = "Aliases"
class Email(models.Model):
address = models.CharField(max_length=64, primary_key=True)
person = models.ForeignKey(Person, null=True)
time = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=True) # Old email addresses are *not* purged, as history
# information points to persons through these
def __unicode__(self):
return self.address
def get_name(self):
return self.person.plain_name() if self.person else self.address
def formatted_email(self):
if self.person and self.person.ascii:
return u'"%s" <%s>' % (self.person.ascii, self.address)
else:
return self.address
def invalid_address(self):
# we have some legacy authors with unknown email addresses
return self.address.startswith("unknown-email") and "@" not in self.address
def email_address(self):
"""Get valid, current email address; in practise, for active,
non-invalid addresses it is just the address field. In other
cases, we default to person's email address."""
if self.invalid_address() or not self.active:
if self.person:
return self.person.email_address()
return
return self.address
| bsd-3-clause | -5,651,668,828,004,410,000 | 37.621795 | 146 | 0.614606 | false |
Swind/TuringCoffee | src/heater_server.py | 1 | 2463 | import pid_controller
from utils import json_config
from utils import channel
class HeaterServer(object):
def __init__(self):
# Read Config
self.config = json_config.parse_json('config.json')
# Create PID controller
self.pid_controller = pid_controller.PIDController(self.config)
# That other process can subscribe the pid controller status
self.pub_channel = channel.Channel(
self.config['HeaterServer']['Publish_Socket_Address'], 'Pub', True)
# Receive the pid controller command
self.cmd_channel = channel.Channel(
self.config['HeaterServer']['Command_Socket_Address'], 'Pair', True)
def __pid_controller_observer(self, *pid_status):
self.publish_pid_status(*pid_status)
def start(self):
# Start pid controller thread
self.pid_controller.start()
# Add observer to publish pid status
self.pid_controller.add_observer(self.__pid_controller_observer)
# The main thread will receive and set the pid parameters by nanomsg
self.receive_pid_parameters()
# ============================================================================================================
#
# nanomsg API
#
# ============================================================================================================
def publish_pid_status(self, cycle_time, duty_cycle, set_point, temperature):
"""
Publish pid status:
e.g
{
"cycle_time": 5,
"duty_cycle": 70,
"set_point": 80,
"temperature": 26.53
}
"""
# print "Publish cycle_time:{}, duty_cycle:{},
# set_point:{}".format(cycle_time, duty_cycle, set_point, temperature)
self.pub_channel.send({'cycle_time': cycle_time, 'duty_cycle':
duty_cycle, 'set_point': set_point, 'temperature': temperature})
def receive_pid_parameters(self):
"""
Receive pid parameters:
e.g
{
"cycle_time": 1,
"k": 44,
"i": 165,
"d": 4,
"set_point": 80
}
"""
# The main thread will handle the command socket
while(True):
cmd = self.cmd_channel.recv()
self.pid_controller.set_params(
cmd['cycle_time'], cmd['k'], cmd['i'], cmd['d'], cmd['set_point'])
| mit | 2,292,491,661,193,839,600 | 31.84 | 114 | 0.509947 | false |
dash-dash/pyzmq-mdp | setup.py | 1 | 1319 | # -*- coding: utf-8 -*-
"""Module containing worker functionality for the MDP implementation.
For the MDP specification see: http://rfc.zeromq.org/spec:7
"""
__license__ = """
This file is part of MDP.
MDP is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MDP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MDP. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'shykes (Solomon Hykes)'
__email__ = '[email protected]'
from setuptools import setup
setup(
name = 'pyzmq-mdp',
version = '0.2',
description = 'ZeroMQ MDP protocol in Python using pyzmq',
author = 'Guido Goldstein',
author_email= '[email protected]',
url = 'https://github.com/guidog/pyzmq-mdp',
package_dir = {'mdp': 'mdp'},
packages = ['mdp'],
zip_safe = False,
use_2to3 = True
)
| gpl-3.0 | -3,930,020,499,070,619,600 | 30.404762 | 72 | 0.662623 | false |
Scandie/openprocurement.tender.esco | openprocurement/tender/esco/tests/tender.py | 1 | 3188 | # -*- coding: utf-8 -*-
import unittest
from openprocurement.tender.openeu.constants import TENDERING_DAYS
from openprocurement.tender.esco.tests.base import (
test_tender_data, test_lots, test_bids,
BaseESCOWebTest, BaseESCOEUContentWebTest,
)
from openprocurement.api.tests.base import snitch
from openprocurement.tender.belowthreshold.tests.tender import TenderResourceTestMixin
from openprocurement.tender.belowthreshold.tests.tender_blanks import (
#TenderProcessTest
invalid_tender_conditions,
#TenderResourceTest
guarantee,
)
from openprocurement.tender.openua.tests.tender import TenderUAResourceTestMixin
from openprocurement.tender.openeu.tests.tender_blanks import (
#TenderProcessTest
one_bid_tender,
unsuccessful_after_prequalification_tender,
one_qualificated_bid_tender,
multiple_bidders_tender,
lost_contract_for_active_award,
#TenderResourceTest
patch_tender,
invalid_bid_tender_lot,
)
from openprocurement.tender.esco.tests.tender_blanks import (
#TenderESCOEUTest
simple_add_tender,
tender_value,
tender_min_value,
#TestTenderEU
create_tender_invalid,
tender_with_nbu_discount_rate,
invalid_bid_tender_features,
create_tender_generated,
)
class TenderESCOEUTest(BaseESCOWebTest):
initial_auth = ('Basic', ('broker', ''))
initial_data = test_tender_data
test_simple_add_tender = snitch(simple_add_tender)
test_tender_value = snitch(tender_value)
test_tender_min_value = snitch(tender_min_value)
class TestTenderEU(BaseESCOEUContentWebTest, TenderResourceTestMixin, TenderUAResourceTestMixin):
""" ESCO EU tender test """
initialize_initial_data = False
initial_data = test_tender_data
test_lots_data = test_lots
test_bids_data = test_bids
tender_period_duration = TENDERING_DAYS
test_tender_with_nbu_discount_rate = snitch(tender_with_nbu_discount_rate)
test_create_tender_invalid = snitch(create_tender_invalid)
test_create_tender_generated = snitch(create_tender_generated)
test_patch_tender = snitch(patch_tender)
test_guarantee = snitch(guarantee)
test_invalid_bid_tender_features = snitch(invalid_bid_tender_features)
test_invalid_bid_tender_lot = snitch(invalid_bid_tender_lot)
class TestTenderEUProcess(BaseESCOEUContentWebTest):
initialize_initial_data = False
initial_data = test_tender_data
test_bids_data = test_bids
test_invalid_tender_conditions = snitch(invalid_tender_conditions)
test_one_bid_tender = snitch(one_bid_tender)
test_unsuccessful_after_prequalification_tender = snitch(unsuccessful_after_prequalification_tender)
test_one_qualificated_bid_tender = snitch(one_qualificated_bid_tender)
test_multiple_bidders_tender = snitch(multiple_bidders_tender)
test_lost_contract_for_active_award = snitch(lost_contract_for_active_award)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderESCOEUTest))
suite.addTest(unittest.makeSuite(TestTenderEU))
suite.addTest(unittest.makeSuite(TestTenderEUProcess))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| apache-2.0 | 2,218,055,869,105,780,500 | 32.914894 | 104 | 0.752823 | false |
vivekec/datascience | projects/1. linear_regression/USA_wrestlers/scripts/relation.py | 1 | 2551 | # No relation between Rank and Matches Fought
import subprocess as sp
tmp = sp.call('cls',shell=True)
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
df = pd.read_excel('db/usa_wrestlers_db.xlsx')
# Checking missing values
df = df.replace({'?':np.nan}) # Replacing missing values (?) with NaN
print((df.count()/len(df)) * 100) # Percentage of missing values
# Removing 'Rank #' from Rank column
df['Rank'] = df['Rank'].replace({'Rank #':''},regex=True).astype(int)
print(df['Rank'].head())
# Extracting number of matches from Mathces Fought feature
import re
df['Matches Fought'] = df['Matches Fought'].replace({np.nan:'0'})
for i in range(1,2008):
res = re.search(r'\d+', df['Matches Fought'][i])
if res == None:
df['Matches Fought'][i] = '0'
else:
df['Matches Fought'][i] = res.group()
print(df['Matches Fought'])
# Since extraction of all 2000+ players was taking a lot time. So I saved the
# previous data into a new file and then used it for further program
df.to_excel('db/new_file.xlsx')
df = pd.read_excel('db/new_file.xlsx')
# Checking correlation
plt.figure()
plt.scatter(df['Matches Fought'], df['Rank'])
plt.title('Checking correlation')
plt.xlabel('Matches Fought')
plt.ylabel('Rank')
## Let us remove all those players who played 0 matches
df['Matches Fought'] = df['Matches Fought'].replace({0:np.nan})
df = df[np.isfinite(df['Matches Fought'])]
#print(df.count())
plt.figure()
plt.scatter(df['Matches Fought'], df['Rank'])
plt.title('Checking correlation after removing players with no info Matches Fought')
plt.xlabel('Matches Fought')
plt.ylabel('Rank')
X_train, X_test, y_train, y_test = train_test_split(
df['Matches Fought'], df['Rank'], test_size = 0.33, # Test data will be 33% of data
random_state = 42) # assign it to some values, to get same values on each fresh run
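# sklearn expects 2-D inputs, so reshape each 1-D series into a column vector (n_samples, 1)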
X_train = np.transpose(np.matrix(X_train))
y_train = np.transpose(np.matrix(y_train))
X_test = np.transpose(np.matrix(X_test))
y_test = np.transpose(np.matrix(y_test))
lm = LinearRegression()
lm.fit(X_train, y_train)
pred_test = lm.predict(X_test)
plt.figure()
plt.scatter([y_test[:,0]],[pred_test[:,0]])
plt.title('Relationship between Actual and Predicted Ranks')
plt.xlabel('Actual Rank')
plt.ylabel('Predicted Rank')
## Hence, it suggests there is no correlation between a player's Rank and the number of matches played
wroberts/annotator | tests/test_forms.py | 1 | 1111 | # -*- coding: utf-8 -*-
"""Test forms."""
from annotator.user.forms import ExtendedRegisterForm
class TestRegisterForm:
"""Register form."""
def test_validate_email_already_registered(self, user):
"""Enter email that is already registered."""
form = ExtendedRegisterForm(first_name='unique',
last_name='unique',
email=user.email,
password='example',
password_confirm='example')
assert form.validate() is False
assert '{} is already associated with an account.'.format(user.email) in form.email.errors
def test_validate_success(self, db):
"""Register with success."""
form = ExtendedRegisterForm(first_name='newfirstname',
last_name='newlastname',
email='[email protected]',
password='example',
password_confirm='example')
assert form.validate() is True
| bsd-3-clause | -2,132,118,967,734,094,600 | 38.678571 | 98 | 0.508551 | false |
LukaszObara/LeNet5 | DataProcessing.py | 1 | 2677 | # DataProcessing.py
# Libraries
# Third-Party Libraries
import pandas as pd
import numpy as np
def augment(data: np.array, size: int) -> np.array:
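    """Randomly augment `size` rows sampled (with replacement) from MNIST-style data.

    Each row holds the label in column 0 followed by 784 pixel values; the 28x28
    image gets a small random shift and, at random, a rotation and/or zoom.
    """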
import scipy.ndimage
augmented_data = data[np.random.randint(data.shape[0], size=size), :]
for i, aug in enumerate(augmented_data):
random_values = np.random.randint(low=0, high=2, size=3)
while sum(random_values) == 0:
random_values = np.random.randint(low=0, high=2, size=3)
aug_label = np.asarray(aug[0])
aug_values = aug[1:].reshape(28, 28)
# Rotate
if random_values[0] == 1:
random_rotation = np.random.randint(low=-15.0, high=15.0)
# Guarantees that the number will rotate
while random_rotation == 0:
random_rotation = np.random.randint(low=-15, high=15)
aug_values = scipy.ndimage.rotate(aug_values, random_rotation,
reshape=False)
# Shift
# if random_values[1] == 1:
random_shift = np.random.randint(low=-3, high=3, size=2)
# Guarantees that there will be at least one shift
while sum(random_shift) == 0:
random_shift = np.random.randint(low=-3, high=3, size=2)
aug_values = scipy.ndimage.shift(aug_values, random_shift)
# Zoom
if random_values[2] == 1:
zoom_values = {0:1, 1:1.3, 2:1.5, 3:1.8, 4:2.0}
rezoom_values = {0:0, 1:4, 2:7, 3:11, 4:14}
random_zoom = np.random.randint(low=0, high=5, size=2)
# Guarantees that there will be at least one zoom
while sum(random_zoom) == 0:
random_zoom = np.random.randint(low=0, high=5, size=2)
zoom = (zoom_values[random_zoom[0]], zoom_values[random_zoom[1]])
aug_values = scipy.ndimage.interpolation.zoom(aug_values, zoom)
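            # crop the zoomed image back to a centered 28x28 window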
ax0, ax1 = aug_values.shape
ax0_i, ax1_i = (ax0-28)//2, (ax1-28)//2
ax0_f = -ax0_i if ax0_i != 0 else 28
ax1_f = -ax1_i if ax1_i != 0 else 28
aug_values = aug_values[ax0_i: ax0_f, ax1_i: ax1_f]
aug_values.reshape(1, 784)
aug = np.append(aug_label, aug_values)
augmented_data[i] = aug
return augmented_data
def scalar_to_vec(labels_dense, num_classes):
"""
Converts the dense label into a sparse vector.
Parameters
----------
:type labels_dense: int8
:param labels_dense: dense label
:type num_classes: int8
:param num_classes: number of classes
Returns
-------
<class 'numpy.ndarray'>
A numpy array of length `num_classes` of zeros except for a 1 in
the position of `labels_dense`.
Examples
--------
>>> scalar_to_vec(4, 10)
[ 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
"""
assert type(labels_dense) == int
assert type(num_classes) == int
vec = np.zeros(num_classes)
vec[labels_dense] = 1
return vec
| mit | 5,176,013,660,405,668,000 | 27.417582 | 70 | 0.627568 | false |
examachine/pisi | pisi/archive.py | 1 | 12143 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
# Author: Eray Ozkural
# Baris Metin
'''Archive module provides access to regular archive file types.'''
# standard library modules
import os
import stat
import shutil
import tarfile
import zipfile
import gettext
__trans = gettext.translation('pisi', fallback=True)
_ = __trans.ugettext
# PISI modules
import pisi
import pisi.util as util
import pisi.context as ctx
class ArchiveError(pisi.Error):
pass
class LZMAError(pisi.Error):
def __init__(self, err):
pisi.Error.__init__(self, _("An error has occured while running LZMA:\n%s") % err)
class ArchiveBase(object):
"""Base class for Archive classes."""
def __init__(self, file_path, atype):
self.file_path = file_path
self.type = atype
def unpack(self, target_dir, clean_dir = False):
self.target_dir = target_dir
# first we check if we need to clean-up our working env.
if os.path.exists(self.target_dir) and clean_dir:
util.clean_dir(self.target_dir)
os.makedirs(self.target_dir)
class ArchiveBinary(ArchiveBase):
"""ArchiveBinary handles binary archive files (usually distrubuted as
.bin files)"""
def __init__(self, file_path, arch_type = "binary"):
super(ArchiveBinary, self).__init__(file_path, arch_type)
def unpack(self, target_dir, clean_dir = False):
super(ArchiveBinary, self).unpack(target_dir, clean_dir)
# we can't unpack .bin files. we'll just move them to target
# directory and leave the dirty job to actions.py ;)
import shutil
target_file = os.path.join(target_dir, os.path.basename(self.file_path))
shutil.copyfile(self.file_path, target_file)
class ArchiveTar(ArchiveBase):
"""ArchiveTar handles tar archives depending on the compression
type. Provides access to tar, tar.gz and tar.bz2 files.
This class provides the unpack magic for tar archives."""
def __init__(self, file_path, arch_type = "tar"):
super(ArchiveTar, self).__init__(file_path, arch_type)
self.tar = None
def unpack(self, target_dir, clean_dir = False):
"""Unpack tar archive to a given target directory(target_dir)."""
super(ArchiveTar, self).unpack(target_dir, clean_dir)
self.unpack_dir(target_dir)
def unpack_dir(self, target_dir):
rmode = ""
if self.type == 'tar':
rmode = 'r:'
elif self.type == 'targz':
rmode = 'r:gz'
elif self.type == 'tarbz2':
rmode = 'r:bz2'
elif self.type == 'tarlzma':
rmode = 'r:'
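            # the .lzma file is decompressed with the external lzma tool first, then read as a plain tar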
ret, out, err = util.run_batch("lzma -d -f " + self.file_path)
if ret != 0:
raise LZMAError(err)
            if self.file_path.endswith(ctx.const.lzma_suffix):
                self.file_path = self.file_path[:-len(ctx.const.lzma_suffix)]
else:
raise ArchiveError(_("Archive type not recognized"))
self.tar = tarfile.open(self.file_path, rmode)
oldwd = os.getcwd()
os.chdir(target_dir)
install_tar_path = util.join_path(ctx.config.tmp_dir(),
ctx.const.install_tar)
for tarinfo in self.tar:
# Installing packages (especially shared libraries) is a
# bit tricky. You should also change the inode if you
# change the file, cause the file is opened allready and
# accessed. Removing and creating the file will also
# change the inode and will do the trick (in fact, old
# file will be deleted only when its closed).
#
# Also, tar.extract() doesn't write on symlinks... Not any
# more :).
if self.file_path == install_tar_path:
if os.path.isfile(tarinfo.name) or os.path.islink(tarinfo.name):
try:
os.unlink(tarinfo.name)
except OSError, e:
ctx.ui.warning(e)
self.tar.extract(tarinfo)
os.chdir(oldwd)
self.close()
def add_to_archive(self, file_name, arc_name=None):
"""Add file or directory path to the tar archive"""
if not self.tar:
if self.type == 'tar':
wmode = 'w:'
elif self.type == 'targz':
wmode = 'w:gz'
elif self.type == 'tarbz2':
wmode = 'w:bz2'
elif self.type == 'tarlzma':
wmode = 'w:'
                if self.file_path.endswith(ctx.const.lzma_suffix):
                    self.file_path = self.file_path[:-len(ctx.const.lzma_suffix)]
else:
raise ArchiveError(_("Archive type not recognized"))
self.tar = tarfile.open(self.file_path, wmode)
self.tar.add(file_name, arc_name)
def close(self):
self.tar.close()
if self.tar.mode == 'w' and self.type == 'tarlzma':
batch = None
lzma = "lzma -z"
if ctx.config.values.build.compressionlevel:
lzma += " -%d" % ctx.config.values.build.compressionlevel
ret, out, err = util.run_batch("%s %s" % (lzma, self.file_path))
if ret != 0:
raise LZMAError(err)
class ArchiveZip(ArchiveBase):
"""ArchiveZip handles zip archives.
Being a zip archive PISI packages also use this class
extensively. This class provides unpacking and packing magic for
zip archives."""
symmagic = 0xA1ED0000L
def __init__(self, file_path, arch_type = "zip", mode = 'r'):
super(ArchiveZip, self).__init__(file_path, arch_type)
self.zip_obj = zipfile.ZipFile(self.file_path, mode, zipfile.ZIP_DEFLATED)
def close(self):
"""Close the zip archive."""
self.zip_obj.close()
def list_archive(self):
return self.zip_obj.namelist()
def add_to_archive(self, file_name, arc_name=None):
"""Add file or directory path to the zip file"""
# It's a pity that zipfile can't handle unicode strings. Grrr!
file_name = str(file_name)
if os.path.isdir(file_name) and not os.path.islink(file_name):
self.zip_obj.writestr(file_name + '/', '')
attr_obj = self.zip_obj.getinfo(file_name + '/')
attr_obj.external_attr = stat.S_IMODE(os.stat(file_name)[0]) << 16L
for f in os.listdir(file_name):
self.add_to_archive(os.path.join(file_name, f))
else:
if os.path.islink(file_name):
dest = os.readlink(file_name)
attr = zipfile.ZipInfo()
attr.filename = file_name
attr.create_system = 3
attr.external_attr = self.symmagic
self.zip_obj.writestr(attr, dest)
else:
if not arc_name:
arc_name = file_name
#print 'Adding %s as %s' % (file_name, arc_name)
self.zip_obj.write(file_name, arc_name)
#zinfo = self.zip_obj.getinfo(arc_name)
#zinfo.create_system = 3
def add_basename_to_archive(self, file_name):
"""Add only the basepath to the zip file. For example; if the given
file_name parameter is /usr/local/bin/somedir, this function
will create only the base directory/file somedir in the
archive."""
cwd = os.getcwd()
path_name = os.path.dirname(file_name)
file_name = os.path.basename(file_name)
if path_name:
os.chdir(path_name)
self.add_to_archive(file_name)
os.chdir(cwd)
def has_file(self, file_path):
""" Returns true if file_path is member of the zip archive"""
return file_path in self.zip_obj.namelist()
def unpack_file_cond(self, pred, target_dir, archive_root = ''):
"""Unpack/Extract files according to predicate function
pred: filename -> bool
unpacks stuff into target_dir and only extracts files
from archive_root, treating it as the archive root"""
zip_obj = self.zip_obj
for info in zip_obj.infolist():
if pred(info.filename): # check if condition holds
# below code removes that, so we find it here
is_dir = info.filename.endswith('/')
# calculate output file name
if archive_root == '':
outpath = info.filename
else:
# change archive_root
if util.subpath(archive_root, info.filename):
outpath = util.removepathprefix(archive_root,
info.filename)
else:
continue # don't extract if not under
ofile = os.path.join(target_dir, outpath)
if is_dir: # this is a directory
d = os.path.join(target_dir, outpath)
if not os.path.isdir(d):
os.makedirs(d)
perm = info.external_attr
perm &= 0xFFFF0000
perm >>= 16
perm |= 0x00000100
os.chmod(d, perm)
continue
# check that output dir is present
util.check_dir(os.path.dirname(ofile))
# remove output file we might be overwriting.
# (also check for islink? for broken symlinks...)
if os.path.isfile(ofile) or os.path.islink(ofile):
os.remove(ofile)
if info.external_attr == self.symmagic:
if os.path.isdir(ofile):
shutil.rmtree(ofile) # a rare case, the file used to be a dir, now it is a symlink!
target = zip_obj.read(info.filename)
os.symlink(target, ofile)
else:
perm = info.external_attr
perm &= 0x08FF0000
perm >>= 16
perm |= 0x00000100
buff = open (ofile, 'wb')
file_content = zip_obj.read(info.filename)
buff.write(file_content)
buff.close()
os.chmod(ofile, perm)
def unpack_files(self, paths, target_dir):
self.unpack_file_cond(lambda f:f in paths, target_dir)
def unpack_dir(self, path, target_dir):
self.unpack_file_cond(lambda f:util.subpath(path, f), target_dir)
def unpack_dir_flat(self, path, target_dir):
self.unpack_file_cond(lambda f:util.subpath(path, f), target_dir, path)
def unpack(self, target_dir, clean_dir=False):
super(ArchiveZip, self).unpack(target_dir, clean_dir)
self.unpack_file_cond(lambda f: True, target_dir)
self.close()
return
class Archive:
"""Archive is the main factory for ArchiveClasses, regarding the
Abstract Factory Pattern :)."""
def __init__(self, file_path, arch_type):
"""accepted archive types:
targz, tarbz2, zip, tar"""
handlers = {
'targz': ArchiveTar,
'tarbz2': ArchiveTar,
'tarlzma': ArchiveTar,
'tar': ArchiveTar,
'zip': ArchiveZip,
'binary': ArchiveBinary
}
if not handlers.has_key(arch_type):
raise ArchiveError(_("Archive type not recognized"))
self.archive = handlers.get(arch_type)(file_path, arch_type)
def unpack(self, target_dir, clean_dir = False):
self.archive.unpack(target_dir, clean_dir)
def unpack_files(self, files, target_dir):
self.archive.unpack_files(files, target_dir)
| gpl-3.0 | 2,765,044,694,155,434,000 | 35.79697 | 107 | 0.555052 | false |
lukovkin/ufcnn-keras | models/create_signals_bid_ask_multiplefiles.py | 1 | 15205 | from __future__ import absolute_import
from __future__ import print_function
import sys
import glob
from copy import copy, deepcopy
import numpy as np
# import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 1000)
from signals import *
def find_all_signals(_df, comission=0.0, max_position_size=1, debug=False):
"""
Function finds and returns all signals that could result in profitable deals taking into account comission.
E.g. it will return Buy and Sell signal if ask price at Buy is lower than bid price at Sell minus the comission.
Then it will move one step forward and consider already seen Sell signal and the next Buy for the possible
profitable short deal.
"""
df = deepcopy(_df)
df['Buy'] = np.zeros(df.shape[0])
df['Sell'] = np.zeros(df.shape[0])
df['Buy Mod'] = np.zeros(df.shape[0])
df['Sell Mod'] = np.zeros(df.shape[0])
inflection_points = pd.DataFrame(
{'Buy': df["askpx_"].diff().shift(-1) > 0, 'Sell': df["bidpx_"].diff().shift(-1) < 0})
iterator = inflection_points.iterrows()
max_count = 0
position_size = 0
try:
while True:
# for i in range(0, 100):
idx_open, next_idx, row_open, sig_type_open = next_signal(iterator, df)
iterator = inflection_points.loc[next_idx:].iterrows()
iterator.next()
df[sig_type_open][idx_open] = 1
except TypeError:
print("Iteration stopped")
print("Buy candidates: {} Sell candidates: {}".format(df[df['Buy'] != 0].count()['Buy'],
df[df['Sell'] != 0].count()['Sell']))
candidates = df[(df['Buy'] != 0) | (df['Sell'] != 0)].iterrows()
idx_open, row_open = candidates.next()
for idx, row in candidates:
if row_open['Buy'] == 1 and (df["bidpx_"][idx] > (df["askpx_"][idx_open] + comission)):
df['Buy Mod'][idx_open] += 1
df['Sell Mod'][idx] += 1
elif row_open['Sell'] == 1 and (df["askpx_"][idx] < (df["bidpx_"][idx_open] - comission)):
df['Sell Mod'][idx_open] += 1
df['Buy Mod'][idx] += 1
idx_open = idx
row_open = row
    df = df.rename(columns={"Buy": "Buy Candidates", "Sell": "Sell Candidates"})
df['Buy'] = np.zeros(df.shape[0])
df['Sell'] = np.zeros(df.shape[0])
df['Buy'][df['Buy Mod'] != 0] = 1
df['Sell'][df['Sell Mod'] != 0] = 1
print("Buy: {} Sell: {}".format(df[df['Buy Mod'] != 0].count()['Buy Mod'],
df[df['Sell Mod'] != 0].count()['Sell Mod']))
print("Buy: {} Sell: {}".format(df[df['Buy'] != 0].count()['Buy'], df[df['Sell'] != 0].count()['Sell']))
return df
def next_signal(iterator, df=None, sig_type=None, outer_idx=None, outer_row=None):
"""
Recursive function to find best signal (Buy or Sell) of the sequnce of possible candidates (inflection points).
It compares current candidate and next candidates, if one of the next candidates of the same type is better,
e.g. if current candidate is Buy with ask price 20 and next candidate (1) is Buy with ask price 10,
then next candidate (2) is Buy with ask price 15, the function should return next candidate (1) with ask price 10
when it will face first consequtive Sell candidate.
"""
prev_idx = outer_idx
best_idx = outer_idx
best_row = outer_row
for idx, row in iterator:
# print(idx, row)
if row['Buy'] or row['Sell']:
inner_sig_type = 'Buy' if row['Buy'] else 'Sell'
print("Inner signal: ", idx, inner_sig_type)
if sig_type:
print("Outer signal: ", outer_idx, sig_type)
if inner_sig_type == sig_type:
print("Compare {} bid: {} ask: {} with {} bid: {} ask: {}".
format(best_idx, df["bidpx_"][best_idx], df["askpx_"][best_idx], idx, df["bidpx_"][idx],
df["askpx_"][idx]))
if sig_type == 'Buy' and df["askpx_"][idx] < df["askpx_"][best_idx]:
print("Better {} candidate at {} with price {}".format(sig_type, idx, df["askpx_"][idx]))
best_idx, best_row = idx, row
# return idx, idx, row, sig_type
if sig_type == 'Sell' and df["bidpx_"][idx] > df["bidpx_"][best_idx]:
print("Better {} candidate at {} with price {}".format(sig_type, idx, df["bidpx_"][idx]))
best_idx, best_row = idx, row
# return idx, idx, row, sig_type
prev_idx = idx
else:
print("Best {} candidate at {}, break...".format(sig_type, outer_idx))
return best_idx, prev_idx, best_row, sig_type
else:
print("Recursion")
return next_signal(iterator, df, inner_sig_type, idx, row)
def set_positions(_df):
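    """Accumulate the 'Buy Mod'/'Sell Mod' signals into a running 'Pos' column,
    counting how often the position changes to a net long or net short value.
    """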
df = deepcopy(_df)
df['Pos'] = np.zeros(df.shape[0])
last_position = 0
longs = 0
shorts = 0
iterator = df.iterrows()
last_idx, last_row = iterator.next()
for idx, row in iterator:
        df.loc[idx, 'Pos'] = row['Buy Mod'] - row['Sell Mod'] + df.loc[last_idx, 'Pos']
last_idx, last_row = idx, row
if df.loc[idx]['Pos'] != last_position and df.loc[idx]['Pos'] > 0:
longs += 1
elif df.loc[idx]['Pos'] != last_position and df.loc[idx]['Pos'] < 0:
shorts += 1
last_position = df.loc[idx]['Pos']
print("Long positions: {} Short positions: {}".format(longs, shorts))
return df
def find_signals(df, sig_type, comission=0.0, debug=False):
colnames = {"Buy": ("Buy", "Sell Close"),
"Sell": ("Sell", "Buy Close")}
inflection_points_buy = df["askpx_"].diff().shift(-1) > 0
inflection_points_sell = df["bidpx_"].diff().shift(-1) < 0
iterator = inflection_points_buy.iteritems() if sig_type == "Buy" else inflection_points_sell.iteritems()
inflection_points = inflection_points_buy if sig_type == "Buy" else inflection_points_sell
inner_inflection_points = inflection_points_sell if sig_type == "Buy" else inflection_points_buy
max_count = 0
(major_colname, minor_colname) = colnames[sig_type]
df[major_colname] = np.zeros(df.shape[0])
df[minor_colname] = np.zeros(df.shape[0])
for idx, val in iterator:
if max_count > 10000 and debug:
print("Max count reached, break...")
break
inner_iterator = inner_inflection_points.loc[idx:].iteritems()
if df[df[minor_colname] == 1].empty:
can_open = True
else:
can_open = idx > df[df[minor_colname] == 1].index[-1]
max_count += 1
if val and can_open:
print("{} candidate at {} with price {}".format(sig_type, idx, df["askpx_"][idx]))
for inner_idx, inner_val in inner_iterator:
if inner_idx > idx:
if sig_type == "Buy":
if df["askpx_"][inner_idx] < df["askpx_"][idx] and inflection_points[inner_idx]:
print("Better {} candidate at {} with price {}, break...".format(sig_type, inner_idx,
df["askpx_"][inner_idx]))
break
if df["bidpx_"][inner_idx] > (df["askpx_"][idx] + comission) and inner_val:
df[major_colname][idx] = 1
df[minor_colname][inner_idx] = 1
print("Buy at {} with price {}".format(idx, df["askpx_"][idx]))
print("Sell at {} with price {}".format(inner_idx, df["bidpx_"][inner_idx]))
break
elif sig_type == "Sell":
if df["bidpx_"][inner_idx] > df["bidpx_"][idx] and inflection_points[inner_idx]:
print("Better {} candidate at {} with price {}, break...".format(sig_type, inner_idx,
df["bidpx_"][inner_idx]))
break
if df["askpx_"][inner_idx] < (df["bidpx_"][idx] - comission) and inner_val:
df[major_colname][idx] = 1
df[minor_colname][inner_idx] = 1
print("Sell at {} with price {}".format(idx, df["bidpx_"][idx]))
print("Buy at {} with price {}".format(inner_idx, df["askpx_"][inner_idx]))
break
return df
def filter_signals(df):
buys = df["Buy"] + df["Buy Close"]
df["Buy Mod"] = np.zeros(df.shape[0])
df["Buy Mod"][buys == 2] = 1
sells = df["Sell"] + df["Sell Close"]
df["Sell Mod"] = np.zeros(df.shape[0])
df["Sell Mod"][sells == 2] = 1
iterator = df.iterrows()
current_signal = 0
for idx, row in iterator:
current_signal = row["Buy Mod"] - row["Sell Mod"]
if current_signal != 0:
print("Signal {} at {}".format(current_signal, idx))
inner_iterator = df.loc[idx:].iterrows()
inner_iterator.next()
for inner_idx, inner_row in inner_iterator:
next_signal = inner_row["Buy Mod"] - inner_row["Sell Mod"]
if next_signal == current_signal:
print("Consecutive similar signal {} at {}".format(next_signal, inner_idx))
if current_signal == 1:
df_slice = df.loc[idx:inner_idx]
candidates = df_slice[df_slice["Sell"] == 1]
best_candidate = candidates["bidpx_"].idxmax()
print(df.loc[best_candidate])
df["Sell Mod"].loc[best_candidate] = 1
break
elif current_signal == -1:
df_slice = df.loc[idx:inner_idx]
candidates = df_slice[df_slice["Buy"] == 1]
best_candidate = candidates["askpx_"].idxmin()
print(df.loc[best_candidate])
df["Buy Mod"].loc[best_candidate] = 1
break
elif next_signal != 0 and next_signal != current_signal:
break
df["Buy Open"] = df["Buy"]
df["Sell Open"] = df["Sell"]
df = df.drop(["Buy", "Sell"], axis=1)
print(df.columns)
df = df.rename(columns={"Buy Mod": "Buy", "Sell Mod": "Sell"})
print(df.columns)
# df = df.drop(["Buy Close", "Sell Close"], axis=1)
return df
def make_spans(df, sig_type):
span_colname = "Buys" if sig_type == "Buy" else "Sells"
reversed_df = df[::-1]
df[span_colname] = np.zeros(df.shape[0])
for idx in df[sig_type][df[sig_type] == 1].index:
signal_val = df.loc[idx]
iterator = reversed_df.loc[idx:].iterrows()
_d = print("Outer loop:", idx, signal_val["askpx_"]) if sig_type == "Buy" else print("Outer loop:", idx,
signal_val["bidpx_"])
for i, val in iterator:
# _d = print("Inner loop:", i, val["askpx_"]) if sig_type == "Buy" else print("Inner loop:", i, val["bidpx_"])
if sig_type == "Buy":
if val["askpx_"] == signal_val["askpx_"]:
# print("Add to buys")
df[span_colname][i] = 1
else:
break
elif sig_type == "Sell":
if val["bidpx_"] == signal_val["bidpx_"]:
# print("Add to sells")
df[span_colname][i] = 1
else:
break
return df
def pnl(df, chained=False):
deals = []
pnl = 0
if not chained:
for idx, row in df[(df['Buy Mod'] != 0) | (df['Sell Mod'] != 0)].iterrows():
current_trade = row['Sell Mod'] * row["bidpx_"] - row['Buy Mod'] * row["askpx_"]
pnl = pnl + current_trade
deals.append(current_trade)
print("Running PnL: ", pnl)
print("Check PnL: {} vs {}".format(pnl, np.sum(deals)))
return pnl, len(deals)
else:
is_opened = False
for idx, row in df.iterrows():
if row["Buy"]:
if is_opened:
deals.append(-row["askpx_"])
deals.append(-row["askpx_"])
is_opened = True
elif row["Sell"]:
if is_opened:
deals.append(row["bidpx_"])
deals.append(row["bidpx_"])
is_opened = True
print(len(deals))
deals.pop()
print(len(deals))
return np.sum(deals), len(deals)
def __main__():
"""
Trading Simulator from curriculumvite trading competition
    see also the arXiv paper by Roni Mittelman http://arxiv.org/pdf/1508.00317v1
Modified by [email protected]
produces data to train a neural net
# Trades smaller than this will be omitted
min_trade_amount = None
comission = 0.0
if len(sys.argv) < 2 :
print ("Usage: day_trading_file, NOT target_price-file ")
sys.exit()
day_file = sys.argv[1]
try:
write_spans = True if sys.argv[2] == "--spans" else False
except IndexError:
write_spans = False
try:
chained_deals = True if sys.argv[3] == "--chained-deals" else False
except IndexError:
chained_deals = False
generate_signals_for_file(day_file, comission, write_spans, chained_deals)
"""
"""
Trading Simulator from curriculumvite trading competition
    see also the arXiv paper by Roni Mittelman http://arxiv.org/pdf/1508.00317v1
Modified by [email protected]
produces data to train a neural net
"""
# Trades smaller than this will be omitted
file_list = sorted(glob.glob('./training_data_large/prod_data_*v.txt'))
if len(file_list) == 0:
print(
"Empty directory. Please copy tick data files into ./training_data_large/ . Aborting.")
sys.exit()
min_trade_amount = None
comission = 0.0
for j in range(len(file_list)):
filename = file_list[j]
print('Training: ', filename)
day_file = filename
if len(sys.argv) < 2 :
print(
"No ./training_data_large/product_data_*txt files exist in the directory. Please copy them in the ./training_data_largetest/ . Aborting.")
sys.exit()
try:
write_spans = True if sys.argv[1] == "--spans" else False
except IndexError:
write_spans = False
try:
chained_deals = True if sys.argv[2] == "--chained-deals" else False
except IndexError:
chained_deals = False
generate_signals_for_file(day_file, comission, write_spans, chained_deals)
__main__();
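# --- Editor's note: hypothetical invocations (the script name is an assumption,
# --- not taken from the source). __main__ above requires at least one argument
# --- and reads tick files from ./training_data_large/prod_data_*v.txt:
#     python trading_simulator.py --spans
#     python trading_simulator.py --spans --chained-deals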
| mit | 1,587,396,667,713,151,700 | 38.596354 | 155 | 0.517659 | false |
jacebrowning/gridcommand | gridcommand/routes/_formatters.py | 1 | 3337 | """Formats domain objects for route responses."""
from collections import OrderedDict
from flask import url_for
from ._bases import Formatter
# TODO: figure out a better way to serialize objects without parent objects
# pylint: disable=arguments-differ
class GameFormatter(Formatter):
"""Serializes games into dictionaries."""
def format_single(self, game):
data = OrderedDict()
kwargs = dict(_external=True, key=game.key)
data['uri'] = url_for('games.detail', **kwargs)
data['key'] = game.key
data['timestamp'] = game.timestamp
data['players'] = url_for('players.index', **kwargs)
data['turn'] = game.turn
data['pending'] = game.pending
data['start'] = url_for('games.start', **kwargs)
return data
def format_multiple(self, games):
return [url_for('games.detail',
_external=True, key=game.key) for game in games]
class PlayerFormatter(Formatter):
"""Serializes players into dictionaries."""
def format_single(self, player, game, auth):
data = OrderedDict()
kwargs = dict(_external=True, key=game.key, color=player.color)
if auth:
kwargs.update(code=player.code)
data['uri'] = url_for('players.detail', **kwargs)
data['color'] = player.color
if auth:
data['code'] = player.code
data['done'] = player.turn.done
if auth:
data['turns'] = url_for('turns.index', **kwargs)
return data
def format_multiple(self, players, game):
return [url_for('players.detail', _external=True,
key=game.key, color=player.color) for player in players]
class BoardFormatter(Formatter):
def format_single(self, board):
data = OrderedDict()
# TODO: format board
print(board)
return data
class TurnFormatter(Formatter):
"""Serializes turns into dictionaries."""
def format_single(self, game, player, number):
data = OrderedDict()
kwargs = dict(_external=True,
key=game.key,
color=player.color,
code=player.code,
number=number)
data['uri'] = url_for('turns.detail', **kwargs)
data['moves'] = url_for('moves.index', **kwargs)
data['finish'] = url_for('turns.finish', **kwargs)
return data
def format_multiple(self, turns, game, player):
return [url_for('turns.detail', _external=True,
key=game.key, color=player.color, code=player.code,
number=index + 1) for index in range(len(turns))]
class MoveFormatter(Formatter):
"""Serializes moves into dictionaries."""
def format_single(self, move):
data = OrderedDict()
data['count'] = move.count
return data
def format_multiple(self, moves, game, player):
return [url_for('moves.detail', _external=True,
key=game.key, color=player.color, code=player.code,
begin=move.begin, end=move.end) for move in moves]
game_formatter = GameFormatter()
player_formatter = PlayerFormatter()
board_formatter = BoardFormatter()
turn_formatter = TurnFormatter()
move_formatter = MoveFormatter()
| lgpl-3.0 | -6,029,994,102,759,506,000 | 28.27193 | 80 | 0.596644 | false |
5GExchange/mapping | hybrid/ResourceSharingStrategy.py | 1 | 8177 | # Copyright 2017 Balazs Nemeth, Mark Szalay, Janos Doka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from abc import ABCMeta, abstractmethod
try:
# runs when mapping files are called from ESCAPE
from escape.nffg_lib.nffg import NFFG, NFFGToolBox
except ImportError:
# runs when mapping repo is cloned individually, and NFFG lib is in a
# sibling directory. WARNING: cicular import is not avioded by design.
import site
site.addsitedir('..')
from nffg_lib.nffg import NFFG, NFFGToolBox
import logging
log = logging.getLogger(" Resource sharing")
class AbstractResourceSharingStrategy(object):
__metaclass__ = ABCMeta
def __init__(self, resource_grap, full_log_path):
self.bare_resource_graph = resource_grap
log.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(message)s')
logging.basicConfig(filename='log_file.log', filemode='w',
level=logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s | Res sharing | %(levelname)s | \t%(message)s')
hdlr = logging.FileHandler(full_log_path)
hdlr.setFormatter(formatter)
log.addHandler(hdlr)
log.setLevel(logging.DEBUG)
# All strategies shall return a copy for the very first time it is
# called (maybe other times too if it is necessary)
self.called_for_first_time = True
@abstractmethod
def get_online_resource(self, res_online, res_offline):
raise NotImplementedError("Abstract method")
@abstractmethod
def get_offline_resource(self, res_online, res_offline):
raise NotImplementedError("Abstract method")
class DynamicMaxOnlineToAll(AbstractResourceSharingStrategy):
"""
Calculates the maximal resource utilization in every resource component,
and sets all elements of the offline resource graph with this value as the
available capacity.
    The online resource graph is returned with 100% of its resources.
-- SHOULDN'T IT BE THE REMAINING FROM THE MAXRESOURCE?
"""
# NOT READY YET !!!!!!!!!!
def __init__(self, resource_grap, full_log_path):
super(DynamicMaxOnlineToAll, self).__init__(resource_grap, full_log_path)
self.link_bw_types = {}
self.max_used_node_bw = 0.0
self.max_used_node_cpu = 0
self.max_used_node_mem = 0.0
self.max_used_node_storage = 0.0
self.max_used_sw_bw = 0.0
self.float_uncertainty_addend = 1e-06
def set_rg_max_avail_node_and_link(self, rs):
res_online = copy.deepcopy(rs)
res_online.calculate_available_node_res()
res_online.calculate_available_link_res([])
# TODO: Percentages should be set instead of absolute values!! (currently it may increase node resourse above the originally available!)
for i in res_online.infras:
if i.infra_type == 'EE':
if i.resources.bandwidth - i.availres.bandwidth > self.max_used_node_bw:
self.max_used_node_bw = i.resources.bandwidth - i.availres.bandwidth
if i.resources.cpu - i.availres.cpu > self.max_used_node_cpu:
self.max_used_node_cpu = i.resources.cpu - i.availres.cpu
if i.resources.mem - i.availres.mem > self.max_used_node_mem:
self.max_used_node_mem = i.resources.mem - i.availres.mem
if i.resources.storage - i.availres.storage > self.max_used_node_storage:
self.max_used_node_storage = i.resources.storage - i.availres.storage
elif i.infra_type == 'SDN-SWITCH':
if i.resources.bandwidth - i.availres.bandwidth > self.max_used_sw_bw:
self.max_used_sw_bw = i.resources.bandwidth - i.availres.bandwidth
else:
log.error("Invalid infra type!")
raise
self.max_used_node_bw += self.float_uncertainty_addend
self.max_used_node_cpu += self.float_uncertainty_addend
self.max_used_node_mem += self.float_uncertainty_addend
self.max_used_node_storage += self.float_uncertainty_addend
self.max_used_sw_bw += self.float_uncertainty_addend
for tup in (
('cpu', self.max_used_node_cpu), ('node_bw', self.max_used_node_bw),
('mem', self.max_used_node_mem),
('storage', self.max_used_node_storage),
('sw_bw', self.max_used_sw_bw)):
log.debug("Maximal used %s resource to set is %s" % tup)
#Calculate links
self.link_bw_types = {}
for i, j, k, d in res_online.network.edges_iter(data=True, keys=True):
if d.type == 'STATIC':
if int(d.bandwidth) not in self.link_bw_types:
self.link_bw_types[int(d.bandwidth)] = 0.0
for i, j, k, d in res_online.network.edges_iter(data=True, keys=True):
if d.type == 'STATIC':
if d.bandwidth - d.availbandwidth > self.link_bw_types[int(d.bandwidth)]:
self.link_bw_types[int(d.bandwidth)] = d.bandwidth - d.availbandwidth
for i in self.link_bw_types:
self.link_bw_types[i] += self.float_uncertainty_addend
log.debug("Max used link bandwidths by link types: %s" %
self.link_bw_types)
def get_offline_resource(self, res_online, res_offline):
self.set_rg_max_avail_node_and_link(res_online)
to_offline = copy.deepcopy(self.bare_resource_graph)
for i in to_offline.infras:
new_resources = copy.deepcopy(i.resources)
if i.infra_type == 'EE':
new_resources.bandwidth = self.max_used_node_bw
new_resources.cpu = self.max_used_node_cpu
new_resources.mem = self.max_used_node_mem
new_resources.storage = self.max_used_node_storage
i.resources = new_resources
elif i.infra_type == 'SDN-SWITCH':
new_resources.bandwidth = self.max_used_sw_bw
i.resources = new_resources
else:
log.error("Invalid infra type!")
raise
for i, j, k, edge in to_offline.network.edges_iter(data=True, keys=True):
edge.bandwidth = self.link_bw_types[int(edge.bandwidth)]
# copy the actual NF mappings from res_online to the res_offline with
# decreased maximal capacities.
to_offline = NFFGToolBox.merge_nffgs(to_offline, res_online, silent=True)
try:
to_offline.calculate_available_node_res()
to_offline.calculate_available_link_res([])
except RuntimeError as re:
log.error("Offline resource would return invalid mapping after "
"copying the actual mapping state: %s"%re.message)
raise
return to_offline
def get_online_resource(self, res_online, res_offline):
return copy.deepcopy(res_online)
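# --- Editor's sketch (an assumed calling pattern, not taken from this repository;
# --- the log path below is a placeholder) ---
def _example_resource_sharing(full_rg, res_online, log_path="/tmp/res_sharing.log"):
    """Hedged example: derive the offline/online views with DynamicMaxOnlineToAll."""
    strategy = DynamicMaxOnlineToAll(full_rg, log_path)
    res_offline = strategy.get_offline_resource(res_online, None)
    new_res_online = strategy.get_online_resource(res_online, res_offline)
    return new_res_online, res_offline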
class DoubleHundred(AbstractResourceSharingStrategy):
def get_offline_resource(self, res_online, res_offline):
# clean the resource from any unnecessary objects
to_offline = copy.deepcopy(self.bare_resource_graph)
to_offline = NFFGToolBox.merge_nffgs(to_offline, res_online, silent=True)
# the returned copy is independent of any other NFFG objects
return to_offline
def get_online_resource(self, res_online, res_offline):
# For first resource sharing
if self.called_for_first_time:
to_online = copy.deepcopy(res_online)
self.called_for_first_time = False
return to_online
else:
return res_online | apache-2.0 | -5,094,964,472,777,556,000 | 41.373057 | 144 | 0.633973 | false |
awsdocs/aws-doc-sdk-examples | python/example_code/dynamodb/GettingStarted/MoviesItemOps03a.py | 1 | 1449 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to update an item in an Amazon DynamoDB table that stores movies.
The update is performed in two steps:
1. The item is retrieved by using its primary and secondary keys.
2. The item is updated on the client and put into the table with updated data.
The item is retrieved again to verify the update was made as expected.
"""
# snippet-start:[dynamodb.python.codeexample.MoviesItemOps03a]
from decimal import Decimal
from pprint import pprint
import boto3
def update_movie(title, year, rating, plot, actors, dynamodb=None):
if not dynamodb:
dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
table = dynamodb.Table('Movies')
resp = table.get_item(Key={'year': year, 'title': title})
item = resp['Item']
item['info']['rating'] = Decimal(rating)
item['info']['plot'] = plot
item['info']['actors'] = actors
table.put_item(Item=item)
return table.get_item(Key={'year': year, 'title': title})['Item']
if __name__ == '__main__':
movie = update_movie(
"The Big New Movie", 2015, 5.5, "Everything happens all at once.",
["Larry", "Moe", "Curly"])
print("Update movie succeeded:")
pprint(movie, sort_dicts=False)
# snippet-end:[dynamodb.python.codeexample.MoviesItemOps03a]
| apache-2.0 | 2,896,480,785,095,140,400 | 32.5 | 83 | 0.666667 | false |
StartupsPoleEmploi/labonneboite | labonneboite/common/load_data.py | 1 | 8478 | import os
import pickle
import csv
import pandas as pd
import math
from functools import lru_cache, reduce
from collections import defaultdict
USE_ROME_SLICING_DATASET = False # Rome slicing dataset is not ready yet
if USE_ROME_SLICING_DATASET:
OGR_ROME_FILE = "rome_slicing_dataset/ogr_rome_mapping.csv"
ROME_FILE = "rome_slicing_dataset/rome_labels.csv"
ROME_NAF_FILE = "rome_slicing_dataset/rome_naf_mapping.csv"
else:
OGR_ROME_FILE = "ogr_rome_mapping.csv"
ROME_FILE = "rome_labels.csv"
ROME_NAF_FILE = "rome_naf_mapping.csv"
def load_file(func, filename):
full_filename = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "data/%s" % filename)
return func(full_filename)
def load_pickle_file(filename):
    def f(full_filename):
        # pickle data must be read in binary mode (required under Python 3)
        return pickle.load(open(full_filename, "rb"))
    return load_file(f, filename)
def load_pd_dataframe(filename, delimiter=',', dtype=None):
    def f(full_filename):
        # forward the delimiter to pandas instead of silently ignoring it
        return pd.read_csv(open(full_filename, "r"), sep=delimiter, dtype=dtype)
    return load_file(f, filename)
def load_csv_file(filename, delimiter='|'):
def f(full_filename):
csv_file = open(full_filename, 'r')
reader = csv.reader(csv_file, delimiter=delimiter)
return reader
reader = load_file(f, filename)
rows = []
len_previous_row = None
for row in reader:
if len_previous_row:
# at least second line of CSV file
if len(row) == 0:
# skip empty rows
continue
elif len(row) != len_previous_row:
raise IndexError(
"found row with abnormal number of fields : %s" % row)
rows.append(row)
else:
# first line of CSV file: headers should be ignored
pass
len_previous_row = len(row)
return rows
def load_rows_as_set(rows):
for row in rows:
if len(row) != 1:
raise IndexError("wrong number of fields")
return set([row[0] for row in rows])
def load_rows_as_dict(rows):
d = {}
for row in rows:
if len(row) != 2:
raise IndexError("wrong number of fields")
if row[0] in d:
raise ValueError("duplicate key")
d[row[0]] = row[1]
return d
def load_rows_as_dict_of_dict(rows):
d = {}
for row in rows:
if len(row) != 3:
raise IndexError("wrong number of fields")
# values of 3 fields
f1 = row[0]
f2 = row[1]
f3 = row[2]
if f1 in d:
if f2 in d[f1]:
raise ValueError("duplicate key")
else:
d[f1][f2] = f3
else:
d[f1] = {f2: f3}
return d
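# Editor's note: a hedged, self-contained illustration of the helper above
# (the example rows are invented, not taken from the project's CSV data).
def _example_rows_as_dict_of_dict():
    rows = [("62", "M1607", "mail"), ("62", "N1101", "phone"), ("75", "M1607", "mail")]
    # returns {"62": {"M1607": "mail", "N1101": "phone"}, "75": {"M1607": "mail"}}
    return load_rows_as_dict_of_dict(rows)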
@lru_cache(maxsize=None)
def load_related_rome_areas():
"""
Build a dict with department code (code insee) as keys and area code as values (bassins d'emploi).
Used for PSE study in 2021.
"""
rows = load_csv_file("lbb-pse_bassin-emploi_code-insee.csv", delimiter=',')
return reduce(reduceRelateRomesAreas, rows, {})
def reduceRelateRomesAreas(aggr, row):
[code_insee, code_area] = row
aggr[code_insee] = code_area
return aggr
@lru_cache(maxsize=None)
def load_related_rome():
"""
Build a dict with area code (bassin d'emploi) as keys.
The values are dict with rome code as keys and a list of related rome codes as values.
Each related rome is a dict with `rome` and `score` properties.
Used for PSE study.
"""
rows = load_csv_file("lbb-pse_bassin-emploi_rome-connexe.csv", delimiter=',')
return reduce(reduceRelateRomes, rows, {})
def reduceRelateRomes(aggr, row):
[code_area, rome, rome_connexe, score] = row
entry_code_area = aggr.get(code_area, {})
entry_rome = entry_code_area.get(rome, [])
entry_rome.append({'rome': rome_connexe, 'score': float(score)})
entry_code_area[rome] = entry_rome
aggr[code_area] = entry_code_area
return aggr
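# Editor's note: hedged sketch of the mapping produced by load_related_rome()
# above (the area code, ROME codes and scores below are invented examples):
#     {
#         "1801": {                                   # bassin d'emploi
#             "M1607": [{"rome": "M1608", "score": 0.72},
#                       {"rome": "N1101", "score": 0.31}],
#         },
#     }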
@lru_cache(maxsize=None)
def load_city_codes():
rows = load_csv_file("city_codes.csv")
commune_id_to_commune_name = load_rows_as_dict(rows)
return commune_id_to_commune_name
@lru_cache(maxsize=None)
def load_contact_modes():
"""
Use comma delimiter instead of pipe so that it is recognized by github
and can easily be edited online by the intrapreneurs.
"""
rows = load_csv_file("contact_modes.csv", delimiter=',')
naf_prefix_to_rome_to_contact_mode = load_rows_as_dict_of_dict(rows)
return naf_prefix_to_rome_to_contact_mode
@lru_cache(maxsize=None)
def load_ogr_labels():
rows = load_csv_file("ogr_labels.csv")
ogr_to_label = load_rows_as_dict(rows)
return ogr_to_label
@lru_cache(maxsize=None)
def load_groupements_employeurs():
rows = load_csv_file("groupements_employeurs.csv")
sirets = load_rows_as_set(rows)
return sirets
@lru_cache(maxsize=None)
def load_ogr_rome_mapping():
rows = load_csv_file(OGR_ROME_FILE)
OGR_COLUMN = 0
ROME_COLUMN = 1
ogr_to_rome = {}
for row in rows:
ogr = row[OGR_COLUMN]
if ogr not in load_ogr_labels():
raise ValueError("missing label for OGR %s" % ogr)
rome = row[ROME_COLUMN]
if rome not in load_rome_labels():
raise ValueError("missing label for ROME %s" % rome)
ogr_to_rome[ogr] = rome
return ogr_to_rome
@lru_cache(maxsize=None)
def load_rome_labels():
rows = load_csv_file(ROME_FILE)
rome_to_label = load_rows_as_dict(rows)
return rome_to_label
@lru_cache(maxsize=None)
def load_naf_labels():
rows = load_csv_file("naf_labels.csv")
naf_to_label = load_rows_as_dict(rows)
return naf_to_label
@lru_cache(maxsize=None)
def load_rome_naf_mapping():
return load_csv_file(ROME_NAF_FILE, delimiter=',')
@lru_cache(maxsize=None)
def load_metiers_tension():
csv_metiers_tension = load_csv_file("metiers_tension.csv", ',')
rome_to_tension = defaultdict(int)
for row in csv_metiers_tension:
tension_pct = row[2]
rome_code = row[3]
# FIXME : remove rows where tension is #N/A in the dataset, to remove this ugly check ?
if tension_pct != '#N/A':
tension_pct = float(tension_pct)
if 0 <= tension_pct <= 100:
# As a single ROME can have multiple tensions,
# It has been decided to take the higher tension for a rome
rome_to_tension[rome_code] = max(rome_to_tension[rome_code], tension_pct)
else:
raise ValueError
return rome_to_tension
#Used for PSE study, it returns a list of SIRET that must not b be seen on LBB
@lru_cache(maxsize=None)
def load_siret_to_remove():
rows = load_csv_file("untreated_BB.csv", ',')
sirets_to_remove = load_rows_as_set(rows)
return sirets_to_remove
#Used by importer job to extract etablissement
@lru_cache(maxsize=None)
def load_effectif_labels():
'''
    The dataframe to load looks like this.
code label
0 0 0-0
1 1 1-2
2 2 3-5
3 3 6-9
4 11 10-19
5 12 20-49
6 21 50-99
7 22 100-199
8 31 200-249
9 32 250-499
10 41 500-999
11 42 1000-1999
12 51 2000-4999
13 52 5000-9999
14 53 10000+
'''
def create_column(row, which='start_effectif'):
'''
        From the label, we create start and end columns that delimit the interval.
        They let us determine, from the number of employees in an office, which category the office belongs to.
'''
        # we split on the label, which has the form "10-19" or "10000+"
splitted_label = row['label'].split('-')
if len(splitted_label) == 1: #10000+
value = math.inf if which == 'end_effectif' else 10000
else:
if which == 'start_effectif':
value = int(splitted_label[0])
else:
value = int(splitted_label[1])
return value
df = load_pd_dataframe("helpers/effectif_labels.csv", ',', dtype={'code':str})
df['start_effectif'] = df.apply(lambda row: create_column(row,'start_effectif'), axis=1)
df['end_effectif'] = df.apply(lambda row: create_column(row,'end_effectif'), axis=1)
return df
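# Editor's note: a hedged usage sketch for load_effectif_labels(); the employee
# count below is an invented example, not project data.
def _example_effectif_category(n_employees=42):
    df = load_effectif_labels()
    # keep the row whose [start_effectif, end_effectif] interval contains n_employees
    match = df[(df['start_effectif'] <= n_employees) & (df['end_effectif'] >= n_employees)]
    return match['label'].iloc[0]  # e.g. "20-49" for 42 employees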
OGR_ROME_CODES = load_ogr_rome_mapping()
ROME_CODES = list(OGR_ROME_CODES.values())
| agpl-3.0 | -1,503,132,164,283,685,000 | 28.4375 | 134 | 0.607926 | false |
timothydmorton/transit | setup.py | 1 | 4320 | #!/usr/bin/env python
# encoding: utf-8
import re
import os
try:
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
except ImportError:
from distutils.core import setup, Extension
from distutils.command.build_ext import build_ext as _build_ext
def find_boost(hint=None, verbose=True):
"""
Find the location of the Boost include directory. This will return
``None`` on failure.
"""
# List the standard locations including a user supplied hint.
search_dirs = [] if hint is None else hint
search_dirs += [
"/usr/local/include",
"/usr/local/homebrew/include",
"/opt/local/var/macports/software",
"/opt/local/include",
"/usr/include",
"/usr/include/local",
]
# Loop over search paths and check for the existence of the required
# header.
for d in search_dirs:
path = os.path.join(d, "boost", "math", "special_functions",
"ellint_3.hpp")
if os.path.exists(path):
# Determine the version.
vf = os.path.join(d, "boost", "version.hpp")
if not os.path.exists(vf):
continue
src = open(vf, "r").read()
v = re.findall("#define BOOST_LIB_VERSION \"(.+)\"", src)
if not len(v):
continue
v = v[0]
if verbose:
print("Found Boost version {0} in: {1}".format(v, d))
return d
return None
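# Editor's note: a hedged usage sketch; the hint path below is an assumption,
# not a location required by this project.
def _example_find_boost():
    include_dir = find_boost(hint=["/opt/boost/include"], verbose=False)
    if include_dir is None:
        raise RuntimeError("Boost headers not found; install Boost or pass a hint")
    return include_dir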
class build_ext(_build_ext):
"""
A custom extension builder that finds the include directories for Boost.
"""
def build_extension(self, ext):
dirs = ext.include_dirs + self.compiler.include_dirs
# Look for the Boost headers and make sure that we can find them.
boost_include = find_boost(hint=dirs)
if boost_include is None:
raise RuntimeError("Required library Boost not found. "
"Check the documentation for solutions.")
# Update the extension's include directories.
ext.include_dirs += [boost_include]
# Run the standard build procedure.
_build_ext.build_extension(self, ext)
if __name__ == "__main__":
import sys
import numpy
from Cython.Build import cythonize
# Publish the library to PyPI.
if "publish" in sys.argv[-1]:
os.system("python setup.py sdist upload")
sys.exit()
# Choose libraries to link.
libraries = []
if os.name == "posix":
libraries.append("m")
# Specify the include directories.
include_dirs = [
"include",
numpy.get_include(),
]
# The source files.
src = [
"transit/_transit.pyx",
# "transit/_transit.c",
"src/quad.cpp",
# "src/driver.cpp",
]
# Set up the extension.
ext = Extension("transit._transit", sources=src,
libraries=libraries, include_dirs=include_dirs)
# Hackishly inject a constant into builtins to enable importing of the
# package before the library is built.
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
builtins.__TRANSIT_SETUP__ = True
import transit
# Execute the setup command.
desc = open("README.rst").read()
setup(
name="transit",
version=transit.__version__,
author="Daniel Foreman-Mackey",
author_email="[email protected]",
packages=["transit"],
py_modules=["transit.tests"],
ext_modules=cythonize([ext]),
url="http://github.com/dfm/transit",
license="MIT",
description="A Python library for computing the light curves of "
"transiting planets",
long_description=desc,
package_data={"": ["README.rst", "LICENSE", "include/*.h", ]},
include_package_data=True,
cmdclass=dict(build_ext=build_ext),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
test_suite="nose.collector",
)
| mit | -2,627,024,856,407,422,000 | 29.20979 | 76 | 0.577778 | false |
utzig/rastreio | rastreio/rastreio.py | 1 | 3738 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os.path
import re
import sys
import requests
import gevent
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
COMMENT = r"^\s*#"  # regex matching comment lines in the config file
class TableParser(HTMLParser):
"""
This class is used to parse the table data received back by the
online lookup for each tracking code
"""
def __init__(self):
HTMLParser.reset(self)
self.inside_table = False
self.table_data = []
self.rowspan = 1
self.strict = False
self.convert_charrefs = False
def handle_starttag(self, tag, attrs):
if tag == 'table':
self.inside_table = True
self.current_row = 0
self.current_col = 0
if self.inside_table:
if tag == 'td':
if len(attrs) > 0 and attrs[0][0] == 'rowspan':
self.rowspan = int(attrs[0][1])
def handle_endtag(self, tag):
if self.inside_table:
if tag == 'tr':
self.rowspan -= 1
if self.rowspan == 0:
self.current_row += 1
self.current_col = 0
elif tag == 'td':
self.current_col += 1
if tag == 'table':
self.inside_table = False
def handle_data(self, data):
if self.inside_table and not re.match('\n', data):
stripped_data = " ".join(data.split())
if stripped_data != '':
value = (self.current_row, self.current_col, stripped_data)
self.table_data.append(value)
def lookup(tracking_code):
"""
    Receives the tracking code as a string and performs the online lookup.
    Returns the HTML received from the lookup page.
"""
params = 'Z_ACTION=Search&P_TIPO=001&P_LINGUA=001&P_COD_UNI={}'.format(tracking_code)
url = 'http://websro.correios.com.br/sro_bin/txect01$.QueryList'
return requests.post(url, params, timeout=10).text
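# Editor's note: a hedged usage sketch; the tracking code below is made up and
# the Correios endpoint must be reachable for lookup() to succeed.
def _example_parse_tracking(tracking_code='SS123456789BR'):
    html = lookup(tracking_code)  # may raise requests.ConnectionError / Timeout
    parser = TableParser()
    parser.feed(html)
    # parser.table_data holds one (row, col, text) tuple per table cell
    return parser.table_data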
def pretty_print(tracking_code, html_data):
parser = TableParser()
parser.feed(html_data)
last_row = 1
texts = []
texts.append(tracking_code + ':')
if len(parser.table_data) > 0:
line = ''
for data in parser.table_data:
row, col, text = data
if row == 0: # ignoring the first row because it's header info...
continue
if col != 0:
line += '| '
elif row != 1:
line += '\n'
line += '{} '.format(text)
texts.append(line)
else:
        texts.append('O sistema não possui dados sobre o objeto informado.')  # "The system has no data about the given object."
texts.append('')
return '\n'.join(texts)
def get_output_for(tracking_code, outputs):
try:
response_html = lookup(tracking_code)
output = pretty_print(tracking_code, response_html)
except requests.ConnectionError as e:
        output = '{}:\nErro de conexão ao servidor.'.format(tracking_code)  # "Connection error to the server."
outputs.append(output)
def main():
config_file = os.path.expanduser("~") + "/.rastreio.conf"
try:
with open(config_file) as f:
lines = f.readlines()
f.close()
except IOError:
        print('Arquivo de entrada não encontrado!')  # "Input file not found!"
sys.exit()
greenlets = []
outputs = []
for line in lines:
if not re.match(COMMENT, line):
tracking_code = line.rstrip('\n')
greenlets.append(gevent.spawn(get_output_for, tracking_code, outputs))
# for connection errors just let requests timeout...
gevent.joinall(greenlets)
for output in outputs:
print(output)
if __name__ == "__main__":
main()
| mit | -4,670,546,664,483,328,000 | 28.179688 | 89 | 0.564926 | false |
spgill/python-spgill | spgill/printer/modules/utility.py | 1 | 2449 | # stdlib imports
import math
# vendor imports
# local imports
from spgill.printer import commands
class UtilityModule:
"""Mixin for utility functions. Nuff said."""
def progress(self, n, caption=None, zeropad=False, width=None):
"""Write a progress bar with `n` from 0 to 100"""
width = width or self.currentWidth()
if zeropad:
text = f"{n:0>3}% "
else:
text = f"{n: >3}% "
if caption:
text += caption + " "
self.text(text)
progwidth = width - len(text)
darkwidth = math.floor(n / 100 * progwidth)
lightwidth = progwidth - darkwidth
self.write(b"\xB2" * darkwidth)
self.write(b"\xB0" * lightwidth)
return self
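    # Editor's note (illustrative only, not part of the original API): assuming a
    # printer object `p` that mixes in UtilityModule and reports a 32-column
    # width, a hypothetical call such as
    #     p.progress(40, caption="Copying")
    # writes " 40% Copying " and then fills the rest of the line with
    # floor(40/100 * remaining_width) dark blocks (0xB2) and light blocks (0xB0).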
def characterTable(self):
"""Write a 32->255 (0x20->0xff) character table"""
self.invert(True)
self.split(
"Current encoding:",
self.flags.encoding or self.connection.props.printerEncoding.value,
)
self.invert(False)
j = 0
for i in range(256)[32:]:
self.text(f"0x{i:0>2x}")
self.text(" ")
self.write(commands.char(i))
self.format(small=True)
self.write(commands.char(i))
self.format()
self.text(" ")
j += 1
if j == 4:
j = 0
self.br()
return self
def hexdump(self, stream):
self.format(small=True)
self.text(" Offset ")
flip = True
for i in range(16):
self.format(underline=flip)
self.text(f"{i:0>2x}")
flip = not flip
self.br()
i = 0
while True:
chunk = stream.read(16)
if not chunk:
break
self.text(f"{i:0>8x} ")
flip = True
for char in chunk:
self.format(underline=flip)
flip = not flip
self.text(f"{char:0>2x}")
if len(chunk) < 16:
self.text(" " * (16 - len(chunk)))
self.text(" ")
for char in chunk:
if char < 32:
self.invert(True)
self.text(" ")
self.invert(False)
else:
self.write(commands.char(char))
self.br()
i += 16
| mit | 6,182,663,993,454,536,000 | 22.32381 | 79 | 0.462638 | false |
pkhorrami4/make_chen_dataset | code/compute_global_stats.py | 1 | 2992 | import argparse
import os
import numpy
def compute_mean_std(X):
mean_train = numpy.mean(X, axis=0)
std_train = numpy.std(X, axis=0)
# print mu_train.shape, std_train.shape
return mean_train, std_train
def parse_args():
parser = argparse.ArgumentParser(
description='Compute mean and std of '
'facial landmark features '
' and facial landmark differences '
'and save them as .npy files.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--dataset_path',
dest='dataset_path',
default='/data/Expr_Recog/Chen_Huang_avdata_python_augmented/npy_files/all/',
help='Folder containing landmark features.')
parser.add_argument(
'--feat_type',
dest='feat_type',
choices=['landmarks', 'landmarks_diff'],
help='Which feature to compute stats.')
parser.add_argument(
'--fold_type',
dest='fold_type',
choices=['subj_dep', 'subj_ind'],
help='Use subject dependent or indpendent folds.')
parser.add_argument(
'--save_path',
dest='save_path',
default='/data/Expr_Recog/Chen_Huang_avdata_python_augmented/npy_files/all/global_stats/',
help='Folder to save output .npy files.')
args = parser.parse_args()
return args
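# Editor's note: a hypothetical invocation, relying on the argparse defaults
# above for --dataset_path and --save_path:
#     python compute_global_stats.py --feat_type landmarks_diff --fold_type subj_dep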
if __name__ == "__main__":
args = parse_args()
dataset_path = args.dataset_path
save_path = args.save_path
feat_type = args.feat_type
fold_type = args.fold_type
save_path = os.path.join(save_path, feat_type)
fold_inds = numpy.load(
os.path.join(dataset_path, 'folds', fold_type, 'fold_inds.npy'))
if feat_type == 'landmarks':
data = numpy.load(os.path.join(dataset_path, 'landmarks.npy'))
elif feat_type == 'landmarks_diff':
data = numpy.load(os.path.join(dataset_path, 'landmarks_diff.npy'))
global_stats = {}
fold = 0
for train_inds, test_inds in fold_inds:
print 'Fold %d' % fold
print 'Train, Test Split sizes: ', train_inds.shape, test_inds.shape
data_train = data[train_inds, :]
data_test = data[test_inds, :]
print 'data_train: %s --- data_test: %s' % (data_train.shape,
data_test.shape)
mean_train, std_train = compute_mean_std(data_train)
# data_train_norm = (data_train - mean_train) / (std_train + 1e-6)
# print numpy.mean(landmark_diff_train_norm, axis=0)
# print numpy.std(landmark_diff_train_norm, axis=0)
# print landmark_diff_train_norm.shape
global_stats[fold] = {}
global_stats[fold]['mean'] = mean_train
global_stats[fold]['std'] = std_train
fold += 1
print ''
print 'Saving to .npy file.'
if not os.path.exists(save_path):
os.makedirs(save_path)
numpy.save(
os.path.join(save_path, 'global_stats_' + fold_type + '.npy'),
global_stats)
| gpl-3.0 | 2,664,364,548,319,919,000 | 31.172043 | 98 | 0.597259 | false |
Passtechsoft/TPEAlpGen | blender/release/scripts/addons/render_auto_tile_size.py | 1 | 15195 | # BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# END GPL LICENSE BLOCK #####
bl_info = {
"name": "Auto Tile Size",
"description": "Estimate and set the tile size that will render the fastest",
"author": "Greg Zaal",
"version": (3, 1, 1),
"blender": (2, 74, 0),
"location": "Render Settings > Performance",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php?title=Extensions:2.6/Py/Scripts/Render/Auto_Tile_Size",
"category": "Render",
}
import bpy
from bpy.app.handlers import persistent
from math import ceil, floor, sqrt
SUPPORTED_RENDER_ENGINES = {'CYCLES', 'BLENDER_RENDER'}
TILE_SIZES = (
('16', "16", "16 x 16"),
('32', "32", "32 x 32"),
('64', "64", "64 x 64"),
('128', "128", "128 x 128"),
('256', "256", "256 x 256"),
('512', "512", "512 x 512"),
('1024', "1024", "1024 x 1024"),
)
def _update_tile_size(self, context):
do_set_tile_size(context)
class AutoTileSizeSettings(bpy.types.PropertyGroup):
gpu_choice = bpy.props.EnumProperty(
name="Target GPU Tile Size",
items=TILE_SIZES,
default='256',
description="Square dimensions of tiles for GPU rendering",
update=_update_tile_size)
cpu_choice = bpy.props.EnumProperty(
name="Target CPU Tile Size",
items=TILE_SIZES,
default='32',
description="Square dimensions of tiles for CPU rendering",
update=_update_tile_size)
bi_choice = bpy.props.EnumProperty(
name="Target CPU Tile Size",
items=TILE_SIZES,
default='64',
description="Square dimensions of tiles",
update=_update_tile_size)
gpu_custom = bpy.props.IntProperty(
name="Target Size",
default=256,
min=8, # same as blender's own limits
max=65536,
description="Custom target tile size for GPU rendering",
update=_update_tile_size)
cpu_custom = bpy.props.IntProperty(
name="Target Size",
default=32,
min=8, # same as blender's own limits
max=65536,
description="Custom target tile size for CPU rendering",
update=_update_tile_size)
bi_custom = bpy.props.IntProperty(
name="Target Size",
default=64,
min=8, # same as blender's own limits
max=65536,
description="Custom target tile size",
update=_update_tile_size)
target_type = bpy.props.EnumProperty(
name="Target tile size",
items=(
('po2', "Po2", "A choice between powers of 2 (16, 32, 64...)"),
('custom', "Custom", "Choose any number as the tile size target")),
default='po2',
description="Method of choosing the target tile size",
update=_update_tile_size)
use_optimal = bpy.props.BoolProperty(
name="Optimal Tiles",
default=True,
description="Try to find a similar tile size for best performance, instead of using exact selected one",
update=_update_tile_size)
is_enabled = bpy.props.BoolProperty(
name="Auto Tile Size",
default=True,
description="Calculate the best tile size based on factors of the render size and the chosen target",
update=_update_tile_size)
use_advanced_ui = bpy.props.BoolProperty(
name="Advanced Settings",
default=False,
description="Show extra options for more control over the calculated tile size")
thread_error_correct = bpy.props.BoolProperty(
name="Fix",
default=True,
description="Reduce the tile size so that all your available threads are used",
update=_update_tile_size)
# Internally used props (not for GUI)
first_run = bpy.props.BoolProperty(default=True, options={'HIDDEN'})
threads_error = bpy.props.BoolProperty(options={'HIDDEN'})
num_tiles = bpy.props.IntVectorProperty(default=(0, 0), size=2, options={'HIDDEN'})
prev_choice = bpy.props.StringProperty(default='', options={'HIDDEN'})
prev_engine = bpy.props.StringProperty(default='', options={'HIDDEN'})
prev_device = bpy.props.StringProperty(default='', options={'HIDDEN'})
prev_res = bpy.props.IntVectorProperty(default=(0, 0), size=2, options={'HIDDEN'})
prev_border = bpy.props.BoolProperty(default=False, options={'HIDDEN'})
prev_border_res = bpy.props.FloatVectorProperty(default=(0, 0, 0, 0), size=4, options={'HIDDEN'})
prev_actual_tile_size = bpy.props.IntVectorProperty(default=(0, 0), size=2, options={'HIDDEN'})
prev_threads = bpy.props.IntProperty(default=0, options={'HIDDEN'})
def ats_poll(context):
scene = context.scene
if scene.render.engine not in SUPPORTED_RENDER_ENGINES or not scene.ats_settings.is_enabled:
return False
return True
def engine_is_gpu(engine, device, userpref):
return engine == 'CYCLES' and device == 'GPU' and userpref.system.compute_device_type != 'NONE'
def get_tilesize_prop(engine, device, userpref):
target_type = "_choice" if bpy.context.scene.ats_settings.target_type == 'po2' else "_custom"
if engine_is_gpu(engine, device, userpref):
return ("gpu" + target_type)
elif engine == 'CYCLES':
return ("cpu" + target_type)
return ("bi" + target_type)
@persistent
def on_scene_update(scene):
context = bpy.context
if not ats_poll(context):
return
userpref = context.user_preferences
settings = scene.ats_settings
render = scene.render
engine = render.engine
# scene.cycles might not always exist (Cycles is an addon)...
device = scene.cycles.device if engine == 'CYCLES' else settings.prev_device
border = render.use_border
threads = get_threads(context, device)
choice = getattr(settings, get_tilesize_prop(engine, device, userpref))
res = get_actual_res(render)
actual_ts = (render.tile_x, render.tile_y)
border_res = (render.border_min_x, render.border_min_y, render.border_max_x, render.border_max_y)
# detect relevant changes in scene
do_change = (engine != settings.prev_engine or
device != settings.prev_device or
border != settings.prev_border or
threads != settings.prev_threads or
str(choice) != settings.prev_choice or
res != settings.prev_res[:] or
border_res != settings.prev_border_res[:] or
actual_ts != settings.prev_actual_tile_size[:])
if do_change:
do_set_tile_size(context)
def get_actual_res(render):
rend_percent = render.resolution_percentage * 0.01
# floor is implicitly done by int conversion...
return (int(render.resolution_x * rend_percent), int(render.resolution_y * rend_percent))
def get_threads(context, device):
render = context.scene.render
engine = render.engine
userpref = context.user_preferences
if engine_is_gpu(engine, device, userpref):
gpu_device_str = userpref.system.compute_device
if 'MULTI' in gpu_device_str:
threads = int(gpu_device_str.split('_')[-1])
else:
threads = 1
else:
threads = render.threads
return threads
def max_tile_size(threads, xres, yres):
''' Give the largest tile size that will still use all threads '''
render_area = xres * yres
tile_area = render_area / threads
tile_length = sqrt(tile_area)
# lists: num x tiles, num y tiles, squareness, total tiles
perfect_attempts = [] # attempts with correct number of tiles
attempts = [] # all attempts, even if incorrect number of tiles
axes = [xres, yres]
funcs = [floor, ceil]
for axis in axes:
sec_axis = yres if axis == xres else xres
for func in funcs:
primary = func(axis / tile_length)
if primary > 0:
secondary = threads / primary
ts_p = axis/primary
ts_s = sec_axis/secondary
squareness = max(ts_p, ts_s) - min(ts_p, ts_s)
attempt = [primary if axis == xres else secondary, primary if axis != xres else secondary, squareness, primary * secondary]
if attempt not in attempts:
attempts.append(attempt)
if secondary.is_integer(): # will only be an integer if there are the right number of tiles
perfect_attempts.append(attempt)
if perfect_attempts: # prefer to use attempt that has exactly the right number of tiles
attempts = perfect_attempts
attempt = sorted(attempts, key=lambda k: k[2])[0] # pick set with most square tiles
numtiles_x = round(attempt[0])
numtiles_y = round(attempt[1])
tile_x = ceil(xres / numtiles_x)
tile_y = ceil(yres / numtiles_y)
return (tile_x, tile_y)
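# Editor's note: a worked example (invented numbers) for max_tile_size() above.
# With threads=4 on a 1920x1080 frame: tile_area = 518400, tile_length ~= 720.
# The floor/ceil splits of each axis yield candidates such as 2x2 tiles of
# 960x540 -- the most square option that still gives exactly 4 tiles -- so
# max_tile_size(4, 1920, 1080) returns (960, 540) and every thread gets a tile.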
def do_set_tile_size(context):
if not ats_poll(context):
return False
scene = context.scene
userpref = context.user_preferences
settings = scene.ats_settings
render = scene.render
engine = render.engine
device = scene.cycles.device if engine == 'CYCLES' else settings.prev_device
border = render.use_border
realxres, realyres = xres, yres = res = get_actual_res(scene.render)
if border:
xres = round(xres * (render.border_max_x - render.border_min_x))
yres = round(yres * (render.border_max_y - render.border_min_y))
choice = getattr(settings, get_tilesize_prop(engine, device, userpref))
target = int(choice)
numtiles_x = ceil(xres / target)
numtiles_y = ceil(yres / target)
settings.num_tiles = (numtiles_x, numtiles_y)
if settings.use_optimal:
tile_x = ceil(xres / numtiles_x)
tile_y = ceil(yres / numtiles_y)
else:
tile_x = target
tile_y = target
# Print tile size (for debug purposes)
# print("Tile size: %dx%d (%dx%d tiles)" % (tile_x, tile_y, ceil(xres / tile_x), ceil(yres / tile_y)))
# Detect if there are fewer tiles than available threads
threads = get_threads(context, device)
if ((numtiles_x * numtiles_y) < threads):
settings.threads_error = True
if settings.thread_error_correct:
tile_x, tile_y = max_tile_size(threads, xres, yres)
settings.num_tiles = (ceil(xres/tile_x), ceil(yres/tile_y))
else:
settings.threads_error = False
# Make sure tile sizes are within the internal limit
tile_x = max(8, tile_x)
tile_y = max(8, tile_y)
tile_x = min(65536, tile_x)
tile_y = min(65536, tile_y)
render.tile_x = tile_x
render.tile_y = tile_y
settings.prev_engine = engine
settings.prev_device = device
settings.prev_border = border
settings.prev_threads = threads
settings.prev_choice = str(choice)
settings.prev_res = res
settings.prev_border_res = (render.border_min_x, render.border_min_y, render.border_max_x, render.border_max_y)
settings.prev_actual_tile_size = (tile_x, tile_y)
settings.first_run = False
return True
class SetTileSize(bpy.types.Operator):
"""The first render may not obey the tile-size set here"""
bl_idname = "render.autotilesize_set"
bl_label = "Set"
@classmethod
def poll(clss, context):
return ats_poll(context)
def execute(self, context):
if do_set_tile_size(context):
return {'FINISHED'}
return {'CANCELLED'}
# ##### INTERFACE #####
def ui_layout(engine, layout, context):
scene = context.scene
userpref = context.user_preferences
settings = scene.ats_settings
render = scene.render
engine = render.engine
device = scene.cycles.device if engine == 'CYCLES' else settings.prev_device
col = layout.column(align=True)
sub = col.column(align=True)
row = sub.row(align=True)
row.prop(settings, "is_enabled", toggle=True)
row.prop(settings, "use_advanced_ui", toggle=True, text="", icon='PREFERENCES')
sub = col.column(align=False)
sub.enabled = settings.is_enabled
if settings.use_advanced_ui:
row = sub.row(align=True)
row.label("Target tile size:")
row.separator()
row.prop(settings, "target_type", expand=True)
row = sub.row(align=True)
row.prop(settings, get_tilesize_prop(engine, device, userpref), expand=True)
sub.prop(settings, "use_optimal", text="Calculate Optimal Size")
sub.label("Number of tiles: %s x %s (Total: %s)" % (settings.num_tiles[0], settings.num_tiles[1], settings.num_tiles[0] * settings.num_tiles[1]))
if settings.first_run:
sub = layout.column(align=True)
sub.operator("render.autotilesize_set", text="First-render fix", icon='ERROR')
elif settings.prev_device != device:
sub = layout.column(align=True)
sub.operator("render.autotilesize_set", text="Device changed - fix", icon='ERROR')
if (render.tile_x / render.tile_y > 2) or (render.tile_x / render.tile_y < 0.5): # if not very square tile
sub.label(text="Warning: Tile size is not very square", icon='ERROR')
sub.label(text=" Try a slightly different resolution")
if settings.threads_error:
row = sub.row(align=True)
row.alignment = 'CENTER'
row.label(text="Warning: Fewer tiles than threads", icon='ERROR')
row.prop(settings, 'thread_error_correct')
def menu_func_cycles(self, context):
ui_layout('CYCLES', self.layout, context)
def menu_func_bi(self, context):
ui_layout('BLENDER_RENDER', self.layout, context)
# ##### REGISTRATION #####
def register():
bpy.utils.register_module(__name__)
bpy.types.Scene.ats_settings = bpy.props.PointerProperty(type=AutoTileSizeSettings)
# Note, the Cycles addon must be registered first, otherwise this panel doesn't exist - better be safe here!
cycles_panel = getattr(bpy.types, "CyclesRender_PT_performance", None)
if cycles_panel is not None:
cycles_panel.append(menu_func_cycles)
bpy.types.RENDER_PT_performance.append(menu_func_bi)
bpy.app.handlers.scene_update_post.append(on_scene_update)
def unregister():
bpy.app.handlers.scene_update_post.remove(on_scene_update)
bpy.types.RENDER_PT_performance.remove(menu_func_bi)
cycles_panel = getattr(bpy.types, "CyclesRender_PT_performance", None)
if cycles_panel is not None:
cycles_panel.remove(menu_func_cycles)
del bpy.types.Scene.ats_settings
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
| gpl-3.0 | 1,330,872,926,750,225,400 | 34.502336 | 153 | 0.642777 | false |
misli/cmsplugin-articles | cmsplugin_articles/models.py | 1 | 4565 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement
from cms.models import Page, CMSPlugin, PlaceholderField
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from polymorphic.models import PolymorphicModel
from .utils import get_html_field
# allow different implementation of HTMLField
HTMLField = get_html_field()
@python_2_unicode_compatible
class Article(PolymorphicModel):
namespace = models.CharField(_('Application instance'), max_length=200)
title = models.CharField(_('Title'), max_length=250)
slug = models.SlugField(_('Slug'), max_length=250, db_index=True, unique=False)
pub_date = models.DateField(_('Publication date'), editable=True)
perex = HTMLField(_('Perex'), blank=True, default='')
text = PlaceholderField('article_text')
page_title = models.CharField(_('Page title'), max_length=250, blank=True, null=True,
help_text=_('Overwrite the title (html title tag)'))
menu_title = models.CharField(_('Menu title'), max_length=250, blank=True, null=True,
help_text=_('Overwrite the title in the menu'))
meta_desc = models.TextField(_('Meta description'), blank=True, default='',
help_text=_('The text displayed in search engines.'))
public = models.BooleanField(default=False, verbose_name=_('Public'))
class Meta:
ordering = ('-pub_date',)
unique_together = [('pub_date', 'slug')]
verbose_name = _('Article')
verbose_name_plural = _('Articles')
def __str__(self):
return self.title
def save(self, *args, **kwargs):
errors = self._perform_unique_checks([(Article, ('pub_date', 'slug'))])
if errors:
raise ValidationError(errors)
super(Article, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse(
'{}:detail'.format(self.namespace),
kwargs={
'year': '{:%Y}'.format(self.pub_date),
'month':'{:%m}'.format(self.pub_date),
'day': '{:%d}'.format(self.pub_date),
'slug': self.slug,
},
)
def get_edit_url(self):
return reverse('admin:{}_{}_change'.format(self._meta.app_label, self._meta.model_name), args=(self.id,))
def get_page(self):
return Page.objects.get(application_namespace=self.namespace, publisher_is_draft=False)
def get_title(self):
return self.title
def get_page_title(self):
return self.page_title or self.title
def get_menu_title(self):
return self.menu_title or self.title
ARTICLE_TEMPLATES = getattr(settings, 'CMSPLUGIN_ARTICLES_ARTICLE_TEMPLATES', (
('default', _('Default')),
))
LAST_ARTICLES_TEMPLATES = getattr(settings, 'CMSPLUGIN_ARTICLES_LAST_ARTICLES_TEMPLATES', (
('default', _('Default')),
))
@python_2_unicode_compatible
class ArticlePlugin(CMSPlugin):
article = models.ForeignKey(Article, verbose_name=_('Article'))
template = models.CharField(_('Template'), max_length=100, choices=ARTICLE_TEMPLATES,
default=ARTICLE_TEMPLATES[0][0],
help_text=_('The template used to render plugin.'))
def __str__(self):
return self.article.get_title()
@cached_property
def render_template(self):
return 'cmsplugin_articles/article/%s.html' % self.template
@python_2_unicode_compatible
class LastArticlesPlugin(CMSPlugin):
number = models.IntegerField(_('Number of last articles'), default=3)
template = models.CharField(_('Template'), max_length=100, choices=LAST_ARTICLES_TEMPLATES,
default=LAST_ARTICLES_TEMPLATES[0][0],
help_text=_('The template used to render plugin.'))
def __str__(self):
return _('last {} articles').format(self.number)
@cached_property
def articles(self):
return Article.objects.order_by('-pub_date')[:self.number]
@cached_property
def render_template(self):
return 'cmsplugin_articles/last_articles/%s.html' % self.template
| bsd-3-clause | -1,204,766,179,426,961,700 | 36.418033 | 125 | 0.631763 | false |
splotz90/urh | src/urh/ui/ui_options.py | 1 | 25225 | # -*- coding: utf-8 -*-
#
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DialogOptions(object):
def setupUi(self, DialogOptions):
DialogOptions.setObjectName("DialogOptions")
DialogOptions.resize(696, 653)
icon = QtGui.QIcon.fromTheme("configure")
DialogOptions.setWindowIcon(icon)
self.verticalLayout_6 = QtWidgets.QVBoxLayout(DialogOptions)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.tabWidget = QtWidgets.QTabWidget(DialogOptions)
self.tabWidget.setObjectName("tabWidget")
self.tabGeneration = QtWidgets.QWidget()
self.tabGeneration.setObjectName("tabGeneration")
self.layoutWidget = QtWidgets.QWidget(self.tabGeneration)
self.layoutWidget.setGeometry(QtCore.QRect(20, 20, 314, 62))
self.layoutWidget.setObjectName("layoutWidget")
self.gridLayout_4 = QtWidgets.QGridLayout(self.layoutWidget)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.checkBoxDefaultFuzzingPause = QtWidgets.QCheckBox(self.layoutWidget)
self.checkBoxDefaultFuzzingPause.setObjectName("checkBoxDefaultFuzzingPause")
self.gridLayout_4.addWidget(self.checkBoxDefaultFuzzingPause, 0, 0, 1, 2)
self.doubleSpinBoxFuzzingPause = KillerDoubleSpinBox(self.layoutWidget)
self.doubleSpinBoxFuzzingPause.setDecimals(3)
self.doubleSpinBoxFuzzingPause.setMaximum(999999999.0)
self.doubleSpinBoxFuzzingPause.setObjectName("doubleSpinBoxFuzzingPause")
self.gridLayout_4.addWidget(self.doubleSpinBoxFuzzingPause, 1, 0, 1, 1)
self.labelFuzzingSamples = QtWidgets.QLabel(self.layoutWidget)
self.labelFuzzingSamples.setObjectName("labelFuzzingSamples")
self.gridLayout_4.addWidget(self.labelFuzzingSamples, 1, 1, 1, 1)
self.tabWidget.addTab(self.tabGeneration, "")
self.tabView = QtWidgets.QWidget()
self.tabView.setObjectName("tabView")
self.verticalLayout = QtWidgets.QVBoxLayout(self.tabView)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_7 = QtWidgets.QLabel(self.tabView)
self.label_7.setObjectName("label_7")
self.horizontalLayout_2.addWidget(self.label_7)
self.comboBoxDefaultView = QtWidgets.QComboBox(self.tabView)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBoxDefaultView.sizePolicy().hasHeightForWidth())
self.comboBoxDefaultView.setSizePolicy(sizePolicy)
self.comboBoxDefaultView.setObjectName("comboBoxDefaultView")
self.comboBoxDefaultView.addItem("")
self.comboBoxDefaultView.addItem("")
self.comboBoxDefaultView.addItem("")
self.horizontalLayout_2.addWidget(self.comboBoxDefaultView)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.checkBoxShowConfirmCloseDialog = QtWidgets.QCheckBox(self.tabView)
self.checkBoxShowConfirmCloseDialog.setObjectName("checkBoxShowConfirmCloseDialog")
self.verticalLayout.addWidget(self.checkBoxShowConfirmCloseDialog)
self.checkBoxHoldShiftToDrag = QtWidgets.QCheckBox(self.tabView)
self.checkBoxHoldShiftToDrag.setObjectName("checkBoxHoldShiftToDrag")
self.verticalLayout.addWidget(self.checkBoxHoldShiftToDrag)
self.checkBoxPauseTime = QtWidgets.QCheckBox(self.tabView)
self.checkBoxPauseTime.setObjectName("checkBoxPauseTime")
self.verticalLayout.addWidget(self.checkBoxPauseTime)
self.checkBoxAlignLabels = QtWidgets.QCheckBox(self.tabView)
self.checkBoxAlignLabels.setObjectName("checkBoxAlignLabels")
self.verticalLayout.addWidget(self.checkBoxAlignLabels)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_9 = QtWidgets.QLabel(self.tabView)
self.label_9.setObjectName("label_9")
self.horizontalLayout_4.addWidget(self.label_9)
self.comboBoxTheme = QtWidgets.QComboBox(self.tabView)
self.comboBoxTheme.setObjectName("comboBoxTheme")
self.comboBoxTheme.addItem("")
self.comboBoxTheme.addItem("")
self.comboBoxTheme.addItem("")
self.horizontalLayout_4.addWidget(self.comboBoxTheme)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.labelIconTheme = QtWidgets.QLabel(self.tabView)
self.labelIconTheme.setObjectName("labelIconTheme")
self.horizontalLayout_5.addWidget(self.labelIconTheme)
self.comboBoxIconTheme = QtWidgets.QComboBox(self.tabView)
self.comboBoxIconTheme.setObjectName("comboBoxIconTheme")
self.comboBoxIconTheme.addItem("")
self.comboBoxIconTheme.addItem("")
self.horizontalLayout_5.addWidget(self.comboBoxIconTheme)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.groupBoxSpectrogramColormap = QtWidgets.QGroupBox(self.tabView)
self.groupBoxSpectrogramColormap.setObjectName("groupBoxSpectrogramColormap")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBoxSpectrogramColormap)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.scrollAreaSpectrogramColormap = QtWidgets.QScrollArea(self.groupBoxSpectrogramColormap)
self.scrollAreaSpectrogramColormap.setWidgetResizable(True)
self.scrollAreaSpectrogramColormap.setObjectName("scrollAreaSpectrogramColormap")
self.scrollAreaWidgetSpectrogramColormapContents = QtWidgets.QWidget()
self.scrollAreaWidgetSpectrogramColormapContents.setGeometry(QtCore.QRect(0, 0, 644, 316))
self.scrollAreaWidgetSpectrogramColormapContents.setObjectName("scrollAreaWidgetSpectrogramColormapContents")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetSpectrogramColormapContents)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.scrollAreaSpectrogramColormap.setWidget(self.scrollAreaWidgetSpectrogramColormapContents)
self.verticalLayout_2.addWidget(self.scrollAreaSpectrogramColormap)
self.verticalLayout.addWidget(self.groupBoxSpectrogramColormap)
self.tabWidget.addTab(self.tabView, "")
self.tabFieldtypes = QtWidgets.QWidget()
self.tabFieldtypes.setObjectName("tabFieldtypes")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.tabFieldtypes)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.tblLabeltypes = QtWidgets.QTableView(self.tabFieldtypes)
self.tblLabeltypes.setAlternatingRowColors(True)
self.tblLabeltypes.setObjectName("tblLabeltypes")
self.horizontalLayout_3.addWidget(self.tblLabeltypes)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.btnAddLabelType = QtWidgets.QToolButton(self.tabFieldtypes)
icon = QtGui.QIcon.fromTheme("list-add")
self.btnAddLabelType.setIcon(icon)
self.btnAddLabelType.setObjectName("btnAddLabelType")
self.verticalLayout_3.addWidget(self.btnAddLabelType)
self.btnRemoveLabeltype = QtWidgets.QToolButton(self.tabFieldtypes)
icon = QtGui.QIcon.fromTheme("list-remove")
self.btnRemoveLabeltype.setIcon(icon)
self.btnRemoveLabeltype.setObjectName("btnRemoveLabeltype")
self.verticalLayout_3.addWidget(self.btnRemoveLabeltype)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem)
self.horizontalLayout_3.addLayout(self.verticalLayout_3)
self.verticalLayout_5.addLayout(self.horizontalLayout_3)
spacerItem1 = QtWidgets.QSpacerItem(20, 203, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_5.addItem(spacerItem1)
self.tabWidget.addTab(self.tabFieldtypes, "")
self.tab_plugins = QtWidgets.QWidget()
self.tab_plugins.setObjectName("tab_plugins")
self.tabWidget.addTab(self.tab_plugins, "")
self.tabDevices = QtWidgets.QWidget()
self.tabDevices.setObjectName("tabDevices")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.tabDevices)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.listWidgetDevices = QtWidgets.QListWidget(self.tabDevices)
self.listWidgetDevices.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.listWidgetDevices.setAlternatingRowColors(True)
self.listWidgetDevices.setViewMode(QtWidgets.QListView.ListMode)
self.listWidgetDevices.setObjectName("listWidgetDevices")
self.horizontalLayout.addWidget(self.listWidgetDevices)
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.chkBoxDeviceEnabled = QtWidgets.QCheckBox(self.tabDevices)
self.chkBoxDeviceEnabled.setObjectName("chkBoxDeviceEnabled")
self.verticalLayout_7.addWidget(self.chkBoxDeviceEnabled)
self.rbNativeBackend = QtWidgets.QRadioButton(self.tabDevices)
self.rbNativeBackend.setObjectName("rbNativeBackend")
self.verticalLayout_7.addWidget(self.rbNativeBackend)
self.rbGnuradioBackend = QtWidgets.QRadioButton(self.tabDevices)
self.rbGnuradioBackend.setObjectName("rbGnuradioBackend")
self.verticalLayout_7.addWidget(self.rbGnuradioBackend)
self.btnHealthCheck = QtWidgets.QPushButton(self.tabDevices)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnHealthCheck.sizePolicy().hasHeightForWidth())
self.btnHealthCheck.setSizePolicy(sizePolicy)
icon = QtGui.QIcon.fromTheme("heart")
self.btnHealthCheck.setIcon(icon)
self.btnHealthCheck.setObjectName("btnHealthCheck")
self.verticalLayout_7.addWidget(self.btnHealthCheck)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_7.addItem(spacerItem2)
self.horizontalLayout.addLayout(self.verticalLayout_7)
self.verticalLayout_8.addLayout(self.horizontalLayout)
self.lSupport = QtWidgets.QLabel(self.tabDevices)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lSupport.sizePolicy().hasHeightForWidth())
self.lSupport.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.lSupport.setFont(font)
self.lSupport.setStyleSheet("color: green")
self.lSupport.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.lSupport.setObjectName("lSupport")
self.verticalLayout_8.addWidget(self.lSupport)
self.labelWindowsError = QtWidgets.QLabel(self.tabDevices)
self.labelWindowsError.setWordWrap(True)
self.labelWindowsError.setObjectName("labelWindowsError")
self.verticalLayout_8.addWidget(self.labelWindowsError)
self.line = QtWidgets.QFrame(self.tabDevices)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_8.addWidget(self.line)
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_8 = QtWidgets.QLabel(self.tabDevices)
self.label_8.setObjectName("label_8")
self.gridLayout_3.addWidget(self.label_8, 0, 0, 1, 1)
self.spinBoxNumSendingRepeats = QtWidgets.QSpinBox(self.tabDevices)
self.spinBoxNumSendingRepeats.setProperty("showGroupSeparator", False)
self.spinBoxNumSendingRepeats.setMaximum(999999999)
self.spinBoxNumSendingRepeats.setDisplayIntegerBase(10)
self.spinBoxNumSendingRepeats.setObjectName("spinBoxNumSendingRepeats")
self.gridLayout_3.addWidget(self.spinBoxNumSendingRepeats, 0, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(self.tabDevices)
self.label_5.setObjectName("label_5")
self.gridLayout_3.addWidget(self.label_5, 1, 0, 1, 1)
self.doubleSpinBoxRAMThreshold = QtWidgets.QDoubleSpinBox(self.tabDevices)
self.doubleSpinBoxRAMThreshold.setMinimum(1.0)
self.doubleSpinBoxRAMThreshold.setMaximum(100.0)
self.doubleSpinBoxRAMThreshold.setObjectName("doubleSpinBoxRAMThreshold")
self.gridLayout_3.addWidget(self.doubleSpinBoxRAMThreshold, 1, 1, 1, 1)
self.verticalLayout_8.addLayout(self.gridLayout_3)
self.line_2 = QtWidgets.QFrame(self.tabDevices)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout_8.addWidget(self.line_2)
self.groupBox_3 = QtWidgets.QGroupBox(self.tabDevices)
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_11 = QtWidgets.QLabel(self.groupBox_3)
font = QtGui.QFont()
font.setItalic(True)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.gridLayout_2.addWidget(self.label_11, 0, 0, 1, 2)
self.lineEditPython2Interpreter = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEditPython2Interpreter.setObjectName("lineEditPython2Interpreter")
self.gridLayout_2.addWidget(self.lineEditPython2Interpreter, 1, 1, 1, 1)
self.lGnuradioInstalled = QtWidgets.QLabel(self.groupBox_3)
self.lGnuradioInstalled.setStyleSheet("")
self.lGnuradioInstalled.setObjectName("lGnuradioInstalled")
self.gridLayout_2.addWidget(self.lGnuradioInstalled, 3, 0, 1, 2)
self.lineEditGnuradioDirectory = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEditGnuradioDirectory.setEnabled(True)
self.lineEditGnuradioDirectory.setObjectName("lineEditGnuradioDirectory")
self.gridLayout_2.addWidget(self.lineEditGnuradioDirectory, 2, 1, 1, 1)
self.radioButtonPython2Interpreter = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButtonPython2Interpreter.setObjectName("radioButtonPython2Interpreter")
self.gridLayout_2.addWidget(self.radioButtonPython2Interpreter, 1, 0, 1, 1)
self.radioButtonGnuradioDirectory = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButtonGnuradioDirectory.setObjectName("radioButtonGnuradioDirectory")
self.gridLayout_2.addWidget(self.radioButtonGnuradioDirectory, 2, 0, 1, 1)
self.verticalLayout_8.addWidget(self.groupBox_3)
self.groupBoxNativeOptions = QtWidgets.QGroupBox(self.tabDevices)
self.groupBoxNativeOptions.setObjectName("groupBoxNativeOptions")
self.gridLayout_5 = QtWidgets.QGridLayout(self.groupBoxNativeOptions)
self.gridLayout_5.setObjectName("gridLayout_5")
self.labelLibDirs = QtWidgets.QLabel(self.groupBoxNativeOptions)
self.labelLibDirs.setObjectName("labelLibDirs")
self.gridLayout_5.addWidget(self.labelLibDirs, 2, 0, 1, 1)
self.btnRebuildNative = QtWidgets.QPushButton(self.groupBoxNativeOptions)
self.btnRebuildNative.setEnabled(True)
icon = QtGui.QIcon.fromTheme("view-refresh")
self.btnRebuildNative.setIcon(icon)
self.btnRebuildNative.setObjectName("btnRebuildNative")
self.gridLayout_5.addWidget(self.btnRebuildNative, 3, 0, 1, 1)
self.labelNativeRebuildInfo = QtWidgets.QLabel(self.groupBoxNativeOptions)
self.labelNativeRebuildInfo.setWordWrap(True)
self.labelNativeRebuildInfo.setObjectName("labelNativeRebuildInfo")
self.gridLayout_5.addWidget(self.labelNativeRebuildInfo, 1, 0, 1, 3)
self.lineEditLibDirs = QtWidgets.QLineEdit(self.groupBoxNativeOptions)
self.lineEditLibDirs.setObjectName("lineEditLibDirs")
self.gridLayout_5.addWidget(self.lineEditLibDirs, 2, 2, 1, 1)
self.labelRebuildNativeStatus = QtWidgets.QLabel(self.groupBoxNativeOptions)
self.labelRebuildNativeStatus.setObjectName("labelRebuildNativeStatus")
self.gridLayout_5.addWidget(self.labelRebuildNativeStatus, 3, 2, 1, 1)
self.verticalLayout_8.addWidget(self.groupBoxNativeOptions)
self.tabWidget.addTab(self.tabDevices, "")
self.verticalLayout_6.addWidget(self.tabWidget)
self.retranslateUi(DialogOptions)
self.tabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(DialogOptions)
def retranslateUi(self, DialogOptions):
_translate = QtCore.QCoreApplication.translate
DialogOptions.setWindowTitle(_translate("DialogOptions", "Options"))
self.checkBoxDefaultFuzzingPause.setToolTip(_translate("DialogOptions", "<html><head/><body><p>If you disable the default pause, the pause of the fuzzed message will be used.</p></body></html>"))
self.checkBoxDefaultFuzzingPause.setText(_translate("DialogOptions", "Use a default pause for fuzzed messages"))
self.labelFuzzingSamples.setText(_translate("DialogOptions", "Samples"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabGeneration), _translate("DialogOptions", "Generation"))
self.label_7.setText(_translate("DialogOptions", "Default View:"))
self.comboBoxDefaultView.setItemText(0, _translate("DialogOptions", "Bit"))
self.comboBoxDefaultView.setItemText(1, _translate("DialogOptions", "Hex"))
self.comboBoxDefaultView.setItemText(2, _translate("DialogOptions", "ASCII"))
self.checkBoxShowConfirmCloseDialog.setText(_translate("DialogOptions", "Show \"confirm close\" dialog"))
self.checkBoxHoldShiftToDrag.setToolTip(_translate("DialogOptions", "<html><head/><body><p>If checked, you need to <span style=\" font-weight:600;\">hold the Shift key to drag</span> with the mouse inside graphic views like the drawn signal in Interpreation tab, while making a selection with the mouse does not require holding any buttons.</p><p>If unchecked, this is inverted: Hold shift to make a selection, and drag by default.</p></body></html>"))
self.checkBoxHoldShiftToDrag.setText(_translate("DialogOptions", "Hold shift to drag"))
self.checkBoxPauseTime.setText(_translate("DialogOptions", "Show pauses as time"))
self.checkBoxAlignLabels.setText(_translate("DialogOptions", "Align on labels"))
self.label_9.setText(_translate("DialogOptions", "Choose application theme (requires restart):"))
self.comboBoxTheme.setItemText(0, _translate("DialogOptions", "native look (default)"))
self.comboBoxTheme.setItemText(1, _translate("DialogOptions", "fallback theme"))
self.comboBoxTheme.setItemText(2, _translate("DialogOptions", "fallback theme (dark)"))
self.labelIconTheme.setText(_translate("DialogOptions", "Choose icon theme (requires restart):"))
self.comboBoxIconTheme.setItemText(0, _translate("DialogOptions", "bundled icons (default)"))
self.comboBoxIconTheme.setItemText(1, _translate("DialogOptions", "native icon theme"))
self.groupBoxSpectrogramColormap.setTitle(_translate("DialogOptions", "Spectrogram Colormap"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabView), _translate("DialogOptions", "View"))
self.btnAddLabelType.setText(_translate("DialogOptions", "..."))
self.btnRemoveLabeltype.setText(_translate("DialogOptions", "..."))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabFieldtypes), _translate("DialogOptions", "Fieldtypes"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_plugins), _translate("DialogOptions", "Plugins"))
self.chkBoxDeviceEnabled.setText(_translate("DialogOptions", "Enabled"))
self.rbNativeBackend.setText(_translate("DialogOptions", "Native backend (recommended)"))
self.rbGnuradioBackend.setText(_translate("DialogOptions", "Gnuradio backend"))
self.btnHealthCheck.setToolTip(_translate("DialogOptions", "Perform a health check for native device extensions."))
self.btnHealthCheck.setText(_translate("DialogOptions", "Health Check"))
self.lSupport.setText(_translate("DialogOptions", "device supports sending and receiving"))
self.labelWindowsError.setText(_translate("DialogOptions", "<html><head/><body><p><span style=\" color:#ff0000;\">Detected a 32 bit installation of python 3.</span> Install <span style=\" font-weight:600;\">64 bit version</span> to use native backends.</p></body></html>"))
self.label_8.setText(_translate("DialogOptions", "Default sending repititions:"))
self.spinBoxNumSendingRepeats.setSpecialValueText(_translate("DialogOptions", "Infinite"))
self.label_5.setText(_translate("DialogOptions", "Use this percentage of available RAM for buffer allocation:"))
self.doubleSpinBoxRAMThreshold.setSuffix(_translate("DialogOptions", "%"))
self.groupBox_3.setTitle(_translate("DialogOptions", "Gnuradio options"))
self.label_11.setText(_translate("DialogOptions", "Needed for Gnuradio backend only"))
self.lineEditPython2Interpreter.setToolTip(_translate("DialogOptions", "<html><head/><body><p>Use this option if you installed Gnuradio with your package manager e.g. on Linux and Mac OS X.</p></body></html>"))
self.lineEditPython2Interpreter.setPlaceholderText(_translate("DialogOptions", "/usr/bin/python2"))
self.lGnuradioInstalled.setText(_translate("DialogOptions", "Gnuradio installation found"))
self.lineEditGnuradioDirectory.setToolTip(_translate("DialogOptions", "<html><head/><body><p>If you installed Gnuradio with a bundled python interpreter, you need to enter the site-packages path of the installation here. The path should be something like <span style=\" font-style:italic;\">C:\\Program Files\\GNURadio-3.7</span>.</p></body></html>"))
self.lineEditGnuradioDirectory.setPlaceholderText(_translate("DialogOptions", "C:\\...\\Gnuradio"))
self.radioButtonPython2Interpreter.setToolTip(_translate("DialogOptions", "<html><head/><body><p>Use this option if you installed Gnuradio with your package manager e.g. on Linux and Mac OS X.</p></body></html>"))
self.radioButtonPython2Interpreter.setText(_translate("DialogOptions", "Python2 interpreter"))
self.radioButtonGnuradioDirectory.setToolTip(_translate("DialogOptions", "<html><head/><body><p>If you installed Gnuradio with a bundled python interpreter, you need to enter the site-packages path of the installation here. The path should be something like <span style=\" font-style:italic;\">C:\\Program Files\\GNURadio-3.7</span>.</p></body></html>"))
self.radioButtonGnuradioDirectory.setText(_translate("DialogOptions", "Gnuradio Directory"))
self.groupBoxNativeOptions.setTitle(_translate("DialogOptions", "Native options"))
self.labelLibDirs.setText(_translate("DialogOptions", "Library directories:"))
self.btnRebuildNative.setToolTip(_translate("DialogOptions", "<html><head/><body><p>Rebuild the native device extensions. You need to restart URH after this, to use new extensions.</p></body></html>"))
self.btnRebuildNative.setText(_translate("DialogOptions", "Rebuild"))
self.labelNativeRebuildInfo.setText(_translate("DialogOptions", "You can rebuild the native device extensions here. This is useful, when you installed a device driver afterwards or your drivers are stored in an unusual location."))
self.lineEditLibDirs.setPlaceholderText(_translate("DialogOptions", "Comma separated list of additional library directories"))
self.labelRebuildNativeStatus.setText(_translate("DialogOptions", "Rebuild <x> new device extensions. Please restart URH to use them."))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabDevices), _translate("DialogOptions", "Device"))
from urh.ui.KillerDoubleSpinBox import KillerDoubleSpinBox
| gpl-3.0 | 2,843,582,473,850,296,000 | 71.485632 | 460 | 0.741328 | false |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-signal-filtfilt-1.py | 1 | 2375 | # The examples will use several functions from `scipy.signal`.
from scipy import signal
import matplotlib.pyplot as plt
# First we create a one second signal that is the sum of two pure sine
# waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
t = np.linspace(0, 1.0, 2001)
xlow = np.sin(2 * np.pi * 5 * t)
xhigh = np.sin(2 * np.pi * 250 * t)
x = xlow + xhigh
# Now create a lowpass Butterworth filter with a cutoff of 0.125 times
# the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
# The result should be approximately ``xlow``, with no phase shift.
b, a = signal.butter(8, 0.125)
y = signal.filtfilt(b, a, x, padlen=150)
np.abs(y - xlow).max()
# 9.1086182074789912e-06
# We get a fairly clean result for this artificial example because
# the odd extension is exact, and with the moderately long padding,
# the filter's transients have dissipated by the time the actual data
# is reached. In general, transient effects at the edges are
# unavoidable.
# The following example demonstrates the option ``method="gust"``.
# First, create a filter.
b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
np.random.seed(123456)
# `sig` is a random input signal to be filtered.
n = 60
sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
# Apply `filtfilt` to `sig`, once using the Gustafsson method, and
# once using padding, and plot the results for comparison.
fgust = signal.filtfilt(b, a, sig, method="gust")
fpad = signal.filtfilt(b, a, sig, padlen=50)
plt.plot(sig, 'k-', label='input')
plt.plot(fgust, 'b-', linewidth=4, label='gust')
plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
plt.legend(loc='best')
plt.show()
# The `irlen` argument can be used to improve the performance
# of Gustafsson's method.
# Estimate the impulse response length of the filter.
z, p, k = signal.tf2zpk(b, a)
eps = 1e-9
r = np.max(np.abs(p))
approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
approx_impulse_len
# 137
# Apply the filter to a longer signal, with and without the `irlen`
# argument. The difference between `y1` and `y2` is small. For long
# signals, using `irlen` gives a significant performance improvement.
x = np.random.randn(5000)
y1 = signal.filtfilt(b, a, x, method='gust')
y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
print(np.max(np.abs(y1 - y2)))
# 1.80056858312e-10
| gpl-2.0 | 3,619,175,486,744,199,000 | 31.986111 | 70 | 0.704421 | false |
vrbagalkote/avocado-misc-tests-1 | io/disk/ssd/nvmetest.py | 1 | 8116 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Narasimhan V <[email protected]>
"""
NVM-Express user space tooling for Linux, which handles NVMe devices.
This Suite creates and formats a namespace, reads and writes on it
using nvme cli.
"""
import os
from avocado import Test
from avocado import main
from avocado.utils import process
from avocado.utils import download
from avocado.utils.software_manager import SoftwareManager
class NVMeTest(Test):
"""
NVM-Express user space tooling for Linux, which handles NVMe devices.
:param device: Name of the nvme device
:param namespace: Namespace of the device
"""
def setUp(self):
"""
Build 'nvme-cli' and setup the device.
"""
self.device = self.params.get('device', default='/dev/nvme0')
cmd = 'ls %s' % self.device
if process.system(cmd, ignore_status=True) is not 0:
self.cancel("%s does not exist" % self.device)
smm = SoftwareManager()
if not smm.check_installed("nvme-cli") and not \
smm.install("nvme-cli"):
self.cancel('nvme-cli is needed for the test to be run')
self.namespace = self.params.get('namespace', default='1')
self.id_ns = "%sn%s" % (self.device, self.namespace)
cmd = "nvme id-ns %s | grep 'in use' | awk '{print $5}' | \
awk -F':' '{print $NF}'" % self.id_ns
self.format_size = process.system_output(cmd, shell=True).strip('\n')
self.format_size = pow(2, int(self.format_size))
cmd = "nvme id-ns %s | grep 'in use' | awk '{print $2}'" % self.id_ns
self.lba = process.system_output(cmd, shell=True).strip('\n')
self.firmware_url = self.params.get('firmware_url', default='')
if 'firmware_upgrade' in str(self.name) and not self.firmware_url:
self.cancel("firmware url not gien")
test_dic = {'compare': 'Compare', 'formatnamespace': 'Format NVM',
'dsm': 'Data Set Management',
'writezeroes': 'Write Zeroes',
'firmware_upgrade': 'FW Commit and Download',
'writeuncorrectable': 'Write Uncorrectable'}
for key, value in test_dic.iteritems():
if key in str(self.name):
cmd = "nvme id-ctrl %s -H" % self.id_ns
if "%s Supported" % value not in \
process.system_output(cmd, shell=True):
self.cancel("%s is not supported" % value)
def get_firmware_version(self):
"""
Returns the firmware verison.
"""
cmd = "nvme list | grep %s" % self.device
return process.system_output(cmd, shell=True,
ignore_status=True).split()[-1]
def get_firmware_log(self):
"""
Returns the firmware log.
"""
cmd = "nvme fw-log %s" % self.device
return process.system_output(cmd, shell=True, ignore_status=True)
def reset_controller_sysfs(self):
"""
Resets the controller via sysfs.
"""
cmd = "echo 1 > /sys/class/nvme/%s/reset_controller" \
% self.device.split("/")[-1]
return process.system(cmd, shell=True, ignore_status=True)
def test_firmware_upgrade(self):
"""
Updates firmware of the device.
"""
fw_file = self.firmware_url.split('/')[-1]
fw_version = fw_file.split('.')[0]
fw_file_path = download.get_file(self.firmware_url,
os.path.join(self.teststmpdir,
fw_file))
# Getting the current FW details
self.log.debug("Current FW: %s", self.get_firmware_version())
fw_log = self.get_firmware_log()
# Downloading new FW to the device
cmd = "nvme fw-download %s --fw=%s" % (self.device, fw_file_path)
if process.system(cmd, shell=True, ignore_status=True):
self.fail("Failed to download firmware to the device")
# Acvitating new FW on the device
for line in fw_log.splitlines():
if "frs" in line:
s_num = line.split()[0].split("s")[-1]
cmd = "nvme fw-activate %s -a 1 -s %s" % (self.device, s_num)
if process.system(cmd, shell=True, ignore_status=True):
self.fail("Failed to activate firmware for %s" % s_num)
if self.reset_controller_sysfs():
self.fail("Controller reset after FW update failed")
# Getting the current FW details after updating
self.get_firmware_log()
if fw_version != self.get_firmware_version():
self.fail("New Firmware not reflecting after updating")
def testformatnamespace(self):
"""
Formats the namespace on the device.
"""
cmd = 'nvme format %s -l %s' % (self.id_ns, self.lba)
process.run(cmd, shell=True)
def testread(self):
"""
Reads from the namespace on the device.
"""
cmd = 'nvme read %s -z %d -t' % (self.id_ns, self.format_size)
if process.system(cmd, timeout=300, ignore_status=True, shell=True):
self.fail("Read failed")
def testwrite(self):
"""
Write to the namespace on the device.
"""
cmd = 'echo 1|nvme write %s -z %d -t' % (self.id_ns, self.format_size)
if process.system(cmd, timeout=300, ignore_status=True, shell=True):
self.fail("Write failed")
def testcompare(self):
"""
Compares data written on the device with given data.
"""
self.testwrite()
cmd = 'echo 1|nvme compare %s -z %d' % (self.id_ns, self.format_size)
if process.system(cmd, timeout=300, ignore_status=True, shell=True):
self.fail("Compare failed")
def testflush(self):
"""
flush data on controller.
"""
cmd = 'nvme flush %s' % self.id_ns
if process.system(cmd, ignore_status=True, shell=True):
self.fail("Flush failed")
def testwritezeroes(self):
"""
Write zeroes command to the device.
"""
cmd = 'nvme write-zeroes %s' % self.id_ns
if process.system(cmd, ignore_status=True, shell=True):
self.fail("Writing Zeroes failed")
def testwriteuncorrectable(self):
"""
Write uncorrectable command to the device.
"""
cmd = 'nvme write-uncor %s' % self.id_ns
if process.system(cmd, ignore_status=True, shell=True):
self.fail("Writing Uncorrectable failed")
def testdsm(self):
"""
The Dataset Management command test.
"""
cmd = 'nvme dsm %s -a 1 -b 1 -s 1 -d -w -r' % self.id_ns
if process.system(cmd, ignore_status=True, shell=True):
self.fail("Subsystem reset failed")
def testreset(self):
"""
resets the controller.
"""
cmd = 'nvme reset %s' % self.device
if process.system(cmd, ignore_status=True, shell=True):
self.fail("Reset failed")
def testsubsystemreset(self):
"""
resets the controller subsystem.
"""
cmd = 'nvme subsystem-reset %s' % self.device
if process.system(cmd, ignore_status=True, shell=True):
self.fail("Subsystem reset failed")
def testreset_sysfs(self):
"""
resets the controller via sysfs.
"""
if self.reset_controller_sysfs():
self.fail("Reset failed")
if __name__ == "__main__":
main()
| gpl-2.0 | 8,352,796,707,823,800,000 | 35.558559 | 78 | 0.576023 | false |
xkjyeah/gdfmm | demo/demo_rgb_corr.py | 1 | 1988 | #!/usr/bin/env python
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import gdfmm
missing_mask = (cv2.imread('missing_mask.png', cv2.CV_LOAD_IMAGE_UNCHANGED) == 0)
for i in xrange(100):
if os.path.isfile('images/rgb%d.png' % i) and \
os.path.isfile('images/dep%d.png' % i) and \
os.path.isfile('images/missing%d.png' % i):
bgr = cv2.imread('images/rgb%d.png' % i, cv2.CV_LOAD_IMAGE_UNCHANGED)
rgb = cv2.cvtColor(bgr, cv2.cv.CV_BGR2RGB)
dep = cv2.imread('images/dep%d.png' % i, cv2.CV_LOAD_IMAGE_UNCHANGED)
if dep.dtype == np.uint8:
dep = np.array(dep, dtype=np.uint16) * (10000 / 256)
missing = dep.copy()
missing[missing_mask] = 0
inpainted = gdfmm.InpaintDepth2(missing,
rgb,
1, # epsilon
1, # homogenizing constant
blur_sigma = 2.0,
window_size = 11)
# scale the depths to some visible range
dep_scaled = (dep / 10000.0).reshape(dep.shape + (1,)).repeat(3, axis=2)
inp_scaled = (inpainted/ 10000.0).reshape(dep.shape + (1,)).repeat(3, axis=2)
mis_scaled = (missing / 10000.0).reshape(dep.shape + (1,)).repeat(3, axis=2)
rgb_scaled = rgb / 255.0
dep_scaled = np.asarray(dep_scaled, dtype=np.float)
inp_scaled = np.asarray(inp_scaled, dtype=np.float)
rgb_scaled = np.asarray(rgb_scaled, dtype=np.float)
mis_scaled = np.asarray(mis_scaled, dtype=np.float)
side_by_side = np.concatenate(
(np.concatenate( (rgb_scaled, dep_scaled), axis=0 ),
np.concatenate( (mis_scaled, inp_scaled), axis=0 )), axis=1)
plt.figure(figsize=(13,13))
plt.imshow(side_by_side)
plt.show()
| bsd-2-clause | -4,390,210,656,708,027,000 | 37.980392 | 87 | 0.527163 | false |
GoogleCloudPlatform/appengine-python-standard | src/google/appengine/datastore/datastore_stub_util.py | 1 | 173757 | #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility functions shared between the file and sqlite datastore stubs.
This module is internal and should not be used by client applications.
"""
import atexit
import collections
import functools
import hashlib
import itertools
import json
import logging
import os
import random
import struct
import threading
import time
import weakref
import six
from six.moves import filter
from six.moves import range
from six.moves import zip
from six.moves import zip_longest
import six.moves.http_client
from google.appengine.api import api_base_pb2
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import cmp_compat
from google.appengine.api import datastore_admin
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import yaml_errors
from google.appengine.api.taskqueue import taskqueue_service_bytes_pb2 as taskqueue_service_pb2
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_stub_index
from google.appengine.datastore import datastore_v4_pb2
from google.appengine.runtime import apiproxy_errors
from google.protobuf import message
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
if six.PY3:
long = int
if datastore_pbs._CLOUD_DATASTORE_ENABLED:
from google.appengine.datastore.datastore_pbs import googledatastore
_MAXIMUM_RESULTS = 300
_MAXIMUM_QUERY_RESULT_BYTES = 2000000
_MAX_QUERY_OFFSET = 1000
_PROPERTY_TYPE_NAMES = {
0: 'NULL',
entity_pb2.PropertyValue.INT64VALUE_FIELD_NUMBER: 'INT64',
entity_pb2.PropertyValue.BOOLEANVALUE_FIELD_NUMBER: 'BOOLEAN',
entity_pb2.PropertyValue.STRINGVALUE_FIELD_NUMBER: 'STRING',
entity_pb2.PropertyValue.DOUBLEVALUE_FIELD_NUMBER: 'DOUBLE',
entity_pb2.PropertyValue.POINTVALUE_FIELD_NUMBER: 'POINT',
entity_pb2.PropertyValue.USERVALUE_FIELD_NUMBER: 'USER',
entity_pb2.PropertyValue.REFERENCEVALUE_FIELD_NUMBER: 'REFERENCE'
}
_SCATTER_PROPORTION = 32768
_MAX_EG_PER_TXN = 25
_BLOB_MEANINGS = frozenset(
(entity_pb2.Property.BLOB, entity_pb2.Property.ENTITY_PROTO,
entity_pb2.Property.TEXT))
_RETRIES = 3
_INITIAL_RETRY_DELAY_MS = 100
_RETRY_DELAY_MULTIPLIER = 2
_MAX_RETRY_DELAY_MS = 120000
MINIMUM_VERSION = 1
SEQUENTIAL = 'sequential'
SCATTERED = 'scattered'
_MAX_SEQUENTIAL_BIT = 52
_MAX_SEQUENTIAL_COUNTER = (1 << _MAX_SEQUENTIAL_BIT) - 1
_MAX_SEQUENTIAL_ID = _MAX_SEQUENTIAL_COUNTER
_MAX_SCATTERED_COUNTER = (1 << (_MAX_SEQUENTIAL_BIT - 1)) - 1
_MAX_SCATTERED_ID = _MAX_SEQUENTIAL_ID + 1 + _MAX_SCATTERED_COUNTER
_SCATTER_SHIFT = 64 - _MAX_SEQUENTIAL_BIT + 1
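# Illustrative note on the constants above: sequential auto-ids occupy
# 1 .. 2**52 - 1 (_MAX_SEQUENTIAL_ID), while scattered ids extend over the
# next 2**51 - 1 values, up to _MAX_SCATTERED_ID = 2**52 + 2**51 - 1.
# _SCATTER_SHIFT (64 - 52 + 1 = 13) is presumably applied elsewhere in this
# module to spread scattered counters across that upper range.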
_EMULATOR_CONFIG_CACHE = None
logger = logging.getLogger('google.appengine.api.stubs.datastore')
def _GetScatterProperty(entity_proto):
"""Gets the scatter property for an object.
For ease of implementation, this is not synchronized with the actual
value on the App Engine server, but should work equally well.
Note: This property may change, either here or in production. No client
other than the mapper framework should rely on it directly.
Returns:
    The scatter property (an entity_pb2.Property) or None if this entity
    should not have a scatter property.
"""
hash_obj = hashlib.md5()
for element in entity_proto.key.path.element:
if element.HasField('name'):
hash_obj.update(element.name.encode('utf-8'))
elif element.HasField('id'):
hash_obj.update(six.ensure_binary(str(element.id)))
hash_bytes = hash_obj.digest()[0:2]
(hash_int,) = struct.unpack('H', hash_bytes)
if hash_int >= _SCATTER_PROPORTION:
return None
scatter_property = entity_pb2.Property()
scatter_property.name = datastore_types.SCATTER_SPECIAL_PROPERTY
scatter_property.meaning = entity_pb2.Property.BYTESTRING
scatter_property.multiple = False
property_value = scatter_property.value
property_value.stringValue = hash_bytes
return scatter_property
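# Illustrative note: struct.unpack('H', ...) above yields an unsigned 16-bit
# value in 0..65535, so with _SCATTER_PROPORTION == 32768 roughly half of all
# entities receive the __scatter__ property in this stub.  Over a large,
# uniformly keyed sample, the fraction of entities for which
#   _GetScatterProperty(entity) is not None
# should hover around 0.5.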
_SPECIAL_PROPERTY_MAP = {
datastore_types.SCATTER_SPECIAL_PROPERTY: (False, True, _GetScatterProperty)
}
def GetInvisibleSpecialPropertyNames():
"""Gets the names of all non user-visible special properties."""
invisible_names = []
for name, value in _SPECIAL_PROPERTY_MAP.items():
is_visible, _, _ = value
if not is_visible:
invisible_names.append(name)
return invisible_names
def _PrepareSpecialProperties(entity_proto, is_load):
"""Computes special properties for loading or storing.
Strips other special properties."""
for i in range(len(entity_proto.property) - 1, -1, -1):
if entity_proto.property[i].name in _SPECIAL_PROPERTY_MAP:
del entity_proto.property[i]
for is_visible, is_stored, property_func in _SPECIAL_PROPERTY_MAP.values():
if is_load:
should_process = is_visible
else:
should_process = is_stored
if should_process:
special_property = property_func(entity_proto)
if special_property:
entity_proto.property.append(special_property)
_METADATA_PROPERTY_NAME = '__metadata__'
def _FromStorageEntity(entity):
"""Converts a stored entity protobuf to an EntityRecord (with metadata).
This function is only provided as convenience for storage implementations that
wish to store metadata directly on EntityProto.
Args:
entity: An Entity protobuf.
Returns:
The EntityRecord including the EntityMetadata protobuf that was stored as a
property on the Entity protobuf or an empty EntityMetadata if that Entity
has no metadata property.
"""
clone = entity_pb2.EntityProto()
clone.CopyFrom(entity)
metadata = entity_pb2.EntityMetadata()
for i in range(len(clone.property) - 1, -1, -1):
prop = clone.property[i]
if _METADATA_PROPERTY_NAME == prop.name:
del clone.property[i]
metadata = entity_pb2.EntityMetadata.FromString(prop.value.stringValue)
return EntityRecord(clone, metadata)
def _ToStorageEntity(record):
"""Store a metadata object as a pickled string property on an entity protobuf.
This function is only provided as convenience for storage implementations that
wish to store metadata directly on EntityProto.
Args:
record: An EntityRecord.
Returns:
    A copy of the entity with an additional string property that contains the
    serialized metadata message. Returns None if the record is None.
"""
if record:
clone = entity_pb2.EntityProto()
clone.CopyFrom(record.entity)
serialized_metadata = record.metadata.SerializeToString()
metadata_property = clone.property.add()
metadata_property.name = _METADATA_PROPERTY_NAME
metadata_property.meaning = entity_pb2.Property.BLOB
metadata_property.multiple = False
metadata_property.value.stringValue = serialized_metadata
return clone
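# Illustrative note: _ToStorageEntity and _FromStorageEntity are intended to
# round-trip.  For an EntityRecord `record`, one would expect
#   restored = _FromStorageEntity(_ToStorageEntity(record))
# to recover an entity equal to record.entity and metadata equal to
# record.metadata, since the metadata travels as a serialized BLOB property
# named __metadata__ that is stripped again on load.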
def _GetGroupByKey(entity, property_names):
"""Computes a key value that uniquely identifies the 'group' of an entity.
Args:
entity: The entity_pb2.EntityProto for which to create the group key.
property_names: The names of the properties in the group by clause.
Returns:
A hashable value that uniquely identifies the entity's 'group'.
"""
return frozenset((prop.name, prop.value.SerializeToString())
for prop in entity.property
if prop.name in property_names)
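# Illustrative note: two entities whose group-by properties serialize to
# identical values produce equal frozensets above and therefore fall into the
# same group, regardless of any other properties they carry.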
def PrepareSpecialPropertiesForStore(entity_proto):
"""Computes special properties for storing.
Strips other special properties."""
_PrepareSpecialProperties(entity_proto, False)
def LoadEntity(entity, keys_only=False, property_names=None):
"""Prepares an entity to be returned to the user.
Args:
    entity: an entity_pb2.EntityProto or None
    keys_only: if a keys only result should be produced
    property_names: if not None and not empty, causes a projected entity
      to be produced with the given properties.
Returns:
A user friendly copy of entity or None.
"""
if entity:
clone = entity_pb2.EntityProto()
if property_names:
clone.key.CopyFrom(entity.key)
clone.entity_group.SetInParent()
seen = set()
for prop in entity.property:
if prop.name in property_names:
Check(prop.name not in seen, 'datastore dev stub produced bad result',
datastore_pb.Error.INTERNAL_ERROR)
seen.add(prop.name)
new_prop = clone.property.add()
new_prop.name = prop.name
new_prop.meaning = entity_pb2.Property.INDEX_VALUE
new_prop.value.CopyFrom(prop.value)
new_prop.multiple = False
elif keys_only:
clone.key.CopyFrom(entity.key)
clone.entity_group.SetInParent()
else:
clone.CopyFrom(entity)
PrepareSpecialPropertiesForLoad(clone)
return clone
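# Illustrative note: for projection queries LoadEntity copies each requested
# property back as a single (multiple=False) property with meaning
# INDEX_VALUE, keeping only the key plus those properties, while keys_only
# keeps just the key.  Assuming `entity` is a populated
# entity_pb2.EntityProto, typical calls look like:
#   projected = LoadEntity(entity, property_names=set(['height']))
#   key_only = LoadEntity(entity, keys_only=True)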
def LoadRecord(record, keys_only=False, property_names=None):
"""Prepares a record to be returned to the user.
Args:
record: an EntityRecord or None
keys_only: if a keys only result should be produced
    property_names: if not None and not empty, causes a projected entity
      to be produced with the given properties.
Returns:
A user friendly copy of record or None.
"""
if record:
metadata = record.metadata
if keys_only or property_names:
metadata = entity_pb2.EntityMetadata()
return EntityRecord(LoadEntity(record.entity, keys_only, property_names),
metadata)
def StoreRecord(record):
"""Prepares a record for storing.
Args:
record: an EntityRecord to prepare
Returns:
A copy of the record that can be stored.
"""
clone = entity_pb2.EntityProto()
clone.CopyFrom(record.entity)
PrepareSpecialPropertiesForStore(clone)
return EntityRecord(clone, record.metadata)
def PrepareSpecialPropertiesForLoad(entity_proto):
"""Computes special properties that are user-visible.
Strips other special properties."""
_PrepareSpecialProperties(entity_proto, True)
def Check(test, msg='', error_code=datastore_pb.Error.BAD_REQUEST):
"""Raises an apiproxy_errors.ApplicationError if the condition is false.
Args:
test: A condition to test.
msg: A string to return with the error.
error_code: One of datastore_pb.Error to use as an error code.
Raises:
apiproxy_errors.ApplicationError: If test is false.
"""
if not test:
raise apiproxy_errors.ApplicationError(error_code, msg)
def CheckValidUTF8(string, desc):
"""Check that the given string is valid UTF-8.
Args:
string: the string to validate.
desc: a description of the string being validated.
Raises:
apiproxy_errors.ApplicationError: if the string is not valid UTF-8.
"""
if isinstance(string, six.text_type):
return True
try:
string.decode('utf-8')
except UnicodeDecodeError:
Check(False, '%s is not valid UTF-8.' % desc)
def CheckAppId(request_trusted, request_app_id, app_id):
"""Check that this is the stub for app_id.
Args:
request_trusted: If the request is trusted.
request_app_id: The application ID of the app making the request.
app_id: An application ID.
Raises:
apiproxy_errors.ApplicationError: if this is not the stub for app_id.
"""
assert app_id
CheckValidUTF8(app_id, 'app id')
Check(request_trusted or app_id == request_app_id,
'app "%s" cannot access app "%s"\'s data' % (request_app_id, app_id))
def CheckReference(request_trusted,
request_app_id,
key,
require_id_or_name=True):
"""Check this key.
Args:
request_trusted: If the request is trusted.
request_app_id: The application ID of the app making the request.
key: entity_pb2.Reference
require_id_or_name: Boolean indicating if we should enforce the presence of
an id or name in the last element of the key's path.
Raises:
apiproxy_errors.ApplicationError: if the key is invalid
"""
assert isinstance(key, entity_pb2.Reference)
CheckAppId(request_trusted, request_app_id, key.app)
Check(key.path.element, 'key\'s path cannot be empty')
if require_id_or_name:
Check(datastore_pbs.is_complete_v3_key(key), 'missing key id/name')
for elem in key.path.element:
Check(not elem.HasField('id') or not elem.HasField('name'),
'each key path element should have id or name but not both: %r' % key)
CheckValidUTF8(elem.type, 'key path element type')
if elem.HasField('name'):
CheckValidUTF8(elem.name, 'key path element name')
def CheckEntity(request_trusted, request_app_id, entity):
"""Check if this entity can be stored.
Args:
request_trusted: If the request is trusted.
request_app_id: The application ID of the app making the request.
entity: entity_pb2.EntityProto
Raises:
apiproxy_errors.ApplicationError: if the entity is invalid
"""
CheckReference(request_trusted, request_app_id, entity.key, False)
for prop in entity.property:
CheckProperty(request_trusted, request_app_id, prop)
for prop in entity.raw_property:
CheckProperty(request_trusted, request_app_id, prop, indexed=False)
def CheckProperty(request_trusted, request_app_id, prop, indexed=True):
"""Check if this property can be stored.
Args:
request_trusted: If the request is trusted.
request_app_id: The application ID of the app making the request.
prop: entity_pb2.Property
indexed: Whether the property is indexed.
Raises:
apiproxy_errors.ApplicationError: if the property is invalid
"""
name = prop.name
value = prop.value
meaning = prop.meaning
CheckValidUTF8(name, 'property name')
Check(request_trusted or
not datastore_types.RESERVED_PROPERTY_NAME.match(name),
'cannot store entity with reserved property name \'%s\'' % name)
Check(prop.meaning != entity_pb2.Property.INDEX_VALUE,
'Entities with incomplete properties cannot be written.')
is_blob = meaning in _BLOB_MEANINGS
if indexed:
Check(not is_blob,
          'BLOB, ENTITY_PROTO or TEXT property ' + name +
' must be in a raw_property field')
max_length = datastore_types._MAX_STRING_LENGTH
else:
if is_blob:
Check(
value.HasField('stringValue'),
'BLOB / ENTITY_PROTO / TEXT raw property ' + name +
          ' must have a string value')
max_length = datastore_types._MAX_RAW_PROPERTY_BYTES
if meaning == entity_pb2.Property.ATOM_LINK:
max_length = datastore_types._MAX_LINK_PROPERTY_LENGTH
CheckPropertyValue(name, value, max_length, meaning)
def CheckPropertyValue(name, value, max_length, meaning):
"""Check if this property value can be stored.
Args:
name: name of the property
value: entity_pb2.PropertyValue
max_length: maximum length for string values
meaning: meaning of the property
Raises:
apiproxy_errors.ApplicationError: if the property is invalid
"""
num_values = (
value.HasField('int64Value') + value.HasField('stringValue') +
value.HasField('booleanValue') + value.HasField('doubleValue') +
value.HasField('pointvalue') + value.HasField('uservalue') +
value.HasField('referencevalue'))
Check(num_values <= 1, 'PropertyValue for ' + name +
' has multiple value fields set')
if value.HasField('stringValue'):
s = value.stringValue
if isinstance(s, six.text_type):
s = s.encode('utf-8')
Check(len(s) <= max_length,
'Property %s is too long. Maximum length is %d.' % (name, max_length))
if (meaning not in _BLOB_MEANINGS and
meaning != entity_pb2.Property.BYTESTRING):
CheckValidUTF8(value.stringValue, 'String property "%s" value' % name)
def CheckTransaction(request_trusted, request_app_id, transaction):
"""Check that this transaction is valid.
Args:
request_trusted: If the request is trusted.
request_app_id: The application ID of the app making the request.
transaction: datastore_pb.Transaction()
Raises:
apiproxy_errors.ApplicationError: if the transaction is not valid.
"""
assert isinstance(transaction, datastore_pb.Transaction)
CheckAppId(request_trusted, request_app_id, transaction.app)
def CheckQuery(query, filters, orders, max_query_components):
"""Check a datastore query with normalized filters, orders.
Raises an ApplicationError when any of the following conditions are violated:
- transactional queries have an ancestor
- queries that are not too large
(sum of filters, orders, ancestor <= max_query_components)
- ancestor (if any) app and namespace match query app and namespace
- kindless queries only filter on __key__ and only sort on __key__ ascending
- multiple inequality (<, <=, >, >=) filters all applied to the same property
- filters on __key__ compare to a reference in the same app and namespace as
the query
- if an inequality filter on prop X is used, the first order (if any) must
be on X
Args:
query: query to validate
filters: normalized (by datastore_index.Normalize) filters from query
orders: normalized (by datastore_index.Normalize) orders from query
max_query_components: limit on query complexity
"""
Check(not query.property_name or not query.keys_only,
'projection and keys_only cannot both be set')
projected_properties = set(query.property_name)
for prop_name in query.property_name:
Check(not datastore_types.RESERVED_PROPERTY_NAME.match(prop_name),
'projections are not supported for the property: ' + prop_name)
Check(
len(projected_properties) == len(query.property_name),
'cannot project a property multiple times')
key_prop_name = datastore_types.KEY_SPECIAL_PROPERTY
unapplied_log_timestamp_us_name = (
datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY)
if query.HasField('transaction'):
Check(
query.HasField('ancestor'),
'Only ancestor queries are allowed inside transactions.')
num_components = len(filters) + len(orders)
if query.HasField('ancestor'):
num_components += 1
Check(num_components <= max_query_components,
'query is too large. may not have more than %s filters'
' + sort orders ancestor total' % max_query_components)
if query.HasField('ancestor'):
ancestor = query.ancestor
Check(query.app == ancestor.app,
'query app is %s but ancestor app is %s' % (query.app, ancestor.app))
Check(
query.name_space == ancestor.name_space,
'query namespace is %s but ancestor namespace is %s' %
(query.name_space, ancestor.name_space))
if query.group_by_property_name:
group_by_set = set(query.group_by_property_name)
for order in orders:
if not group_by_set:
break
Check(
order.property in group_by_set,
'items in the group by clause must be specified first '
'in the ordering')
group_by_set.remove(order.property)
ineq_prop_name = None
for filter in filters:
Check(
len(filter.property) == 1,
'Filter has %d properties, expected 1' % len(filter.property))
prop = filter.property[0]
prop_name = prop.name
if prop_name == key_prop_name:
Check(
prop.value.HasField('referencevalue'),
'%s filter value must be a Key' % key_prop_name)
ref_val = prop.value.referencevalue
Check(
ref_val.app == query.app, '%s filter app is %s but query app is %s' %
(key_prop_name, ref_val.app, query.app))
Check(
ref_val.name_space == query.name_space,
'%s filter namespace is %s but query namespace is %s' %
(key_prop_name, ref_val.name_space, query.name_space))
if filter.op in datastore_index.EQUALITY_OPERATORS:
Check(prop_name not in projected_properties,
'cannot use projection on a property with an equality filter')
if (filter.op in datastore_index.INEQUALITY_OPERATORS and
prop_name != unapplied_log_timestamp_us_name):
if ineq_prop_name is None:
ineq_prop_name = prop_name
else:
Check(ineq_prop_name == prop_name,
'Only one inequality filter per query is supported. '
'Encountered both %s and %s' % (ineq_prop_name, prop_name))
if ineq_prop_name is not None and query.group_by_property_name and not orders:
Check(ineq_prop_name in group_by_set,
'Inequality filter on %s must also be a group by '
'property when group by properties are set.'
% (ineq_prop_name))
if ineq_prop_name is not None and orders:
first_order_prop = _Decode(orders[0].property)
Check(first_order_prop == ineq_prop_name,
'The first sort property must be the same as the property '
'to which the inequality filter is applied. In your query '
'the first sort property is %s but the inequality filter '
'is on %s' % (first_order_prop, ineq_prop_name))
if not query.HasField('kind'):
for filter in filters:
prop_name = _Decode(filter.property[0].name)
Check(prop_name == key_prop_name or
prop_name == unapplied_log_timestamp_us_name,
'kind is required for non-__key__ filters')
for order in orders:
prop_name = _Decode(order.property)
Check(
prop_name == key_prop_name and
order.direction is datastore_pb.Query.Order.ASCENDING,
'kind is required for all orders except __key__ ascending')
def _Decode(string):
"""Converts binary string to utf-8."""
if isinstance(string, six.text_type):
return string
return string.decode('utf-8')
class ValueRange(object):
"""A range of values defined by its two extremes (inclusive or exclusive)."""
def __init__(self):
"""Constructor.
Creates an unlimited range.
"""
self.__start = self.__end = None
self.__start_inclusive = self.__end_inclusive = False
def Update(self, rel_op, limit):
"""Filter the range by 'rel_op limit'.
Args:
rel_op: relational operator from datastore_pb.Query.Filter.
limit: the value to limit the range by.
"""
if rel_op == datastore_pb.Query.Filter.LESS_THAN:
if self.__end is None or limit <= self.__end:
self.__end = limit
self.__end_inclusive = False
elif (rel_op == datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL or
rel_op == datastore_pb.Query.Filter.EQUAL):
if self.__end is None or limit < self.__end:
self.__end = limit
self.__end_inclusive = True
if rel_op == datastore_pb.Query.Filter.GREATER_THAN:
if self.__start is None or limit >= self.__start:
self.__start = limit
self.__start_inclusive = False
elif (rel_op == datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL or
rel_op == datastore_pb.Query.Filter.EQUAL):
if self.__start is None or limit > self.__start:
self.__start = limit
self.__start_inclusive = True
def Contains(self, value):
"""Check if the range contains a specific value.
Args:
value: the value to check.
Returns:
True iff value is contained in this range.
"""
if self.__start is not None:
if self.__start_inclusive and value < self.__start: return False
if not self.__start_inclusive and value <= self.__start: return False
if self.__end is not None:
if self.__end_inclusive and value > self.__end: return False
if not self.__end_inclusive and value >= self.__end: return False
return True
def Remap(self, mapper):
"""Transforms the range extremes with a function.
The function mapper must preserve order, i.e.
      x rel_op y iff mapper(x) rel_op mapper(y)
Args:
mapper: function to apply to the range extremes.
"""
self.__start = self.__start and mapper(self.__start)
self.__end = self.__end and mapper(self.__end)
def MapExtremes(self, mapper):
"""Evaluate a function on the range extremes.
Args:
mapper: function to apply to the range extremes.
Returns:
(x, y) where x = None if the range has no start,
mapper(start, start_inclusive, False) otherwise
y = None if the range has no end,
mapper(end, end_inclusive, True) otherwise
"""
return (
self.__start and mapper(self.__start, self.__start_inclusive, False),
self.__end and mapper(self.__end, self.__end_inclusive, True))
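# Illustrative use of ValueRange, mirroring how the Parse*Query helpers below
# collapse __key__ filters.  For filters equivalent to "3 < x <= 7":
#   r = ValueRange()
#   r.Update(datastore_pb.Query.Filter.GREATER_THAN, 3)
#   r.Update(datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL, 7)
#   r.Contains(5)  # True
#   r.Contains(3)  # False, the lower bound is exclusive
#   r.Contains(8)  # False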
def ParseKeyFilteredQuery(filters, orders):
"""Parse queries which only allow filters and ascending-orders on __key__.
Raises exceptions for illegal queries.
Args:
filters: the normalized filters of a query.
orders: the normalized orders of a query.
Returns:
The key range (a ValueRange over datastore_types.Key) requested in the
query.
"""
remaining_filters = []
key_range = ValueRange()
key_prop = datastore_types.KEY_SPECIAL_PROPERTY
for f in filters:
op = f.op
if not (len(f.property) == 1 and f.property[0].name == key_prop and
not (op == datastore_pb.Query.Filter.IN or
op == datastore_pb.Query.Filter.EXISTS)):
remaining_filters.append(f)
continue
val = f.property[0].value
Check(val.HasField('referencevalue'),
'__key__ kind must be compared to a key')
limit = datastore_types.FromReferenceProperty(val)
key_range.Update(op, limit)
remaining_orders = []
for o in orders:
if not (o.direction == datastore_pb.Query.Order.ASCENDING and
o.property == datastore_types.KEY_SPECIAL_PROPERTY):
remaining_orders.append(o)
else:
break
Check(not remaining_filters,
'Only comparison filters on ' + key_prop + ' supported')
Check(not remaining_orders,
'Only ascending order on ' + key_prop + ' supported')
return key_range
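# Illustrative note: the helpers below reuse ParseKeyFilteredQuery for the
# pseudo-kinds __kind__, __namespace__ and __property__, then Remap the
# resulting key range into the domain each pseudo-kind is keyed by (kind
# names, namespace names, or (kind, property) pairs).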
def ParseKindQuery(query, filters, orders):
"""Parse __kind__ (schema) queries.
Raises exceptions for illegal queries.
Args:
query: A Query PB.
filters: the normalized filters from query.
orders: the normalized orders from query.
Returns:
The kind range (a ValueRange over string) requested in the query.
"""
Check(not query.HasField('ancestor'),
'ancestor queries on __kind__ not allowed')
key_range = ParseKeyFilteredQuery(filters, orders)
key_range.Remap(_KindKeyToString)
return key_range
def _KindKeyToString(key):
"""Extract kind name from __kind__ key.
Raises an ApplicationError if the key is not of the form '__kind__'/name.
Args:
key: a key for a __kind__ instance.
Returns:
kind specified by key.
"""
key_path = key.to_path()
if (len(key_path) == 2 and key_path[0] == '__kind__' and
isinstance(key_path[1], six.string_types)):
return key_path[1]
Check(False, 'invalid Key for __kind__ table')
def ParseNamespaceQuery(query, filters, orders):
"""Parse __namespace__ queries.
Raises exceptions for illegal queries.
Args:
query: A Query PB.
filters: the normalized filters from query.
orders: the normalized orders from query.
Returns:
The kind range (a ValueRange over string) requested in the query.
"""
Check(not query.HasField('ancestor'),
'ancestor queries on __namespace__ not allowed')
key_range = ParseKeyFilteredQuery(filters, orders)
key_range.Remap(_NamespaceKeyToString)
return key_range
def _NamespaceKeyToString(key):
"""Extract namespace name from __namespace__ key.
Raises an ApplicationError if the key is not of the form '__namespace__'/name
or '__namespace__'/_EMPTY_NAMESPACE_ID.
Args:
key: a key for a __namespace__ instance.
Returns:
namespace specified by key.
"""
key_path = key.to_path()
if len(key_path) == 2 and key_path[0] == '__namespace__':
if key_path[1] == datastore_types._EMPTY_NAMESPACE_ID:
return ''
if isinstance(key_path[1], six.string_types):
return key_path[1]
Check(False, 'invalid Key for __namespace__ table')
def ParsePropertyQuery(query, filters, orders):
"""Parse __property__ queries.
Raises exceptions for illegal queries.
Args:
query: A Query PB.
filters: the normalized filters from query.
orders: the normalized orders from query.
Returns:
    The property range (a ValueRange over (kind, property) pairs) requested
in the query.
"""
Check(not query.HasField('transaction'),
'transactional queries on __property__ not allowed')
key_range = ParseKeyFilteredQuery(filters, orders)
key_range.Remap(lambda x: _PropertyKeyToString(x, ''))
if query.HasField('ancestor'):
ancestor = datastore_types.Key._FromPb(query.ancestor)
ancestor_kind, ancestor_property = _PropertyKeyToString(ancestor, None)
if ancestor_property is not None:
key_range.Update(datastore_pb.Query.Filter.EQUAL,
(ancestor_kind, ancestor_property))
else:
key_range.Update(datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL,
(ancestor_kind, ''))
key_range.Update(datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL,
(ancestor_kind + '\0', ''))
query.ClearField('ancestor')
return key_range
def _PropertyKeyToString(key, default_property):
"""Extract property name from __property__ key.
Raises an ApplicationError if the key is not of the form
'__kind__'/kind, '__property__'/property or '__kind__'/kind
Args:
key: a key for a __property__ instance.
default_property: property value to return when key only has a kind.
Returns:
kind, property if key = '__kind__'/kind, '__property__'/property
kind, default_property if key = '__kind__'/kind
"""
key_path = key.to_path()
if (len(key_path) == 2 and key_path[0] == '__kind__' and
isinstance(key_path[1], six.string_types)):
return (key_path[1], default_property)
if (len(key_path) == 4 and key_path[0] == '__kind__' and
isinstance(key_path[1], six.string_types) and
key_path[2] == '__property__' and
isinstance(key_path[3], six.string_types)):
return (key_path[1], key_path[3])
Check(False, 'invalid Key for __property__ table')
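# Illustrative sketch ('Greeting' and 'content' are hypothetical names):
#
#   kind_key = datastore_types.Key.from_path('__kind__', 'Greeting')
#   _PropertyKeyToString(kind_key, '')    # -> ('Greeting', '')
#
#   prop_key = datastore_types.Key.from_path(
#       '__kind__', 'Greeting', '__property__', 'content')
#   _PropertyKeyToString(prop_key, '')    # -> ('Greeting', 'content')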
def SynthesizeUserId(email):
"""Return a synthetic user ID from an email address.
Note that this is not the same user ID found in the production system.
Args:
email: An email address.
Returns:
A string userid derived from the email address.
"""
user_id_digest = hashlib.md5(email.lower().encode('utf-8')).digest()
user_id = '1' + ''.join(['%02d' % x for x in six.iterbytes(user_id_digest)
])[:20]
return user_id
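# Illustrative sketch (hypothetical addresses): the synthetic ID is '1'
# followed by the first 20 digits of the zero-padded decimal bytes of the MD5
# digest of the lower-cased email, so it is case-insensitive and always 21
# characters long:
#
#   SynthesizeUserId('Alice@Example.com') == SynthesizeUserId('alice@example.com')
#   len(SynthesizeUserId('alice@example.com')) == 21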
def FillUsersInQuery(filters):
"""Fill in a synthetic user ID for all user properties in a set of filters.
Args:
filters: The normalized filters from query.
"""
for filter in filters:
for prop in filter.property:
FillUser(prop)
def FillUser(property):
"""Fill in a synthetic user ID for a user properties.
Args:
property: A Property which may have a user value.
"""
if property.value.HasField('uservalue'):
uid = SynthesizeUserId(property.value.uservalue.email)
if uid:
property.value.uservalue.obfuscated_gaiaid = uid
class BaseCursor(object):
"""A base query cursor over a list of entities.
Public properties:
cursor: the integer cursor.
app: the app for which this cursor was created.
keys_only: whether the query is keys_only.
Class attributes:
_next_cursor: the next cursor to allocate.
_next_cursor_lock: protects _next_cursor.
"""
_next_cursor = 1
_next_cursor_lock = threading.Lock()
def __init__(self, query, dsquery, orders, index_list):
"""Constructor.
Args:
query: the query request proto.
dsquery: a datastore_query.Query over query.
orders: the orders of query as returned by _GuessOrders.
index_list: the list of indexes used by the query.
"""
self.keys_only = query.keys_only
self.property_names = set(query.property_name)
self.group_by = set(query.group_by_property_name)
self.app = query.app
self.cursor = self._AcquireCursorID()
if query.HasField('count'):
count = query.count
elif query.HasField('limit'):
count = query.limit
else:
count = BaseDatastore._BATCH_SIZE
self.__use_persisted_offset = query.persist_offset
self.__persisted_offset = query.offset
self.__persisted_count = count
self.__order_compare_entities = dsquery._order.cmp_for_filter(
dsquery._filter_predicate)
if self.group_by:
self.__cursor_properties = self.group_by
else:
self.__cursor_properties = set(order.property for order in orders)
self.__cursor_properties.add('__key__')
self.__cursor_properties = frozenset(self.__cursor_properties)
self.__first_sort_order = orders[0].direction
self.__index_list = index_list
def _PopulateResultMetadata(self, query_result, compile,
first_result, last_result):
query_result.keys_only = self.keys_only
if query_result.more_results:
cursor = query_result.cursor
cursor.app = self.app
cursor.cursor = self.cursor
if compile:
self._EncodeCompiledCursor(last_result, query_result.compiled_cursor)
if first_result:
query_result.index.extend(self.__index_list)
@classmethod
def _AcquireCursorID(cls):
"""Acquires the next cursor id in a thread safe manner."""
cls._next_cursor_lock.acquire()
try:
cursor_id = cls._next_cursor
cls._next_cursor += 1
finally:
cls._next_cursor_lock.release()
return cursor_id
def _IsBeforeCursor(self, record, cursor):
"""True if entity is before cursor according to the current order.
Args:
record: an EntityRecord.
cursor: a compiled cursor as returned by _DecodeCompiledCursor.
"""
comparison_entity = entity_pb2.EntityProto()
for prop in record.entity.property:
if prop.name in self.__cursor_properties:
comparison_entity.property.add().MergeFrom(prop)
if cursor[0].HasField('key'):
comparison_entity.key.MergeFrom(record.entity.key)
x = self.__order_compare_entities(comparison_entity, cursor[0])
if cursor[1]:
return x < 0
else:
return x <= 0
def _DecodeCompiledCursor(self, compiled_cursor):
"""Converts a compiled_cursor into a cursor_entity.
Args:
compiled_cursor: The datastore_pb.CompiledCursor to decode.
Returns:
(cursor_entity, inclusive): an entity_pb2.EntityProto and if it should
be included in the result set.
"""
assert compiled_cursor.HasField('postfix_position')
position = compiled_cursor.postfix_position
remaining_properties = set(self.__cursor_properties)
cursor_entity = entity_pb2.EntityProto()
if position.HasField('key'):
cursor_entity.key.CopyFrom(position.key)
try:
remaining_properties.remove('__key__')
except KeyError:
Check(False, 'Cursor does not match query: extra value __key__')
for index_value in position.index_value:
prop = cursor_entity.property.add()
prop.name = index_value.property_name
prop.value.CopyFrom(index_value.value)
try:
remaining_properties.remove(index_value.property_name)
except KeyError:
Check(
False, 'Cursor does not match query: extra value %s' %
index_value.property_name)
Check(not remaining_properties,
'Cursor does not match query: missing values for %r' %
remaining_properties)
return (cursor_entity, position.before)
def _EncodeCompiledCursor(self, last_result, compiled_cursor):
"""Converts the current state of the cursor into a compiled_cursor.
Args:
last_result: the last result returned by this query.
compiled_cursor: an empty datastore_pb.CompiledCursor.
"""
if last_result is not None:
position = compiled_cursor.postfix_position
if '__key__' in self.__cursor_properties:
position.key.MergeFrom(last_result.key)
for prop in last_result.property:
if prop.name in self.__cursor_properties:
index_value = position.index_value.add()
index_value.property_name = prop.name
index_value.value.CopyFrom(prop.value)
position.before = False
_SetBeforeAscending(position, self.__first_sort_order)
def PopulateQueryResult(self, result, count, deprecated_offset,
compile=False, first_result=False):
"""Populates a QueryResult with this cursor and the given number of results.
Args:
result: datastore_pb.QueryResult
count: integer of how many results to return, or None if not specified
deprecated_offset: integer of how many results to skip, deprecated.
compile: boolean, whether we are compiling this query
first_result: whether the query result is the first for this query
Raises:
datastore_errors.BadArgumentError: if the offset doesn't match the
original offset from the RunQuery call.
"""
if count is None:
count = self.__persisted_count
if (deprecated_offset is not None
and self.__persisted_offset != deprecated_offset):
raise datastore_errors.BadArgumentError(
'Invalid offset provided. Got %d expected %d.'
% (deprecated_offset, self.__persisted_offset))
self._PopulateQueryResult(result, count, self.__persisted_offset,
compile, first_result)
self.__persisted_offset -= result.skipped_results
def _PopulateQueryResult(self, result, count, offset,
compile, first_result):
raise NotImplementedError
class ListCursor(BaseCursor):
"""A query cursor over a list of entities.
Public properties:
keys_only: whether the query is keys_only
"""
def __init__(self, query, dsquery, orders, index_list, results):
"""Constructor.
Args:
query: the query request proto
dsquery: a datastore_query.Query over query.
orders: the orders of query as returned by _GuessOrders.
index_list: the list of indexes used by the query.
results: list of EntityRecord.
"""
super(ListCursor, self).__init__(query, dsquery, orders, index_list)
if self.group_by:
distincts = set()
new_results = []
for result in results:
key_value = _GetGroupByKey(result.entity, self.group_by)
if key_value not in distincts:
distincts.add(key_value)
new_results.append(result)
results = new_results
if query.shallow:
key_path_length = 1
if query.HasField('ancestor'):
key_path_length += len(query.ancestor.path.element)
new_results = []
for result in results:
if len(result.entity.key.path.element) == key_path_length:
new_results.append(result)
results = new_results
if (query.HasField('compiled_cursor') and
query.compiled_cursor.HasField('postfix_position')):
start_cursor = self._DecodeCompiledCursor(query.compiled_cursor)
self.__last_result = start_cursor[0]
start_cursor_position = self._GetCursorOffset(results, start_cursor)
else:
self.__last_result = None
start_cursor_position = 0
if query.HasField('end_compiled_cursor'):
if query.end_compiled_cursor.HasField('postfix_position'):
end_cursor = self._DecodeCompiledCursor(query.end_compiled_cursor)
end_cursor_position = self._GetCursorOffset(results, end_cursor)
else:
end_cursor_position = 0
else:
end_cursor_position = len(results)
results = results[start_cursor_position:end_cursor_position]
if query.HasField('limit'):
limit = query.limit
if query.offset:
limit += query.offset
if limit >= 0 and limit < len(results):
results = results[:limit]
self.__results = results
self.__offset = 0
self.__count = len(self.__results)
def _GetCursorOffset(self, results, cursor):
"""Converts a cursor into a offset into the result set even if the
cursor's entity no longer exists.
Args:
results: the query's results (sequence of entity_pb2.EntityProto)
cursor: a compiled cursor as returned by _DecodeCompiledCursor
Returns:
the integer offset
"""
lo = 0
hi = len(results)
while lo < hi:
mid = (lo + hi) // 2
if self._IsBeforeCursor(results[mid], cursor):
lo = mid + 1
else:
hi = mid
return lo
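  # Note: this is a plain binary search over the already-sorted result list
  # using _IsBeforeCursor, so a cursor whose entity has since been deleted
  # still resolves to the offset where that entity would have sorted.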
def _PopulateQueryResult(self, result, count, offset, compile, first_result):
Check(offset >= 0, 'Offset must be >= 0')
offset = min(offset, self.__count - self.__offset)
limited_offset = min(offset, _MAX_QUERY_OFFSET)
if limited_offset:
self.__offset += limited_offset
result.skipped_results = limited_offset
if compile and result.skipped_results > 0:
self._EncodeCompiledCursor(self.__results[self.__offset - 1].entity,
result.skipped_results_compiled_cursor)
if offset == limited_offset and count:
if count > _MAXIMUM_RESULTS:
count = _MAXIMUM_RESULTS
results = self.__results[self.__offset:self.__offset + count]
count = len(results)
self.__offset += count
records = [LoadRecord(record, self.keys_only, self.property_names)
for record in results]
entities = [record.entity for record in records]
versions = [
record.metadata.updated_version
for record in records
if record.metadata.HasField('updated_version')
]
result.result.extend(entities)
if len(versions) == len(entities):
result.version.extend(versions)
if compile:
for record in results:
self._EncodeCompiledCursor(record.entity,
result.result_compiled_cursor.add())
if self.__offset:
self.__last_result = self.__results[self.__offset - 1].entity
result.more_results = self.__offset < self.__count
self._PopulateResultMetadata(result, compile,
first_result, self.__last_result)
def _SynchronizeTxn(function):
"""A decorator that locks a transaction during the function call."""
def sync(txn, *args, **kwargs):
txn._lock.acquire()
try:
Check(txn._state is LiveTxn.ACTIVE, 'transaction closed')
return function(txn, *args, **kwargs)
finally:
txn._lock.release()
return sync
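# Illustrative usage sketch: LiveTxn methods below are wrapped as
#
#   @_SynchronizeTxn
#   def Get(self, reference):
#     ...
#
# so each call holds txn._lock, verifies the transaction is still ACTIVE, and
# releases the lock even when the wrapped method raises.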
def _GetEntityGroup(ref):
"""Returns the entity group key for the given reference."""
entity_group = entity_pb2.Reference()
entity_group.CopyFrom(ref)
assert (entity_group.path.element[0].HasField('id') or
entity_group.path.element[0].HasField('name'))
del entity_group.path.element[1:]
return entity_group
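# Illustrative sketch (hypothetical kinds): the entity group is the root
# element of the key path, so a reference with path Parent:1 / Child:2 maps
# to the entity group reference with path Parent:1.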
def _GetKeyKind(key):
"""Return the kind of the given key."""
return key.path.element[-1].type
def _FilterIndexesByKind(key, indexes):
"""Return only the indexes with the specified kind."""
return list(
filter((lambda index: index.definition.entity_type == _GetKeyKind(key)),
indexes))
class LiveTxn(object):
"""An in flight transaction."""
ACTIVE = 1
COMMITTED = 2
ROLLEDBACK = 3
FAILED = 4
_state = ACTIVE
_commit_time_s = None
def __init__(self, txn_manager, app, allow_multiple_eg, mode):
assert isinstance(txn_manager, BaseTransactionManager)
assert isinstance(app, six.string_types)
self._txn_manager = txn_manager
self._app = app
self._allow_multiple_eg = allow_multiple_eg
self._mode = mode
self._entity_groups = {}
self._lock = threading.RLock()
self._apply_lock = threading.Lock()
self._actions = []
self._cost = datastore_pb.Cost()
self._mutation_versions = {}
self._mutated_references = []
self._kind_to_indexes = collections.defaultdict(list)
def _GetTracker(self, reference):
"""Gets the entity group tracker for reference.
If this is the first time reference's entity group is seen, creates a new
tracker, checking that the transaction doesn't exceed the entity group
limit.
"""
entity_group = _GetEntityGroup(reference)
key = datastore_types.ReferenceToKeyValue(entity_group)
tracker = self._entity_groups.get(key, None)
if tracker is None:
Check(
self._app == reference.app,
'Transactions cannot span applications (expected %s, got %s)' %
(self._app, reference.app))
if self._allow_multiple_eg:
Check(len(self._entity_groups) < _MAX_EG_PER_TXN,
'operating on too many entity groups in a single transaction.')
else:
Check(len(self._entity_groups) < 1,
'cross-groups transaction need to be explicitly '
'specified (xg=True)')
tracker = EntityGroupTracker(entity_group)
self._entity_groups[key] = tracker
return tracker
def _GetAllTrackers(self):
"""Get the trackers for the transaction's entity groups.
    If no entity group has been discovered, returns a 'global' entity group
tracker. This is possible if the txn only contains transactional tasks.
Returns:
The tracker list for the entity groups used in this txn.
"""
if not self._entity_groups:
self._GetTracker(datastore_types.Key.from_path(
'__global__', 1, _app=self._app)._ToPb())
return list(self._entity_groups.values())
def _GrabSnapshot(self, reference):
"""Gets snapshot for this reference, creating it if necessary.
If no snapshot has been set for reference's entity group, a snapshot is
taken and stored for future reads (this also sets the read position),
and a CONCURRENT_TRANSACTION exception is thrown if we no longer have
a consistent snapshot.
Args:
      reference: An entity_pb2.Reference from which to extract the entity group.
Raises:
apiproxy_errors.ApplicationError if the snapshot is not consistent.
"""
tracker = self._GetTracker(reference)
check_contention = tracker._snapshot is None
snapshot = tracker._GrabSnapshot(self._txn_manager)
if check_contention:
candidates = [other for other in self._entity_groups.values()
if other._snapshot is not None and other != tracker]
meta_data_list = [other._meta_data for other in candidates]
self._txn_manager._AcquireWriteLocks(meta_data_list)
try:
for other in candidates:
if other._meta_data._log_pos != other._read_pos:
self._state = self.FAILED
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.CONCURRENT_TRANSACTION,
'Concurrency exception.')
finally:
self._txn_manager._ReleaseWriteLocks(meta_data_list)
return snapshot
@_SynchronizeTxn
def Get(self, reference):
"""Returns the entity associated with the given entity_pb2.Reference or None.
Does not see any modifications in the current txn.
Args:
reference: The entity_pb2.Reference of the entity to look up.
Returns:
The associated entity_pb2.EntityProto or None if no such entity exists.
"""
snapshot = self._GrabSnapshot(reference)
record = snapshot.get(datastore_types.ReferenceToKeyValue(reference))
return LoadRecord(record)
@_SynchronizeTxn
def GetQueryCursor(self, query, filters, orders, index_list):
"""Runs the given datastore_pb.Query and returns a QueryCursor for it.
Does not see any modifications in the current txn.
Args:
query: The datastore_pb.Query to run.
filters: A list of filters that override the ones found on query.
orders: A list of orders that override the ones found on query.
index_list: A list of indexes used by the query.
Returns:
A BaseCursor that can be used to fetch query results.
"""
Check(
query.HasField('ancestor'),
'Query must have an ancestor when performed in a transaction.')
snapshot = self._GrabSnapshot(query.ancestor)
return _ExecuteQuery(
list(snapshot.values()), query, filters, orders, index_list)
@_SynchronizeTxn
def Put(self, entity, insert, indexes):
"""Puts the given entity.
Args:
entity: The entity_pb2.EntityProto to put.
insert: A boolean that indicates if we should fail if the entity already
exists.
indexes: The composite indexes that apply to the entity.
"""
Check(self._mode != datastore_pb.BeginTransactionRequest.READ_ONLY,
'Cannot modify entities in a read-only transaction.')
tracker = self._GetTracker(entity.key)
key = datastore_types.ReferenceToKeyValue(entity.key)
tracker._delete.pop(key, None)
tracker._put[key] = (entity, insert)
self._kind_to_indexes[_GetKeyKind(entity.key)] = indexes
@_SynchronizeTxn
def Delete(self, reference, indexes):
"""Deletes the entity associated with the given reference.
Args:
reference: The entity_pb2.Reference of the entity to delete.
indexes: The composite indexes that apply to the entity.
"""
Check(self._mode != datastore_pb.BeginTransactionRequest.READ_ONLY,
'Cannot modify entities in a read-only transaction.')
tracker = self._GetTracker(reference)
key = datastore_types.ReferenceToKeyValue(reference)
tracker._put.pop(key, None)
tracker._delete[key] = reference
self._kind_to_indexes[_GetKeyKind(reference)] = indexes
@_SynchronizeTxn
def AddActions(self, actions, max_actions=None):
"""Adds the given actions to the current txn.
Args:
actions: A list of pbs to send to taskqueue.Add when the txn is applied.
max_actions: A number that indicates the maximum number of actions to
allow on this txn.
"""
Check(not max_actions or len(self._actions) + len(actions) <= max_actions,
'Too many messages, maximum allowed %s' % max_actions)
Check(self._mode != datastore_pb.BeginTransactionRequest.READ_ONLY,
'Cannot add actions in a read-only transaction.')
self._actions.extend(actions)
def Rollback(self):
"""Rollback the current txn."""
self._lock.acquire()
try:
Check(self._state is self.ACTIVE or self._state is self.FAILED,
'transaction closed')
self._state = self.ROLLEDBACK
finally:
self._txn_manager._RemoveTxn(self)
self._lock.release()
@_SynchronizeTxn
def Commit(self):
"""Commits the current txn.
This function hands off the responsibility of calling _Apply to the owning
TransactionManager.
Returns:
The cost of the transaction.
"""
try:
trackers = self._GetAllTrackers()
empty = True
for tracker in trackers:
snapshot = tracker._GrabSnapshot(self._txn_manager)
empty = empty and not tracker._put and not tracker._delete
for entity, insert in six.itervalues(tracker._put):
Check(
not insert or self.Get(entity.key) is None,
'the id allocated for a new entity was already '
'in use, please try again')
old_entity = None
old_version = None
key = datastore_types.ReferenceToKeyValue(entity.key)
self._mutated_references.append(entity.key)
if key in snapshot:
old_entity = snapshot[key].entity
old_version = snapshot[key].metadata.updated_version
self._AddWriteOps(old_entity, entity)
if _IsNoOpWrite(old_entity, entity):
self._mutation_versions[key] = int(old_version)
for reference in six.itervalues(tracker._delete):
old_entity = None
key = datastore_types.ReferenceToKeyValue(reference)
self._mutated_references.append(reference)
if key in snapshot:
old_entity = snapshot[key].entity
self._AddWriteOps(None, old_entity)
if _IsNoOpWrite(old_entity, None):
self._mutation_versions[key] = int(tracker._read_timestamp)
if empty and not self._actions:
self.Rollback()
return datastore_pb.Cost()
meta_data_list = [tracker._meta_data for tracker in trackers]
self._txn_manager._AcquireWriteLocks(meta_data_list)
except:
raise
try:
for tracker in trackers:
Check(tracker._meta_data._log_pos == tracker._read_pos,
'Concurrency exception.',
datastore_pb.Error.CONCURRENT_TRANSACTION)
for tracker in trackers:
tracker._meta_data.Log(self)
self._state = self.COMMITTED
self._commit_time_s = time.time()
write_timestamp = self._txn_manager._IncrementAndGetCommitTimestamp()
for reference in self._mutated_references:
key = datastore_types.ReferenceToKeyValue(reference)
if key not in self._mutation_versions:
self._mutation_versions[key] = int(write_timestamp)
except:
self.Rollback()
raise
else:
for action in self._actions:
try:
apiproxy_stub_map.MakeSyncCall(
'taskqueue', 'Add', action, api_base_pb2.VoidProto())
except apiproxy_errors.ApplicationError as e:
logger.warning('Transactional task %s has been dropped, %s',
action, e)
self._actions = []
finally:
self._txn_manager._RemoveTxn(self)
self._txn_manager._ReleaseWriteLocks(meta_data_list)
self._txn_manager._consistency_policy._OnCommit(self)
return self._cost
def GetMutationVersion(self, reference):
"""Returns the version of an entity after this transaction has committed."""
assert self._state == self.COMMITTED
key = datastore_types.ReferenceToKeyValue(reference)
return self._mutation_versions[key]
def _AddWriteOps(self, old_entity, new_entity):
"""Adds the cost of writing the new_entity to the _cost member.
We assume that old_entity represents the current state of the Datastore.
Args:
old_entity: Entity representing the current state in the Datastore.
new_entity: Entity representing the desired state in the Datastore.
"""
composite_indexes = self._kind_to_indexes[_GetKeyKind(new_entity.key)]
entity_writes, index_writes = _CalculateWriteOps(
composite_indexes, old_entity, new_entity)
_UpdateCost(self._cost, entity_writes, index_writes)
def _Apply(self, meta_data):
"""Applies the current txn on the given entity group.
This function blindly performs the operations contained in the current txn.
The calling function must acquire the entity group write lock and ensure
transactions are applied in order.
"""
self._apply_lock.acquire()
try:
assert self._state == self.COMMITTED
for tracker in self._entity_groups.values():
if tracker._meta_data is meta_data:
break
else:
assert False
assert tracker._read_pos != tracker.APPLIED
for entity, insert in six.itervalues(tracker._put):
key = datastore_types.ReferenceToKeyValue(entity.key)
if key in tracker._snapshot:
metadata = tracker._snapshot[key].metadata
else:
metadata = entity_pb2.EntityMetadata()
metadata.updated_version = self.GetMutationVersion(entity.key)
record = EntityRecord(entity, metadata)
self._txn_manager._Put(record, insert)
for key in six.itervalues(tracker._delete):
self._txn_manager._Delete(key)
tracker._read_pos = EntityGroupTracker.APPLIED
tracker._meta_data.Unlog(self)
finally:
self._apply_lock.release()
class EntityRecord(object):
"""An EntityProto and its associated EntityMetadata protobuf."""
def __init__(self, entity, metadata=None):
self.entity = entity
self.metadata = metadata or entity_pb2.EntityMetadata()
class EntityGroupTracker(object):
"""An entity group involved a transaction."""
APPLIED = -2
_read_pos = None
_read_timestamp = None
_snapshot = None
_meta_data = None
def __init__(self, entity_group):
self._entity_group = entity_group
self._put = {}
self._delete = {}
def _GrabSnapshot(self, txn_manager):
"""Snapshot this entity group, remembering the read position."""
if self._snapshot is None:
self._meta_data, self._read_pos, self._read_timestamp, self._snapshot = (
txn_manager._GrabSnapshot(self._entity_group))
return self._snapshot
@functools.total_ordering
class EntityGroupMetaData(object):
"""The meta_data assoicated with an entity group."""
_log_pos = -1
_read_timestamp = None
_snapshot = None
def __init__(self, entity_group):
self._entity_group = entity_group
self._write_lock = threading.Lock()
self._apply_queue = []
def CatchUp(self):
"""Applies all outstanding txns."""
assert self._write_lock.acquire(False) is False
while self._apply_queue:
self._apply_queue[0]._Apply(self)
def Log(self, txn):
"""Add a pending transaction to this entity group.
Requires that the caller hold the meta data lock.
This also increments the current log position and clears the snapshot cache.
"""
assert self._write_lock.acquire(False) is False
self._apply_queue.append(txn)
self._log_pos += 1
self._snapshot = None
def Unlog(self, txn):
"""Remove the first pending transaction from the apply queue.
Requires that the caller hold the meta data lock.
This checks that the first pending transaction is indeed txn.
"""
assert self._write_lock.acquire(False) is False
Check(self._apply_queue and self._apply_queue[0] is txn,
'Transaction is not appliable',
datastore_pb.Error.INTERNAL_ERROR)
self._apply_queue.pop(0)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self is other
def __lt__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return id(self) < id(other)
class BaseConsistencyPolicy(object):
"""A base class for a consistency policy to be used with a transaction manger.
"""
def _OnCommit(self, txn):
"""Called after a LiveTxn has been committed.
This function can decide whether to apply the txn right away.
Args:
txn: A LiveTxn that has been committed
"""
raise NotImplementedError
def _OnGroom(self, meta_data_list):
"""Called once for every global query.
    This function must acquire the write lock for any meta data before applying
any outstanding txns.
Args:
meta_data_list: A list of EntityGroupMetaData objects.
"""
raise NotImplementedError
class MasterSlaveConsistencyPolicy(BaseConsistencyPolicy):
"""Enforces the Master / Slave consistency policy.
Applies all txn on commit.
"""
def _OnCommit(self, txn):
for tracker in txn._GetAllTrackers():
tracker._meta_data._write_lock.acquire()
try:
tracker._meta_data.CatchUp()
finally:
tracker._meta_data._write_lock.release()
txn._txn_manager.Write()
def _OnGroom(self, meta_data_list):
pass
class BaseHighReplicationConsistencyPolicy(BaseConsistencyPolicy):
"""A base class for High Replication Datastore consistency policies.
  All txns are applied asynchronously.
"""
def _OnCommit(self, txn):
pass
def _OnGroom(self, meta_data_list):
for meta_data in meta_data_list:
if not meta_data._apply_queue:
continue
meta_data._write_lock.acquire()
try:
while meta_data._apply_queue:
txn = meta_data._apply_queue[0]
if self._ShouldApply(txn, meta_data):
txn._Apply(meta_data)
else:
break
finally:
meta_data._write_lock.release()
def _ShouldApply(self, txn, meta_data):
"""Determines if the given transaction should be applied."""
raise NotImplementedError
class TimeBasedHRConsistencyPolicy(BaseHighReplicationConsistencyPolicy):
"""A High Replication Datastore consistency policy based on elapsed time.
This class tries to simulate performance seen in the high replication
datastore using estimated probabilities of a transaction committing after a
given amount of time.
"""
_classification_map = [(.98, 100),
(.99, 300),
(.995, 2000),
(1, 240000)
]
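  # Illustrative reading of the default map above: a committed transaction is
  # assigned a random classification in [0, 1); with these defaults ~98% of
  # transactions become applicable 100ms after commit, ~99% after 300ms,
  # ~99.5% after 2s, and all of them within 240s (see _ShouldApplyImpl).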
def SetClassificationMap(self, classification_map):
"""Set the probability a txn will be applied after a given amount of time.
Args:
classification_map: A list of tuples containing (float between 0 and 1,
number of milliseconds) that define the probability of a transaction
applying after a given amount of time.
"""
for prob, delay in classification_map:
if prob < 0 or prob > 1 or delay <= 0:
raise TypeError(
'classification_map must be a list of (probability, delay) tuples, '
'found %r' % (classification_map,))
self._classification_map = sorted(classification_map)
def _ShouldApplyImpl(self, elapsed_ms, classification):
for rate, ms in self._classification_map:
if classification <= rate:
break
return elapsed_ms >= ms
def _Classify(self, txn, meta_data):
return random.Random(id(txn) ^ id(meta_data)).random()
def _ShouldApply(self, txn, meta_data):
elapsed_ms = (time.time() - txn._commit_time_s) * 1000
classification = self._Classify(txn, meta_data)
return self._ShouldApplyImpl(elapsed_ms, classification)
class PseudoRandomHRConsistencyPolicy(BaseHighReplicationConsistencyPolicy):
"""A policy that always gives the same sequence of consistency decisions."""
def __init__(self, probability=.5, seed=0):
"""Constructor.
Args:
probability: A number between 0 and 1 that is the likelihood of a
transaction applying before a global query is executed.
seed: A hashable object to use as a seed. Use None to use the current
timestamp.
"""
self.is_using_cloud_datastore_emulator = False
self.emulator_port = None
self.SetProbability(probability)
self.SetSeed(seed)
def SetProbability(self, probability):
"""Change the probability of a transaction applying.
Args:
probability: A number between 0 and 1 that determines the probability of a
transaction applying before a global query is run.
"""
if probability < 0 or probability > 1:
raise TypeError('probability must be a number between 0 and 1, found %r' %
probability)
self._probability = probability
if self.is_using_cloud_datastore_emulator:
UpdateEmulatorConfig(port=self.emulator_port, consistency_policy=self)
def SetSeed(self, seed):
"""Reset the seed."""
self._random = random.Random(seed)
self._seed = seed
if self.is_using_cloud_datastore_emulator:
UpdateEmulatorConfig(port=self.emulator_port, consistency_policy=self)
@property
def probability(self):
"""Return the probability of applying a job."""
return self._probability
@property
def random_seed(self):
"""Return the random seed."""
return self._seed
def _ShouldApply(self, txn, meta_data):
return self._random.random() < self._probability
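# Illustrative usage sketch (the probability values are hypothetical choices):
#
#   policy = PseudoRandomHRConsistencyPolicy(probability=0)  # never auto-apply
#   policy = PseudoRandomHRConsistencyPolicy(probability=1)  # always apply
#
# The policy instance is typically passed as the consistency_policy argument
# of the datastore stub, which forwards it to BaseTransactionManager below.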
class BaseTransactionManager(object):
"""A class that manages the state of transactions.
  This includes creating consistent snapshots for transactions.
"""
_commit_timestamp = MINIMUM_VERSION
def __init__(self, consistency_policy=None):
super(BaseTransactionManager, self).__init__()
self._consistency_policy = (consistency_policy or
MasterSlaveConsistencyPolicy())
self._meta_data_lock = threading.Lock()
self._commit_timestamp_lock = threading.Lock()
BaseTransactionManager.Clear(self)
def SetConsistencyPolicy(self, policy):
"""Set the consistency to use.
Causes all data to be flushed.
Args:
      policy: An object inheriting from BaseConsistencyPolicy.
"""
if not isinstance(policy, BaseConsistencyPolicy):
raise TypeError('policy should be of type '
'datastore_stub_util.BaseConsistencyPolicy found %r.' %
(policy,))
self.Flush()
self._consistency_policy = policy
def Clear(self):
"""Discards any pending transactions and resets the meta data."""
self._meta_data = {}
self._txn_map = {}
def BeginTransaction(self, app, allow_multiple_eg, previous_transaction=None,
mode=datastore_pb.BeginTransactionRequest.UNKNOWN):
"""Start a transaction on the given app.
Args:
app: A string representing the app for which to start the transaction.
allow_multiple_eg: True if transactions can span multiple entity groups.
previous_transaction: The transaction to reset.
mode: Mode of the transaction.
Returns:
A datastore_pb.Transaction() for the created transaction
"""
Check((previous_transaction is None) or
mode == datastore_pb.BeginTransactionRequest.READ_WRITE,
'previous_transaction can only be set in READ_WRITE mode')
if previous_transaction is not None:
previous_live_txn = self._txn_map.get(previous_transaction.handle)
if previous_live_txn is not None:
if previous_live_txn._app == app:
Check(previous_live_txn._allow_multiple_eg == allow_multiple_eg,
'Transaction should have same options as previous_transaction')
previous_live_txn.Rollback()
txn = self._BeginTransaction(app, allow_multiple_eg, mode)
self._txn_map[id(txn)] = txn
transaction = datastore_pb.Transaction()
transaction.app = app
transaction.handle = id(txn)
return transaction
def GetTxn(self, transaction, request_trusted, request_app):
"""Gets the LiveTxn object associated with the given transaction.
Args:
transaction: The datastore_pb.Transaction() to look up.
      request_trusted: A boolean indicating whether the requesting app is trusted.
request_app: A string representing the app making the request.
Returns:
The associated LiveTxn object.
"""
request_app = datastore_types.ResolveAppId(request_app)
CheckTransaction(request_trusted, request_app, transaction)
txn = self._txn_map.get(transaction.handle)
Check(txn and txn._app == transaction.app,
'Transaction(<%s>) not found' % str(transaction).replace('\n', ', '))
return txn
def Groom(self):
"""Attempts to apply any outstanding transactions.
    The consistency policy determines if a transaction should be applied.
"""
self._meta_data_lock.acquire()
try:
self._consistency_policy._OnGroom(six.itervalues(self._meta_data))
finally:
self._meta_data_lock.release()
def Flush(self):
"""Applies all outstanding transactions."""
self._meta_data_lock.acquire()
try:
for meta_data in six.itervalues(self._meta_data):
if not meta_data._apply_queue:
continue
meta_data._write_lock.acquire()
try:
meta_data.CatchUp()
finally:
meta_data._write_lock.release()
finally:
self._meta_data_lock.release()
def _IncrementAndGetCommitTimestamp(self):
with self._commit_timestamp_lock:
now_ms = int(time.time() * 1000)
self._commit_timestamp = max(self._commit_timestamp + 1, now_ms)
return self._commit_timestamp
def _GetReadTimestamp(self):
return self._commit_timestamp
def _GetMetaData(self, entity_group):
"""Safely gets the EntityGroupMetaData object for the given entity_group.
"""
self._meta_data_lock.acquire()
try:
key = datastore_types.ReferenceToKeyValue(entity_group)
meta_data = self._meta_data.get(key, None)
if not meta_data:
meta_data = EntityGroupMetaData(entity_group)
self._meta_data[key] = meta_data
return meta_data
finally:
self._meta_data_lock.release()
def _BeginTransaction(self, app, allow_multiple_eg,
mode=datastore_pb.BeginTransactionRequest.UNKNOWN):
"""Starts a transaction without storing it in the txn_map."""
return LiveTxn(self, app, allow_multiple_eg, mode)
def _GrabSnapshot(self, entity_group):
"""Grabs a consistent snapshot of the given entity group.
Args:
      entity_group: An entity_pb2.Reference of the entity group of which the
snapshot should be taken.
Returns:
      A tuple of (meta_data, log_pos, read_timestamp, snapshot) where log_pos
      is the current log position, read_timestamp is the timestamp of the
      snapshot and snapshot is a map of reference key value to EntityRecord.
"""
meta_data = self._GetMetaData(entity_group)
meta_data._write_lock.acquire()
try:
if not meta_data._snapshot:
meta_data.CatchUp()
meta_data._snapshot = self._GetEntitiesInEntityGroup(entity_group)
meta_data._read_timestamp = self._GetReadTimestamp()
return (meta_data, meta_data._log_pos, meta_data._read_timestamp,
meta_data._snapshot)
finally:
meta_data._write_lock.release()
def _AcquireWriteLocks(self, meta_data_list):
"""Acquire the write locks for the given entity group meta data.
    These locks must be released with _ReleaseWriteLocks before returning to the
user.
Args:
meta_data_list: list of EntityGroupMetaData objects.
"""
for meta_data in sorted(meta_data_list):
meta_data._write_lock.acquire()
def _ReleaseWriteLocks(self, meta_data_list):
"""Release the write locks of the given entity group meta data.
Args:
meta_data_list: list of EntityGroupMetaData objects.
"""
for meta_data in sorted(meta_data_list):
meta_data._write_lock.release()
def _RemoveTxn(self, txn):
"""Removes a LiveTxn from the txn_map (if present)."""
self._txn_map.pop(id(txn), None)
def _Put(self, record, insert):
"""Put the given entity record.
This must be implemented by a sub-class. The sub-class can assume that any
    needed consistency is enforced at a higher level (and can just put blindly).
Args:
record: The EntityRecord to put.
insert: A boolean that indicates if we should fail if the entity already
exists.
"""
raise NotImplementedError
def _Delete(self, reference):
"""Delete the entity associated with the specified reference.
This must be implemented by a sub-class. The sub-class can assume that any
    needed consistency is enforced at a higher level (and can just delete
blindly).
Args:
reference: The entity_pb2.Reference of the entity to delete.
"""
raise NotImplementedError
def _GetEntitiesInEntityGroup(self, entity_group):
"""Gets the contents of a specific entity group.
This must be implemented by a sub-class. The sub-class can assume that any
    needed consistency is enforced at a higher level (and can just blindly read).
Other entity groups may be modified concurrently.
Args:
      entity_group: An entity_pb2.Reference of the entity group to get.
Returns:
A dict mapping datastore_types.ReferenceToKeyValue(key) to EntityRecord.
"""
raise NotImplementedError
class BaseIndexManager(object):
"""A generic index manager that stores all data in memory."""
WRITE_ONLY = entity_pb2.CompositeIndex.WRITE_ONLY
READ_WRITE = entity_pb2.CompositeIndex.READ_WRITE
DELETED = entity_pb2.CompositeIndex.DELETED
ERROR = entity_pb2.CompositeIndex.ERROR
_INDEX_STATE_TRANSITIONS = {
WRITE_ONLY: frozenset((READ_WRITE, DELETED, ERROR)),
READ_WRITE: frozenset((DELETED,)),
ERROR: frozenset((DELETED,)),
DELETED: frozenset((ERROR,)),
}
def __init__(self):
self.__indexes = collections.defaultdict(list)
self.__indexes_lock = threading.Lock()
self.__next_index_id = 1
self.__index_id_lock = threading.Lock()
def __FindIndex(self, index):
"""Finds an existing index by definition.
Args:
index: entity_pb2.CompositeIndex
Returns:
entity_pb2.CompositeIndex, if it exists; otherwise None
"""
app = index.app_id
if app in self.__indexes:
for stored_index in self.__indexes[app]:
if index.definition == stored_index.definition:
return stored_index
return None
def CreateIndex(self, index, trusted=False, calling_app=None):
calling_app = datastore_types.ResolveAppId(calling_app)
CheckAppId(trusted, calling_app, index.app_id)
Check(index.id == 0, 'New index id must be 0.')
Check(not self.__FindIndex(index), 'Index already exists.')
self.__index_id_lock.acquire()
index.id = self.__next_index_id
self.__next_index_id += 1
self.__index_id_lock.release()
clone = entity_pb2.CompositeIndex()
clone.CopyFrom(index)
app = index.app_id
clone.app_id = app
self.__indexes_lock.acquire()
try:
self.__indexes[app].append(clone)
finally:
self.__indexes_lock.release()
self._OnIndexChange(index.app_id)
return index.id
def GetIndexes(self, app, trusted=False, calling_app=None):
"""Get the CompositeIndex objects for the given app."""
calling_app = datastore_types.ResolveAppId(calling_app)
CheckAppId(trusted, calling_app, app)
return self.__indexes[app]
def UpdateIndex(self, index, trusted=False, calling_app=None):
CheckAppId(trusted, calling_app, index.app_id)
stored_index = self.__FindIndex(index)
Check(stored_index, 'Index does not exist.')
Check(index.state == stored_index.state or
index.state in self._INDEX_STATE_TRANSITIONS[stored_index.state],
'cannot move index state from %s to %s' %
(stored_index.state, index.state))
self.__indexes_lock.acquire()
try:
stored_index.state = index.state
finally:
self.__indexes_lock.release()
self._OnIndexChange(index.app_id)
def DeleteIndex(self, index, trusted=False, calling_app=None):
CheckAppId(trusted, calling_app, index.app_id)
stored_index = self.__FindIndex(index)
Check(stored_index, 'Index does not exist.')
app = index.app_id
self.__indexes_lock.acquire()
try:
self.__indexes[app].remove(stored_index)
finally:
self.__indexes_lock.release()
self._OnIndexChange(index.app_id)
def _SideLoadIndex(self, index):
self.__indexes[index.app].append(index)
def _OnIndexChange(self, app_id):
pass
def _CheckAutoIdPolicy(auto_id_policy):
"""Check value of auto_id_policy.
Args:
auto_id_policy: string constant.
Raises:
TypeError: if auto_id_policy is not one of SEQUENTIAL or SCATTERED.
"""
valid_policies = (SEQUENTIAL, SCATTERED)
if auto_id_policy not in valid_policies:
    raise TypeError('auto_id_policy must be in %s, found %s instead' %
                    (valid_policies, auto_id_policy))
class BaseDatastore(BaseTransactionManager, BaseIndexManager):
"""A base implementation of a Datastore.
This class implements common functions associated with a datastore and
enforces security restrictions passed on by a stub or client. It is designed
to be shared by any number of threads or clients serving any number of apps.
If an app is not specified explicitly it is pulled from the env and assumed to
be untrusted.
"""
_MAX_QUERY_COMPONENTS = 100
_BATCH_SIZE = 20
_MAX_ACTIONS_PER_TXN = 5
def __init__(self, require_indexes=False, consistency_policy=None,
use_atexit=True, auto_id_policy=SEQUENTIAL):
BaseTransactionManager.__init__(self, consistency_policy=consistency_policy)
BaseIndexManager.__init__(self)
self._require_indexes = require_indexes
self._pseudo_kinds = {}
self.SetAutoIdPolicy(auto_id_policy)
if use_atexit:
atexit.register(self.Write)
def Clear(self):
"""Clears out all stored values."""
BaseTransactionManager.Clear(self)
def _RegisterPseudoKind(self, kind):
"""Registers a pseudo kind to be used to satisfy a meta data query."""
self._pseudo_kinds[kind.name] = kind
kind._stub = weakref.proxy(self)
def GetQueryCursor(self, raw_query, trusted=False, calling_app=None):
"""Execute a query.
Args:
raw_query: The non-validated datastore_pb.Query to run.
trusted: If the calling app is trusted.
calling_app: The app requesting the results or None to pull the app from
the environment.
Returns:
A BaseCursor that can be used to retrieve results.
"""
calling_app = datastore_types.ResolveAppId(calling_app)
CheckAppId(trusted, calling_app, raw_query.app)
filters, orders = datastore_index.Normalize(raw_query.filter,
raw_query.order,
raw_query.property_name)
CheckQuery(raw_query, filters, orders, self._MAX_QUERY_COMPONENTS)
FillUsersInQuery(filters)
if self._require_indexes:
self._CheckHasIndex(raw_query, trusted, calling_app)
index_list = self.__IndexListForQuery(raw_query)
if raw_query.HasField('transaction'):
Check(raw_query.kind not in self._pseudo_kinds,
'transactional queries on "%s" not allowed' % raw_query.kind)
txn = self.GetTxn(raw_query.transaction, trusted, calling_app)
return txn.GetQueryCursor(raw_query, filters, orders, index_list)
if raw_query.HasField(
'ancestor') and raw_query.kind not in self._pseudo_kinds:
txn = self._BeginTransaction(
raw_query.app, False, datastore_pb.BeginTransactionRequest.READ_ONLY)
return txn.GetQueryCursor(raw_query, filters, orders, index_list)
self.Groom()
return self._GetQueryCursor(raw_query, filters, orders, index_list)
def __IndexListForQuery(self, query):
"""Get the single composite index pb used by the query, if any, as a list.
Args:
query: the datastore_pb.Query to compute the index list for
Returns:
      A singleton list of the composite index pb used by the query, or an
      empty list if the query does not require a composite index.
"""
required, kind, ancestor, props = (
datastore_index.CompositeIndexForQuery(query))
if not required:
return []
composite_index_pb = entity_pb2.CompositeIndex()
composite_index_pb.app_id = query.app
composite_index_pb.id = 0
composite_index_pb.state = entity_pb2.CompositeIndex.READ_WRITE
index_pb = composite_index_pb.definition
index_pb.entity_type = kind
index_pb.ancestor = bool(ancestor)
for prop in datastore_index.GetRecommendedIndexProperties(props):
prop_pb = entity_pb2.Index.Property()
prop.CopyToIndexPb(prop_pb)
index_pb.property.append(prop_pb)
return [composite_index_pb]
def Get(self, raw_keys, transaction=None, eventual_consistency=False,
trusted=False, calling_app=None):
"""Get the entity records for the given keys.
Args:
raw_keys: A list of unverified entity_pb2.Reference objects.
transaction: The datastore_pb.Transaction to use or None.
eventual_consistency: If we should allow stale, potentially inconsistent
results.
trusted: If the calling app is trusted.
calling_app: The app requesting the results or None to pull the app from
the environment.
Returns:
A list containing the records in the same order as the list of keys.
"""
if not raw_keys:
return []
calling_app = datastore_types.ResolveAppId(calling_app)
records = []
if not transaction and eventual_consistency:
self.Groom()
for key in raw_keys:
        CheckReference(trusted, calling_app, key)
records.append(self._GetWithPseudoKinds(None, key,
eventual_consistency))
else:
grouped_keys = collections.defaultdict(list)
for i, key in enumerate(raw_keys):
CheckReference(trusted, calling_app, key)
entity_group = _GetEntityGroup(key)
entity_group_key = datastore_types.ReferenceToKeyValue(entity_group)
grouped_keys[entity_group_key].append((key, i))
if transaction:
txn = self.GetTxn(transaction, trusted, calling_app)
records = [self._GetWithPseudoKinds(txn, key, eventual_consistency)
for key in raw_keys]
else:
records = [None] * len(raw_keys)
def op(txn, v):
key, i = v
records[i] = self._GetWithPseudoKinds(txn, key, eventual_consistency)
for keys in six.itervalues(grouped_keys):
self._RunInTxn(keys, keys[0][0].app, op)
return records
def _GetWithPseudoKinds(self, txn, key, eventual_consistency=False):
"""Fetch entity key in txn, taking account of pseudo-kinds."""
pseudo_kind = self._pseudo_kinds.get(_GetKeyKind(key), None)
if pseudo_kind:
return EntityRecord(pseudo_kind.Get(txn, key))
else:
if txn:
record = txn.Get(key)
else:
record = self._Get(key)
if not record:
metadata = entity_pb2.EntityMetadata()
if not eventual_consistency:
metadata.updated_version = self._GetReadTimestamp()
record = EntityRecord(None, metadata)
return record
def Put(self, raw_entities, cost, transaction=None,
trusted=False, calling_app=None):
"""Writes the given given entities.
Updates an entity's key and entity_group in place if needed
Args:
raw_entities: A list of unverified entity_pb2.EntityProto objects.
cost: Out param. The cost of putting the provided entities.
transaction: The datastore_pb.Transaction() to use or None.
trusted: If the calling app is trusted.
calling_app: The app requesting the results or None to pull the app from
the environment.
Returns:
      A list of (entity_pb2.Reference, version number) tuples indicating
where each entity was stored and at which version. When a transaction is
provided, all version numbers are None.
"""
if not raw_entities:
return []
calling_app = datastore_types.ResolveAppId(calling_app)
result = [None] * len(raw_entities)
grouped_entities = collections.defaultdict(list)
for i, raw_entity in enumerate(raw_entities):
CheckEntity(trusted, calling_app, raw_entity)
entity = entity_pb2.EntityProto()
entity.CopyFrom(raw_entity)
for prop in itertools.chain(entity.property, entity.raw_property):
FillUser(prop)
last_element = entity.key.path.element[-1]
if not (last_element.id or last_element.HasField('name')):
insert = True
if self._auto_id_policy == SEQUENTIAL:
last_element.id = self._AllocateSequentialIds(entity.key)[0]
else:
full_key = self._AllocateIds([entity.key])[0]
last_element.id = full_key.path.element[-1].id
else:
insert = False
entity_group = _GetEntityGroup(entity.key)
entity.entity_group.CopyFrom(entity_group.path)
entity_group_key = datastore_types.ReferenceToKeyValue(entity_group)
grouped_entities[entity_group_key].append((entity, insert, i))
key = entity_pb2.Reference()
key.CopyFrom(entity.key)
result[i] = key
mutation_versions = [None] * len(raw_entities)
if transaction:
txn = self.GetTxn(transaction, trusted, calling_app)
for group in grouped_entities.values():
for entity, insert, _ in group:
indexes = _FilterIndexesByKind(
entity.key, self.GetIndexes(entity.key.app, trusted, calling_app))
txn.Put(entity, insert, indexes)
else:
for entities in six.itervalues(grouped_entities):
txn = self._RunInTxn(
entities,
entities[0][0].key.app,
lambda txn, v: txn.Put(
v[0], v[1],
_FilterIndexesByKind(
v[0].key, self.GetIndexes(v[0].key.app, trusted, calling_app
))))
for entity, _, index in entities:
mutation_versions[index] = txn.GetMutationVersion(entity.key)
_UpdateCost(cost, txn._cost.entity_writes, txn._cost.index_writes)
return list(zip(result, mutation_versions))
def Delete(self, raw_keys, cost, transaction=None,
trusted=False, calling_app=None):
"""Deletes the entities associated with the given keys.
Args:
raw_keys: A list of unverified entity_pb2.Reference objects.
      cost: Out param. The cost of deleting the provided entities.
transaction: The datastore_pb.Transaction() to use or None.
trusted: If the calling app is trusted.
calling_app: The app requesting the results or None to pull the app from
the environment.
Returns:
      A list of version numbers at which the entities were deleted, one for
      each given key. All version numbers are None if a transaction is given.
"""
if not raw_keys:
return []
calling_app = datastore_types.ResolveAppId(calling_app)
grouped_keys = collections.defaultdict(list)
for i, key in enumerate(raw_keys):
CheckReference(trusted, calling_app, key)
entity_group = _GetEntityGroup(key)
entity_group_key = datastore_types.ReferenceToKeyValue(entity_group)
grouped_keys[entity_group_key].append((key, i))
mutation_versions = [None] * len(raw_keys)
if transaction:
txn = self.GetTxn(transaction, trusted, calling_app)
for key in raw_keys:
indexes = _FilterIndexesByKind(
key, self.GetIndexes(key.app, trusted, calling_app))
txn.Delete(key, indexes)
else:
for keys in six.itervalues(grouped_keys):
txn = self._RunInTxn(
keys, keys[0][0].app, lambda txn, key: txn.Delete(
key[0],
_FilterIndexesByKind(
key[0], self.GetIndexes(key[0].app, trusted, calling_app))))
for key, index in keys:
mutation_versions[index] = txn.GetMutationVersion(key)
_UpdateCost(cost, txn._cost.entity_writes, txn._cost.index_writes)
return mutation_versions
def Touch(self, raw_keys, trusted=False, calling_app=None):
"""Applies all outstanding writes."""
calling_app = datastore_types.ResolveAppId(calling_app)
grouped_keys = collections.defaultdict(list)
for key in raw_keys:
CheckReference(trusted, calling_app, key)
entity_group = _GetEntityGroup(key)
entity_group_key = datastore_types.ReferenceToKeyValue(entity_group)
grouped_keys[entity_group_key].append(key)
for keys in six.itervalues(grouped_keys):
self._RunInTxn(keys, keys[0].app, lambda txn, key: None)
def _RunInTxn(self, values, app, op):
"""Runs the given values in a separate Txn.
Retries up to _RETRIES times on CONCURRENT_TRANSACTION errors.
Args:
values: A list of arguments to op.
app: The app to create the Txn on.
op: A function to run on each value in the Txn.
Returns:
The transaction that was committed.
"""
retries = 0
backoff = _INITIAL_RETRY_DELAY_MS / 1000.0
while True:
txn = self._BeginTransaction(app, False)
try:
for value in values:
op(txn, value)
txn.Commit()
return txn
except apiproxy_errors.ApplicationError as e:
try:
txn.Rollback()
except Exception:
logger.debug('Exception in rollback.', exc_info=True)
if e.application_error == datastore_pb.Error.CONCURRENT_TRANSACTION:
retries += 1
if retries <= _RETRIES:
time.sleep(backoff)
backoff *= _RETRY_DELAY_MULTIPLIER
if backoff * 1000.0 > _MAX_RETRY_DELAY_MS:
backoff = _MAX_RETRY_DELAY_MS / 1000.0
continue
raise
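  # Note: on CONCURRENT_TRANSACTION errors the sleep starts at
  # _INITIAL_RETRY_DELAY_MS and grows by _RETRY_DELAY_MULTIPLIER per attempt,
  # capped at _MAX_RETRY_DELAY_MS; once _RETRIES attempts are exhausted (or
  # for any other ApplicationError) the exception is re-raised.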
def _CheckHasIndex(self, query, trusted=False, calling_app=None):
"""Checks if the query can be satisfied given the existing indexes.
Args:
query: the datastore_pb.Query to check
trusted: True if the calling app is trusted (like dev_admin_console)
calling_app: app_id of the current running application
Raises:
      apiproxy_errors.ApplicationError: if the query cannot be satisfied
        given the existing indexes.
"""
if query.kind in self._pseudo_kinds:
return
minimal_index = datastore_index.MinimalCompositeIndexForQuery(
query, (datastore_index.ProtoToIndexDefinition(index)
for index in self.GetIndexes(query.app, trusted, calling_app)
if index.state == entity_pb2.CompositeIndex.READ_WRITE))
if minimal_index is not None:
msg = ('This query requires a composite index that is not defined. '
'You must update the index.yaml file in your application root.')
is_most_efficient, kind, ancestor, properties = minimal_index
if not is_most_efficient:
yaml = datastore_index.IndexYamlForQuery(
kind, ancestor,
datastore_index.GetRecommendedIndexProperties(properties))
msg += '\nThe following index is the minimum index required:\n' + yaml
raise apiproxy_errors.ApplicationError(datastore_pb.Error.NEED_INDEX, msg)
def SetAutoIdPolicy(self, auto_id_policy):
"""Set value of _auto_id_policy flag (default SEQUENTIAL).
SEQUENTIAL auto ID assignment behavior will eventually be deprecated
and the default will be SCATTERED.
Args:
auto_id_policy: string constant.
Raises:
TypeError: if auto_id_policy is not one of SEQUENTIAL or SCATTERED.
"""
_CheckAutoIdPolicy(auto_id_policy)
self._auto_id_policy = auto_id_policy
def Write(self):
"""Writes the datastore to disk."""
self.Flush()
def Close(self):
"""Closes the Datastore, writing any buffered data."""
self.Write()
def _GetQueryCursor(self, query, filters, orders, index_list):
"""Runs the given datastore_pb.Query and returns a QueryCursor for it.
This must be implemented by a sub-class. The sub-class does not need to
enforced any consistency guarantees (and can just blindly read).
Args:
query: The datastore_pb.Query to run.
filters: A list of filters that override the ones found on query.
orders: A list of orders that override the ones found on query.
index_list: A list of indexes used by the query.
Returns:
A BaseCursor that can be used to fetch query results.
"""
raise NotImplementedError
def _Get(self, reference):
"""Get the entity record for the given reference or None.
This must be implemented by a sub-class. The sub-class does not need to
enforced any consistency guarantees (and can just blindly read).
Args:
      reference: An entity_pb2.Reference to look up.
Returns:
The EntityRecord associated with the given reference or None.
"""
raise NotImplementedError
def _AllocateSequentialIds(self, reference, size=1, max_id=None):
"""Allocate sequential ids for given reference.
Args:
reference: An entity_pb2.Reference to allocate an id for.
size: The size of the range to allocate
max_id: The upper bound of the range to allocate
Returns:
A tuple containing (min, max) of the allocated range.
"""
raise NotImplementedError
def _AllocateIds(self, references):
"""Allocate or reserves IDs for the v1 datastore API.
Incomplete keys are allocated scattered IDs. Complete keys have every id in
their paths reserved in the appropriate ID space.
Args:
references: a list of entity_pb2.Reference objects to allocate or reserve
Returns:
a list of complete entity_pb2.Reference objects corresponding to the
incomplete keys in the input, with newly allocated ids.
"""
raise NotImplementedError
def _NeedsIndexes(func):
"""A decorator for DatastoreStub methods that require or affect indexes.
Updates indexes to match index.yaml before the call and updates index.yaml
after the call if require_indexes is False. If root_path is not set, this is a
no op.
"""
def UpdateIndexesWrapper(self, *args, **kwargs):
with self._index_setup_lock:
self._SetupIndexes()
try:
return func(self, *args, **kwargs)
finally:
self._UpdateIndexes()
return UpdateIndexesWrapper
class EntityGroupPseudoKind(object):
"""A common implementation of get() for the __entity_group__ pseudo-kind.
Public properties:
name: the pseudo-kind name
"""
name = '__entity_group__'
base_version = int(time.time() * 1e6)
def Get(self, txn, key):
"""Fetch key of this pseudo-kind within txn.
Args:
txn: transaction within which Get occurs, may be None if this is an
eventually consistent Get.
key: key of pseudo-entity to Get.
Returns:
An entity for key, or None if it doesn't exist.
"""
if not txn:
txn = self._stub._BeginTransaction(
key.app, False,
datastore_pb.BeginTransactionRequest.READ_ONLY)
try:
return self.Get(txn, key)
finally:
txn.Rollback()
if isinstance(txn._txn_manager._consistency_policy,
MasterSlaveConsistencyPolicy):
return None
path = key.path
if len(path.element) != 2 or path.element[-1].id != 1:
return None
tracker = txn._GetTracker(key)
tracker._GrabSnapshot(txn._txn_manager)
eg = entity_pb2.EntityProto()
eg.key.CopyFrom(key)
eg.entity_group.CopyFrom(_GetEntityGroup(key).path)
version = entity_pb2.Property()
version.name = '__version__'
version.multiple = False
version.value.int64Value = tracker._read_pos + self.base_version
eg.property.append(version)
return eg
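  # Illustrative sketch ('Parent' is a hypothetical kind): the pseudo-entity
  # for the entity group rooted at Parent:1 is addressed with a two-element
  # key whose last id is 1, e.g.
  #
  #   key = datastore_types.Key.from_path('Parent', 1, '__entity_group__', 1)
  #
  # and the returned entity exposes a single __version__ property derived
  # from the group's read position plus base_version.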
def Query(self, query, filters, orders):
"""Perform a query on this pseudo-kind.
Args:
query: the original datastore_pb.Query.
filters: the filters from query.
orders: the orders from query.
Returns:
always raises an error
"""
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST, 'queries not supported on ' + self.name)
class _CachedIndexDefinitions(object):
"""Records definitions read from index configuration files for later reuse.
If the names and modification times of the configuration files are unchanged,
then the index configurations previously parsed out of those files can be
reused.
Attributes:
file_names: a list of the names of the configuration files. This will have
one element when the configuration is based on an index.yaml but may have
more than one if it is based on datastore-indexes.xml and
datastore-indexes-auto.xml.
last_modifieds: a list of floats that are the modification times of the
files in file_names.
index_protos: a list of entity_pb2.CompositeIndex objects corresponding to
the index definitions read from file_names.
"""
def __init__(self, file_names, last_modifieds, index_protos):
assert len(file_names) <= 1
self.file_names = file_names
self.last_modifieds = last_modifieds
self.index_protos = index_protos
class DatastoreStub(object):
"""A stub that maps datastore service calls on to a BaseDatastore.
This class also keeps track of query cursors.
"""
def __init__(self,
datastore,
app_id=None,
trusted=None,
root_path=None):
super(DatastoreStub, self).__init__()
self._datastore = datastore
self._app_id = datastore_types.ResolveAppId(app_id)
self._trusted = trusted
self._root_path = root_path
self._xml_configuration = self._XmlConfiguration()
self._index_setup_lock = threading.Lock()
self.__query_history = {}
self.__query_ci_history = set()
self.__query_history_lock = threading.Lock()
self._cached_index_definitions = _CachedIndexDefinitions([], [], None)
if self._require_indexes or root_path is None:
self._index_config_updater = None
else:
updater_class = (
datastore_stub_index.DatastoreIndexesAutoXmlUpdater
if self._xml_configuration else datastore_stub_index.IndexYamlUpdater)
self._index_config_updater = updater_class(root_path)
DatastoreStub.Clear(self)
def _XmlConfiguration(self):
"""Return True if the app at self._root_path uses XML configuration files.
An app uses XML configuration files if it has a WEB-INF subdirectory and it
does not have an index.yaml at its root. We assume this even if it doesn't
currently have any configuration files at all, because then we will want to
create a new datastore-indexes-auto.xml rather than create a new index.yaml.
Returns:
True if the app uses XML configuration files, False otherwise.
Raises:
yaml_errors.AmbiguousConfigurationFiles: if there is both an index.yaml
and either or both of the two possible XML configuration files.
"""
if not self._root_path:
return False
index_yaml = os.path.join(self._root_path, 'index.yaml')
web_inf = os.path.join(self._root_path, 'WEB-INF')
datastore_indexes_xml = os.path.join(web_inf, 'datastore-indexes.xml')
datastore_indexes_auto_xml = os.path.join(
web_inf, 'appengine-generated', 'datastore-indexes-auto.xml')
existing = [
f for f in [
index_yaml, datastore_indexes_xml, datastore_indexes_auto_xml]
if os.path.isfile(f)]
if existing == [index_yaml]:
return False
elif index_yaml in existing:
raise yaml_errors.AmbiguousConfigurationFiles(
'App has both XML and YAML configuration files: %s' % existing)
else:
return os.path.isdir(web_inf)
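    # Editorial sketch of the decision above (paths are illustrative):
    #   index.yaml only                        -> False (YAML configuration)
    #   WEB-INF/ present, no index.yaml        -> True  (XML configuration)
    #   index.yaml plus datastore-indexes.xml  -> raises
    #                                             AmbiguousConfigurationFiles
    #   no configuration files, no WEB-INF/    -> False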
def Clear(self):
"""Clears out all stored values."""
self._query_cursors = {}
with self.__query_history_lock:
self.__query_history = {}
self.__query_ci_history = set()
def QueryHistory(self):
"""Returns a dict that maps Query PBs to times they've been run."""
with self.__query_history_lock:
return dict((pb, times) for pb, times in self.__query_history.items()
if pb.app == self._app_id)
def _QueryCompositeIndexHistoryLength(self):
"""Returns the length of the CompositeIndex set for query history."""
with self.__query_history_lock:
return len(self.__query_ci_history)
def SetTrusted(self, trusted):
"""Set/clear the trusted bit in the stub.
This bit indicates that the app calling the stub is trusted. A
trusted app can write to datastores of other apps.
Args:
trusted: boolean.
"""
self._trusted = trusted
def _Dynamic_Get(self, req, res):
transaction = req.HasField('transaction') and req.transaction or None
if req.allow_deferred and len(req.key) > _MAXIMUM_RESULTS:
keys_to_get = req.key[-_MAXIMUM_RESULTS:]
deferred_keys = req.key[:-_MAXIMUM_RESULTS]
res.deferred.extend(deferred_keys)
else:
keys_to_get = req.key
res.in_order = not req.allow_deferred
total_response_bytes = 0
for index, record in enumerate(
self._datastore.Get(keys_to_get, transaction,
req.HasField('failover_ms'), self._trusted,
self._app_id)):
entity = record.entity
entity_size = entity and entity.ByteSize() or 0
if (req.allow_deferred and index > 0 and
total_response_bytes + entity_size > _MAXIMUM_QUERY_RESULT_BYTES):
res.deferred.extend(keys_to_get[index:])
break
else:
entity_result = res.entity.add()
if record.metadata.HasField('updated_version'):
entity_result.version = record.metadata.updated_version
if entity:
entity_result.entity.CopyFrom(entity)
total_response_bytes += entity_size
else:
entity_result.key.CopyFrom(keys_to_get[index])
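    # Editorial note (the numeric values are assumptions; the real limits are
    # module constants): if _MAXIMUM_RESULTS were 300, a Lookup of 1000 keys
    # would fetch only the last 300 and echo the first 700 back in
    # res.deferred. Within the fetched slice, further keys are deferred once
    # the accumulated response would exceed _MAXIMUM_QUERY_RESULT_BYTES,
    # except that the first entity is always returned.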
def _Dynamic_Put(self, req, res):
transaction = req.HasField('transaction') and req.transaction or None
results = self._datastore.Put(req.entity, res.cost, transaction,
self._trusted, self._app_id)
res.key.extend(result[0] for result in results)
if not transaction:
res.version.extend(result[1] for result in results)
def _Dynamic_Delete(self, req, res):
transaction = req.HasField('transaction') and req.transaction or None
versions = self._datastore.Delete(req.key, res.cost, transaction,
self._trusted, self._app_id)
if not transaction:
res.version.extend(versions)
def _Dynamic_Touch(self, req, _):
self._datastore.Touch(req.key, self._trusted, self._app_id)
@_NeedsIndexes
def _Dynamic_RunQuery(self, query, query_result):
cursor = self._datastore.GetQueryCursor(query, self._trusted, self._app_id)
count = query.count if query.HasField('count') else None
cursor.PopulateQueryResult(
query_result, count, query.offset, query.compile, first_result=True)
if query_result.HasField('cursor'):
self._query_cursors[query_result.cursor.cursor] = cursor
if query.compile:
compiled_query = query_result.compiled_query
compiled_query.keys_only = query.keys_only
compiled_query.primaryscan.index_name = 'devindex'
self.__UpdateQueryHistory(query)
def __UpdateQueryHistory(self, query):
clone = datastore_pb.Query()
clone.CopyFrom(query)
clone.ClearField('hint')
clone.ClearField('limit')
clone.ClearField('offset')
clone.ClearField('count')
with self.__query_history_lock:
if clone in self.__query_history:
self.__query_history[clone] += 1
else:
self.__query_history[clone] = 1
if clone.app == self._app_id:
self.__query_ci_history.add(
datastore_index.CompositeIndexForQuery(clone))
def _Dynamic_Next(self, next_request, query_result):
app = next_request.cursor.app
CheckAppId(self._trusted, self._app_id, app)
cursor = self._query_cursors.get(next_request.cursor.cursor)
Check(cursor and cursor.app == app,
'Cursor %d not found' % next_request.cursor.cursor)
count = next_request.count if next_request.HasField('count') else None
offset = next_request.offset if next_request.HasField('offset') else None
cursor.PopulateQueryResult(
query_result, count, offset, next_request.compile, first_result=False)
if not query_result.HasField('cursor'):
del self._query_cursors[next_request.cursor.cursor]
def _Dynamic_AddActions(self, request, _):
"""Associates the creation of one or more tasks with a transaction.
Args:
request: A taskqueue_service_pb2.TaskQueueBulkAddRequest containing the
tasks that should be created when the transaction is committed.
"""
if len(request.add_request) == 0:
return
first_add_request = request.add_request[0]
datastore_transaction = None
if first_add_request.HasField('datastore_transaction'):
datastore_transaction = first_add_request.datastore_transaction
transaction = datastore_pb.Transaction()
get_service_converter().v1_to_v3_txn(datastore_transaction, transaction)
else:
transaction = first_add_request.transaction
txn = self._datastore.GetTxn(transaction, self._trusted, self._app_id)
new_actions = []
for add_request in request.add_request:
Check(datastore_transaction is not None
and add_request.datastore_transaction == datastore_transaction
or add_request.transaction == transaction,
'Cannot add requests to different transactions')
clone = taskqueue_service_pb2.TaskQueueAddRequest()
clone.CopyFrom(add_request)
clone.ClearField('transaction')
clone.ClearField('datastore_transaction')
new_actions.append(clone)
txn.AddActions(new_actions, self._MAX_ACTIONS_PER_TXN)
def _Dynamic_BeginTransaction(self, req, transaction):
CheckAppId(self._trusted, self._app_id, req.app)
transaction.CopyFrom(
self._datastore.BeginTransaction(
req.app, req.allow_multiple_eg, req.previous_transaction
if req.HasField('previous_transaction') else None, req.mode))
def _Dynamic_Commit(self, transaction, res):
CheckAppId(self._trusted, self._app_id, transaction.app)
txn = self._datastore.GetTxn(transaction, self._trusted, self._app_id)
res.cost.CopyFrom(txn.Commit())
for reference in txn._mutated_references:
commit_version = res.version.add()
commit_version.root_entity_key.CopyFrom(reference)
commit_version.version = txn.GetMutationVersion(reference)
def _Dynamic_Rollback(self, transaction, _):
CheckAppId(self._trusted, self._app_id, transaction.app)
txn = self._datastore.GetTxn(transaction, self._trusted, self._app_id)
txn.Rollback()
def _Dynamic_CreateIndex(self, index, id_response):
id_response.value = self._datastore.CreateIndex(index,
self._trusted,
self._app_id)
@_NeedsIndexes
def _Dynamic_GetIndices(self, get_indicies_request, composite_indices):
composite_indices.index.extend(
self._datastore.GetIndexes(get_indicies_request.app_id, self._trusted,
self._app_id))
def _Dynamic_UpdateIndex(self, index, _):
self._datastore.UpdateIndex(index, self._trusted, self._app_id)
def _Dynamic_DeleteIndex(self, index, _):
self._datastore.DeleteIndex(index, self._trusted, self._app_id)
def _Dynamic_AllocateIds(self, allocate_ids_request, allocate_ids_response):
Check(
not allocate_ids_request.HasField('model_key') or
not allocate_ids_request.reserve,
'Cannot allocate and reserve IDs in the same request')
if allocate_ids_request.reserve:
Check(not allocate_ids_request.HasField('size'),
'Cannot specify size when reserving IDs')
Check(not allocate_ids_request.HasField('max'),
'Cannot specify max when reserving IDs')
if allocate_ids_request.HasField('model_key'):
CheckAppId(allocate_ids_request.model_key.app, self._trusted,
self._app_id)
reference = allocate_ids_request.model_key
(start,
end) = self._datastore._AllocateSequentialIds(reference,
allocate_ids_request.size,
allocate_ids_request.max)
allocate_ids_response.start = start
allocate_ids_response.end = end
else:
for reference in allocate_ids_request.reserve:
CheckReference(self._trusted, self._app_id, reference)
self._datastore._AllocateIds(allocate_ids_request.reserve)
allocate_ids_response.start = 0
allocate_ids_response.end = 0
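    # Editorial sketch of the two request shapes handled above:
    #   - model_key set (optionally with size or max): a sequential range is
    #     allocated and returned via start/end.
    #   - reserve set to a list of complete keys: every id in their paths is
    #     reserved and the response range is set to (0, 0).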
def _SetupIndexes(self, _open=open):
"""Ensure that the set of existing composite indexes matches index.yaml.
Note: this is similar to the algorithm used by the admin console for
the same purpose.
"""
if not self._root_path:
return
file_names = [os.path.join(self._root_path, 'index.yaml')]
file_mtimes = [os.path.getmtime(f) for f in file_names if os.path.exists(f)]
if (self._cached_index_definitions.file_names == file_names and
all(os.path.exists(f) for f in file_names) and
self._cached_index_definitions.last_modifieds == file_mtimes):
requested_indexes = self._cached_index_definitions.index_protos
else:
file_mtimes = []
index_texts = []
for file_name in file_names:
try:
file_mtimes.append(os.path.getmtime(file_name))
with _open(file_name, 'r') as fh:
index_texts.append(fh.read())
except (OSError, IOError):
pass
requested_indexes = []
if len(index_texts) == len(file_names):
all_ok = True
for index_text in index_texts:
index_defs = datastore_index.ParseIndexDefinitions(index_text)
if index_defs is None or index_defs.indexes is None:
all_ok = False
else:
requested_indexes.extend(
datastore_index.IndexDefinitionsToProtos(
self._app_id, index_defs.indexes))
if all_ok:
self._cached_index_definitions = _CachedIndexDefinitions(
file_names, file_mtimes, requested_indexes)
existing_indexes = self._datastore.GetIndexes(
self._app_id, self._trusted, self._app_id)
requested = dict(
(x.definition.SerializeToString(), x) for x in requested_indexes)
existing = dict(
(x.definition.SerializeToString(), x) for x in existing_indexes)
created = 0
for key, index in six.iteritems(requested):
if key not in existing:
new_index = entity_pb2.CompositeIndex()
new_index.CopyFrom(index)
new_index.id = datastore_admin.CreateIndex(new_index)
new_index.state = entity_pb2.CompositeIndex.READ_WRITE
datastore_admin.UpdateIndex(new_index)
created += 1
deleted = 0
for key, index in six.iteritems(existing):
if key not in requested:
datastore_admin.DeleteIndex(index)
deleted += 1
if created or deleted:
logger.debug('Created %d and deleted %d index(es); total %d',
created, deleted, len(requested))
def _UpdateIndexes(self):
if self._index_config_updater is not None:
self._index_config_updater.UpdateIndexConfig()
class StubQueryConverter(datastore_pbs._QueryConverter):
"""Converter for v3, v4 and v1 queries suitable for use in stubs."""
def __init__(self, entity_converter):
super(StubQueryConverter, self).__init__(entity_converter)
def v4_to_v3_compiled_cursor(self, v4_cursor, v3_compiled_cursor):
"""Converts a v4 cursor string to a v3 CompiledCursor.
Args:
v4_cursor: a string representing a v4 query cursor
v3_compiled_cursor: a datastore_pb.CompiledCursor to populate
"""
v3_compiled_cursor.Clear()
try:
v3_compiled_cursor.ParseFromString(v4_cursor)
except message.DecodeError:
raise datastore_pbs.InvalidConversionError('Invalid query cursor.')
def v3_to_v4_compiled_cursor(self, v3_compiled_cursor):
"""Converts a v3 CompiledCursor to a v4 cursor string.
Args:
v3_compiled_cursor: a datastore_pb.CompiledCursor
Returns:
a string representing a v4 query cursor
"""
return v3_compiled_cursor.SerializeToString()
def v4_to_v3_query(self, v4_partition_id, v4_query, v3_query):
"""Converts a v4 Query to a v3 Query.
Args:
v4_partition_id: a datastore_v4_pb2.PartitionId
v4_query: a datastore_v4_pb2.Query
v3_query: a datastore_pb.Query to populate
Raises:
InvalidConversionError if the query cannot be converted
"""
v3_query.Clear()
if v4_partition_id.dataset_id:
v3_query.app = v4_partition_id.dataset_id
if v4_partition_id.HasField('namespace'):
v3_query.name_space = v4_partition_id.namespace
v3_query.persist_offset = True
v3_query.require_perfect_plan = True
v3_query.compile = True
if v4_query.HasField('limit'):
v3_query.limit = v4_query.limit
if v4_query.offset:
v3_query.offset = v4_query.offset
if v4_query.HasField('start_cursor'):
self.v4_to_v3_compiled_cursor(v4_query.start_cursor,
v3_query.compiled_cursor)
if v4_query.HasField('end_cursor'):
self.v4_to_v3_compiled_cursor(v4_query.end_cursor,
v3_query.end_compiled_cursor)
if len(v4_query.kind) > 0:
datastore_pbs.check_conversion(len(v4_query.kind) == 1,
'multiple kinds not supported')
v3_query.kind = v4_query.kind[0].name
has_key_projection = False
for prop in v4_query.projection:
if prop.property.name == datastore_pbs.PROPERTY_NAME_KEY:
has_key_projection = True
else:
v3_query.property_name.append(prop.property.name)
if has_key_projection and len(v3_query.property_name) == 0:
v3_query.keys_only = True
for prop in v4_query.group_by:
v3_query.group_by_property_name.append(prop.name)
self.__populate_v3_filters_from_v4(v4_query.filter, v3_query)
for v4_order in v4_query.order:
v3_order = v3_query.order.add()
v3_order.property = v4_order.property.name
if v4_order.HasField('direction'):
v3_order.direction = v4_order.direction
def v3_to_v4_query(self, v3_query, v4_query):
"""Converts a v3 Query to a v4 Query.
Args:
v3_query: a datastore_pb.Query
v4_query: a datastore_v4_pb2.Query to populate
Raises:
InvalidConversionError if the query cannot be converted
"""
v4_query.Clear()
datastore_pbs.check_conversion(not v3_query.HasField('distinct'),
'distinct option not supported')
datastore_pbs.check_conversion(v3_query.require_perfect_plan,
'non-perfect plans not supported')
if v3_query.HasField('limit'):
v4_query.limit = v3_query.limit
if v3_query.offset:
v4_query.offset = v3_query.offset
if v3_query.HasField('compiled_cursor'):
v4_query.start_cursor = (
self.v3_to_v4_compiled_cursor(v3_query.compiled_cursor))
if v3_query.HasField('end_compiled_cursor'):
v4_query.end_cursor = (
self.v3_to_v4_compiled_cursor(v3_query.end_compiled_cursor))
if v3_query.HasField('kind'):
v4_query.kind.add().name = v3_query.kind
for name in v3_query.property_name:
v4_query.projection.add().property.name = name
if v3_query.keys_only:
v4_query.projection.add().property.name = (
datastore_pbs.PROPERTY_NAME_KEY)
for name in v3_query.group_by_property_name:
v4_query.group_by.add().name = name
num_v4_filters = len(v3_query.filter)
if v3_query.HasField('ancestor'):
num_v4_filters += 1
if num_v4_filters == 1:
get_property_filter = self.__get_property_filter_from_v4
elif num_v4_filters >= 1:
v4_query.filter.composite_filter.operator = (
datastore_v4_pb2.CompositeFilter.AND)
get_property_filter = self.__add_property_filter_from_v4
if v3_query.HasField('ancestor'):
self._v3_query_to_v4_ancestor_filter(v3_query,
get_property_filter(v4_query))
for v3_filter in v3_query.filter:
self._v3_filter_to_v4_property_filter(v3_filter,
get_property_filter(v4_query))
for v3_order in v3_query.order:
self.v3_order_to_v4_order(v3_order, v4_query.order.add())
def __get_property_filter_from_v4(self, v4_query):
"""Returns the PropertyFilter from the query's top-level filter."""
return v4_query.filter.property_filter
def __add_property_filter_from_v4(self, v4_query):
"""Adds and returns a PropertyFilter from the query's composite filter."""
v4_comp_filter = v4_query.filter.composite_filter
return v4_comp_filter.filter.add().property_filter
def __populate_v3_filters_from_v4(self, v4_filter, v3_query):
"""Populates a filters for a v3 Query.
Args:
v4_filter: a datastore_v4_pb2.Filter
v3_query: a datastore_pb.Query to populate with filters
"""
if v4_filter.HasField('property_filter'):
v4_property_filter = v4_filter.property_filter
if (v4_property_filter.operator
== datastore_v4_pb2.PropertyFilter.HAS_ANCESTOR):
datastore_pbs.check_conversion(
v4_property_filter.value.HasField('key_value'),
'HAS_ANCESTOR requires a reference value')
datastore_pbs.check_conversion((v4_property_filter.property.name
== datastore_pbs.PROPERTY_NAME_KEY),
'unsupported property')
datastore_pbs.check_conversion(not v3_query.HasField('ancestor'),
'duplicate ancestor constraint')
self._entity_converter.v4_to_v3_reference(
v4_property_filter.value.key_value,
v3_query.ancestor)
else:
v3_filter = v3_query.filter.add()
property_name = v4_property_filter.property.name
v3_filter.op = v4_property_filter.operator
datastore_pbs.check_conversion(
not v4_property_filter.value.list_value,
('unsupported value type, %s, in property filter'
' on "%s"' % ('list_value', property_name)))
self._entity_converter.v4_to_v3_property(property_name,
False,
False,
v4_property_filter.value,
v3_filter.property.add())
elif v4_filter.HasField('composite_filter'):
datastore_pbs.check_conversion((v4_filter.composite_filter.operator
== datastore_v4_pb2.CompositeFilter.AND),
'unsupported composite property operator')
for v4_sub_filter in v4_filter.composite_filter.filter:
self.__populate_v3_filters_from_v4(v4_sub_filter, v3_query)
def v1_to_v3_compiled_cursor(self, v1_cursor, v3_compiled_cursor):
"""Converts a v1 cursor string to a v3 CompiledCursor.
Args:
v1_cursor: a string representing a v1 query cursor
v3_compiled_cursor: a datastore_pb.CompiledCursor to populate
"""
v3_compiled_cursor.Clear()
try:
v3_compiled_cursor.ParseFromString(v1_cursor)
except message.DecodeError:
raise datastore_pbs.InvalidConversionError('Invalid query cursor.')
def v3_to_v1_compiled_cursor(self, v3_compiled_cursor):
"""Converts a v3 CompiledCursor to a v1 cursor string.
Args:
v3_compiled_cursor: a datastore_pb.CompiledCursor
Returns:
a string representing a v1 query cursor
"""
return v3_compiled_cursor.SerializeToString()
def v1_to_v3_query(self, v1_partition_id, v1_query, v3_query):
"""Converts a v1 Query to a v3 Query.
Args:
v1_partition_id: a googledatastore.PartitionId
v1_query: a googledatastore.Query
v3_query: a datastore_pb.Query to populate
Raises:
InvalidConversionError if the query cannot be converted
"""
v3_query.Clear()
if v1_partition_id.project_id:
v3_query.app = v1_partition_id.project_id
if v1_partition_id.namespace_id:
v3_query.name_space = v1_partition_id.namespace_id
v3_query.persist_offset = True
v3_query.require_perfect_plan = True
v3_query.compile = True
if v1_query.HasField('limit'):
v3_query.limit = v1_query.limit.value
if v1_query.offset:
v3_query.offset = v1_query.offset
if v1_query.start_cursor:
self.v1_to_v3_compiled_cursor(v1_query.start_cursor,
v3_query.compiled_cursor)
if v1_query.end_cursor:
self.v1_to_v3_compiled_cursor(v1_query.end_cursor,
v3_query.end_compiled_cursor)
if v1_query.kind:
datastore_pbs.check_conversion(len(v1_query.kind) == 1,
'multiple kinds not supported')
v3_query.kind = v1_query.kind[0].name
has_key_projection = False
for prop in v1_query.projection:
if prop.property.name == datastore_pbs.PROPERTY_NAME_KEY:
has_key_projection = True
else:
v3_query.property_name.append(prop.property.name)
if has_key_projection and len(v3_query.property_name) == 0:
v3_query.keys_only = True
for prop in v1_query.distinct_on:
v3_query.group_by_property_name.append(prop.name)
self.__populate_v3_filters_from_v1(v1_query.filter, v3_query)
for v1_order in v1_query.order:
v3_order = v3_query.order.add()
v3_order.property = v1_order.property.name
if v1_order.direction:
v3_order.direction = v1_order.direction
def v3_to_v1_query(self, v3_query, v1_query):
"""Converts a v3 Query to a v1 Query.
Args:
v3_query: a datastore_pb.Query
v1_query: a googledatastore.Query to populate
Raises:
InvalidConversionError if the query cannot be converted
"""
v1_query.Clear()
datastore_pbs.check_conversion(not v3_query.HasField('distinct'),
'distinct option not supported')
datastore_pbs.check_conversion(v3_query.require_perfect_plan,
'non-perfect plans not supported')
if v3_query.HasField('limit'):
v1_query.limit.value = v3_query.limit
if v3_query.offset:
v1_query.offset = v3_query.offset
if v3_query.HasField('compiled_cursor'):
v1_query.start_cursor = (
self.v3_to_v1_compiled_cursor(v3_query.compiled_cursor))
if v3_query.HasField('end_compiled_cursor'):
v1_query.end_cursor = (
self.v3_to_v1_compiled_cursor(v3_query.end_compiled_cursor))
if v3_query.HasField('kind'):
v1_query.kind.add().name = v3_query.kind
for name in v3_query.property_name:
v1_query.projection.add().property.name = name
if v3_query.keys_only:
v1_query.projection.add().property.name = datastore_pbs.PROPERTY_NAME_KEY
for name in v3_query.group_by_property_name:
v1_query.distinct_on.add().name = name
num_v1_filters = len(v3_query.filter)
if v3_query.HasField('ancestor') or v3_query.shallow:
num_v1_filters += 1
if num_v1_filters == 1:
get_property_filter = self.__get_property_filter_from_v1
elif num_v1_filters >= 1:
v1_query.filter.composite_filter.operator = (
googledatastore.CompositeFilter.AND)
      get_property_filter = self.__add_property_filter_from_v1
if v3_query.HasField('ancestor') or v3_query.shallow:
self._v3_query_to_v1_ancestor_filter(v3_query,
get_property_filter(v1_query))
for v3_filter in v3_query.filter:
self._v3_filter_to_v1_property_filter(v3_filter,
get_property_filter(v1_query))
for v3_order in v3_query.order:
self.v3_order_to_v1_order(v3_order, v1_query.order.add())
def __get_property_filter_from_v1(self, v1_query):
"""Returns the PropertyFilter from the query's top-level filter."""
    return v1_query.filter.property_filter
def __add_property_filter_from_v1(self, v1_query):
"""Adds and returns a PropertyFilter from the query's composite filter."""
v1_comp_filter = v1_query.filter.composite_filter
return v1_comp_filter.filter.add().property_filter
def __populate_v3_filters_from_v1(self, v1_filter, v3_query):
"""Populates a filters for a v3 Query.
Args:
v1_filter: a googledatastore.Filter
v3_query: a datastore_pb.Query to populate with filters
"""
filter_type = v1_filter.WhichOneof('filter_type')
if filter_type == 'property_filter':
v1_property_filter = v1_filter.property_filter
v1_property_name = v1_property_filter.property.name
if (v1_property_filter.op == googledatastore.PropertyFilter.HAS_PARENT or
v1_property_filter.op == googledatastore.PropertyFilter.HAS_ANCESTOR):
if v1_property_filter.op == googledatastore.PropertyFilter.HAS_PARENT:
datastore_pbs.check_conversion(
v1_property_filter.value.HasField('key_value') or
v1_property_filter.value.HasField('null_value'),
'HAS_PARENT requires a key value or null')
else:
datastore_pbs.check_conversion(
v1_property_filter.value.HasField('key_value'),
'HAS_ANCESTOR requires a key value')
datastore_pbs.check_conversion((v1_property_name
== datastore_pbs.PROPERTY_NAME_KEY),
'unsupported property')
datastore_pbs.check_conversion(not v3_query.HasField('ancestor') and
not v3_query.shallow,
'duplicate ancestor or parent '
'constraint')
if v1_property_filter.value.HasField('key_value'):
self._entity_converter.v1_to_v3_reference(
v1_property_filter.value.key_value,
v3_query.ancestor)
if v1_property_filter.op == googledatastore.PropertyFilter.HAS_PARENT:
v3_query.shallow = True
else:
v3_filter = v3_query.filter.add()
property_name = v1_property_name
v3_filter.op = v1_property_filter.op
datastore_pbs.check_conversion(
not v1_property_filter.value.HasField('array_value'),
('unsupported value type, %s, in property filter'
' on "%s"' % ('array_value', property_name)))
self._entity_converter.v1_to_v3_property(property_name,
False,
False,
v1_property_filter.value,
v3_filter.property.add())
elif filter_type == 'composite_filter':
datastore_pbs.check_conversion((v1_filter.composite_filter.op
== googledatastore.CompositeFilter.AND),
'unsupported composite property operator')
for v1_sub_filter in v1_filter.composite_filter.filters:
self.__populate_v3_filters_from_v1(v1_sub_filter, v3_query)
def get_query_converter(id_resolver=None):
"""Returns a converter for v3 and v1 queries (not suitable for production).
This converter is suitable for use in stubs but not for production.
Returns:
a StubQueryConverter
"""
return StubQueryConverter(datastore_pbs.get_entity_converter(id_resolver))
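# Minimal usage sketch (editorial; the surrounding protos are assumed to be
# built by the caller):
#
#   converter = get_query_converter()
#   v3_query = datastore_pb.Query()
#   converter.v1_to_v3_query(v1_partition_id, v1_query, v3_query)
#
# The same object also translates the v4 wire format (v4_to_v3_query) and the
# reverse directions (v3_to_v1_query, v3_to_v4_query).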
class StubServiceConverter(object):
"""Converter for v3/v4/v1 request/response protos suitable for use in stubs.
"""
def __init__(self, entity_converter, query_converter):
self._entity_converter = entity_converter
self._query_converter = query_converter
def v1_to_v3_cursor(self, v1_query_handle, v3_cursor):
"""Converts a v1 cursor string to a v3 Cursor.
Args:
v1_query_handle: a string representing a v1 query handle
v3_cursor: a datastore_pb.Cursor to populate
"""
try:
v3_cursor.ParseFromString(v1_query_handle)
except message.DecodeError:
raise datastore_pbs.InvalidConversionError('Invalid query handle.')
return v3_cursor
def _v3_to_v1_query_handle(self, v3_cursor):
"""Converts a v3 Cursor to a v1 query handle string.
Args:
v3_cursor: a datastore_pb.Cursor
Returns:
a string representing a v1 cursor
"""
return v3_cursor.SerializeToString()
def v1_to_v3_txn(self, v1_txn, v3_txn):
"""Converts a v1 transaction string to a v3 Transaction.
Args:
v1_txn: a string representing a v1 transaction
v3_txn: a datastore_pb.Transaction() to populate
"""
try:
v3_txn.ParseFromString(v1_txn)
except message.DecodeError:
raise datastore_pbs.InvalidConversionError('Invalid transaction.')
return v3_txn
def _v3_to_v1_txn(self, v3_txn):
"""Converts a v3 Transaction to a v1 transaction string.
Args:
v3_txn: a datastore_pb.Transaction()
Returns:
a string representing a v1 transaction
"""
return v3_txn.SerializeToString()
def v1_to_v3_begin_transaction_req(self, app_id, v1_req):
"""Converts a v1 BeginTransactionRequest to a v3 BeginTransactionRequest.
Args:
app_id: app id
v1_req: a googledatastore.BeginTransactionRequest
Returns:
a datastore_pb.BeginTransactionRequest
"""
v3_req = datastore_pb.BeginTransactionRequest()
v3_req.app = app_id
v3_req.allow_multiple_eg = True
if v1_req.transaction_options.HasField('read_only'):
v3_req.mode = datastore_pb.BeginTransactionRequest.READ_ONLY
elif v1_req.transaction_options.HasField('read_write'):
v3_req.mode = datastore_pb.BeginTransactionRequest.READ_WRITE
return v3_req
def v3_to_v1_begin_transaction_resp(self, v3_resp):
"""Converts a v3 Transaction to a v1 BeginTransactionResponse.
Args:
v3_resp: a datastore_pb.Transaction()
Returns:
a googledatastore.BeginTransactionResponse
"""
v1_resp = googledatastore.BeginTransactionResponse()
v1_resp.transaction = self._v3_to_v1_txn(v3_resp)
return v1_resp
def v1_rollback_req_to_v3_txn(self, v1_req):
"""Converts a v1 RollbackRequest to a v3 Transaction.
Args:
v1_req: a googledatastore.RollbackRequest
Returns:
a datastore_pb.Transaction()
"""
v3_txn = datastore_pb.Transaction()
self.v1_to_v3_txn(v1_req.transaction, v3_txn)
return v3_txn
def v1_commit_req_to_v3_txn(self, v1_req):
"""Converts a v1 CommitRequest to a v3 Transaction.
Args:
v1_req: a googledatastore.CommitRequest
Returns:
a datastore_pb.Transaction()
"""
v3_txn = datastore_pb.Transaction()
self.v1_to_v3_txn(v1_req.transaction, v3_txn)
return v3_txn
def v1_run_query_req_to_v3_query(self, v1_req, new_txn=None):
"""Converts a v1 RunQueryRequest to a v3 Query.
GQL is not supported.
Args:
v1_req: a googledatastore.RunQueryRequest
new_txn: a v1 transaction created ad-hoc for this query, or None.
Returns:
a datastore_pb.Query
"""
consistency_type = v1_req.read_options.WhichOneof('consistency_type')
datastore_pbs.check_conversion(not v1_req.HasField('gql_query'),
'GQL not supported')
if (new_txn is None) == (consistency_type == 'new_transaction'):
raise datastore_errors.InternalError('new_txn should be set only if the '
'consistency type is '
'new_transaction')
v3_query = datastore_pb.Query()
self._query_converter.v1_to_v3_query(v1_req.partition_id, v1_req.query,
v3_query)
if consistency_type == 'transaction':
self.v1_to_v3_txn(v1_req.read_options.transaction,
v3_query.transaction)
elif consistency_type == 'new_transaction':
self.v1_to_v3_txn(new_txn, v3_query.transaction)
elif consistency_type == 'read_consistency':
read_consistency = v1_req.read_options.read_consistency
if read_consistency == googledatastore.ReadOptions.EVENTUAL:
v3_query.strong = False
v3_query.failover_ms = -1
elif read_consistency == googledatastore.ReadOptions.STRONG:
v3_query.strong = True
elif (read_consistency !=
googledatastore.ReadOptions.READ_CONSISTENCY_UNSPECIFIED):
raise datastore_errors.InternalError('Unknown read_consistency %d'
% read_consistency)
elif consistency_type is not None:
raise datastore_errors.InternalError('Unknown consistency_type: %s'
% consistency_type)
return v3_query
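    # Editorial example: new_txn must be supplied exactly when read_options
    # select the new_transaction consistency type, e.g.
    #
    #   v3_query = converter.v1_run_query_req_to_v3_query(req, new_txn=txn)
    #
    # and must be None for the transaction and read_consistency types.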
def v3_to_v1_run_query_req(self, v3_req):
"""Converts a v3 Query to a v1 RunQueryRequest.
Args:
v3_req: a datastore_pb.Query
Returns:
a googledatastore.RunQueryRequest
"""
v1_req = googledatastore.RunQueryRequest()
v1_partition_id = v1_req.partition_id
v1_partition_id.project_id = v3_req.app
if v3_req.name_space:
      v1_partition_id.namespace_id = v3_req.name_space
if v3_req.HasField('transaction'):
v1_req.read_options.transaction = self._v3_to_v1_txn(v3_req.transaction)
elif v3_req.strong:
v1_req.read_options.read_consistency = (
googledatastore.ReadOptions.STRONG)
elif v3_req.HasField('strong'):
v1_req.read_options.read_consistency = (
googledatastore.ReadOptions.EVENTUAL)
self._query_converter.v3_to_v1_query(v3_req, v1_req.query)
return v1_req
def v1_run_query_resp_to_v3_query_result(self, v1_resp):
"""Converts a V4 RunQueryResponse to a v3 QueryResult.
Args:
v1_resp: a googledatastore.QueryResult
Returns:
a datastore_pb.QueryResult
"""
v3_resp = self.v1_to_v3_query_result(v1_resp.batch)
return v3_resp
def v3_to_v1_run_query_resp(self, v3_resp, new_txn=None):
"""Converts a v3 QueryResult to a V4 RunQueryResponse.
Args:
v3_resp: a datastore_pb.QueryResult
new_txn: optional, a transaction that was created when processing the
RunQueryRequest.
Returns:
a googledatastore.RunQueryResponse
"""
v1_resp = googledatastore.RunQueryResponse()
self.v3_to_v1_query_result_batch(v3_resp, v1_resp.batch)
if new_txn:
v1_resp.transaction = new_txn
return v1_resp
def v1_to_v3_get_req(self, v1_req, new_txn=None):
"""Converts a v1 LookupRequest to a v3 GetRequest.
Args:
v1_req: a googledatastore.LookupRequest
new_txn: a v1 transaction created ad-hoc for this lookup, or None.
Returns:
a datastore_pb.GetRequest
"""
consistency_type = v1_req.read_options.WhichOneof('consistency_type')
if (new_txn is None) == (consistency_type == 'new_transaction'):
raise datastore_errors.InternalError('new_txn should be set only if the '
'consistency type is '
'new_transaction')
v3_req = datastore_pb.GetRequest()
v3_req.allow_deferred = True
if consistency_type == 'transaction':
self.v1_to_v3_txn(v1_req.read_options.transaction, v3_req.transaction)
elif consistency_type == 'new_transaction':
self.v1_to_v3_txn(new_txn, v3_req.transaction)
elif consistency_type == 'read_consistency':
read_consistency = v1_req.read_options.read_consistency
if read_consistency == googledatastore.ReadOptions.EVENTUAL:
v3_req.strong = False
v3_req.failover_ms = -1
elif read_consistency == googledatastore.ReadOptions.STRONG:
v3_req.strong = True
elif (read_consistency !=
googledatastore.ReadOptions.READ_CONSISTENCY_UNSPECIFIED):
raise datastore_errors.InternalError('Unknown read_consistency %d'
% read_consistency)
elif consistency_type is not None:
raise datastore_errors.InternalError('Unknown consistency_type: %s'
% consistency_type)
for v1_key in v1_req.keys:
self._entity_converter.v1_to_v3_reference(v1_key, v3_req.key.add())
return v3_req
def v3_to_v1_lookup_resp(self, v3_resp, new_txn=None):
"""Converts a v3 GetResponse to a v1 LookupResponse.
Args:
v3_resp: a datastore_pb.GetResponse
new_txn: a v1 transaction created ad-hoc for this lookup, or None.
Returns:
a googledatastore.LookupResponse
"""
v1_resp = googledatastore.LookupResponse()
if new_txn:
v1_resp.transaction = new_txn
for v3_ref in v3_resp.deferred:
self._entity_converter.v3_to_v1_key(v3_ref, v1_resp.deferred.add())
for v3_entity in v3_resp.entity:
if v3_entity.HasField('entity'):
v1_entity_result = v1_resp.found.add()
self._entity_converter.v3_to_v1_entity(v3_entity.entity,
v1_entity_result.entity)
if v3_entity.HasField('key'):
v1_entity_result = v1_resp.missing.add()
self._entity_converter.v3_to_v1_key(v3_entity.key,
v1_entity_result.entity.key)
if v3_entity.HasField('version'):
v1_entity_result.version = v3_entity.version
return v1_resp
def v1_to_v3_query_result(self, v1_batch):
"""Converts a v1 QueryResultBatch to a v3 QueryResult.
Args:
v1_batch: a googledatastore.QueryResultBatch
Returns:
a datastore_pb.QueryResult
"""
v3_result = datastore_pb.QueryResult()
v3_result.more_results = (
(v1_batch.more_results
== googledatastore.QueryResultBatch.NOT_FINISHED))
if v1_batch.end_cursor:
self._query_converter.v1_to_v3_compiled_cursor(
v1_batch.end_cursor, v3_result.compiled_cursor)
if v1_batch.skipped_cursor:
self._query_converter.v1_to_v3_compiled_cursor(
v1_batch.skipped_cursor,
v3_result.skipped_results_compiled_cursor)
if v1_batch.entity_result_type == googledatastore.EntityResult.PROJECTION:
v3_result.index_only = True
elif v1_batch.entity_result_type == googledatastore.EntityResult.KEY_ONLY:
v3_result.keys_only = True
if v1_batch.skipped_results:
v3_result.skipped_results = v1_batch.skipped_results
    for v1_entity_result in v1_batch.entity_results:
v3_entity = v3_result.result.add()
self._entity_converter.v1_to_v3_entity(v1_entity_result.entity, v3_entity)
if v1_entity_result.cursor:
cursor = v3_result.result_compiled_cursor.add()
self._query_converter.v1_to_v3_compiled_cursor(v1_entity_result.cursor,
cursor)
if v1_batch.entity_result_type != googledatastore.EntityResult.FULL:
v3_entity.ClearField('entity_group')
return v3_result
def v3_to_v1_query_result_batch(self, v3_result, v1_batch):
"""Converts a v3 QueryResult to a v1 QueryResultBatch.
Args:
v3_result: a datastore_pb.QueryResult
v1_batch: a googledatastore.QueryResultBatch to populate
"""
v1_batch.Clear()
if v3_result.more_results:
v1_batch.more_results = googledatastore.QueryResultBatch.NOT_FINISHED
else:
v1_batch.more_results = (
googledatastore.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT)
if v3_result.HasField('compiled_cursor'):
v1_batch.end_cursor = (
self._query_converter.v3_to_v1_compiled_cursor(
v3_result.compiled_cursor))
if v3_result.HasField('skipped_results_compiled_cursor'):
v1_batch.skipped_cursor = (
self._query_converter.v3_to_v1_compiled_cursor(
v3_result.skipped_results_compiled_cursor))
if v3_result.keys_only:
v1_batch.entity_result_type = googledatastore.EntityResult.KEY_ONLY
elif v3_result.index_only:
v1_batch.entity_result_type = googledatastore.EntityResult.PROJECTION
else:
v1_batch.entity_result_type = googledatastore.EntityResult.FULL
if v3_result.HasField('skipped_results'):
v1_batch.skipped_results = v3_result.skipped_results
for v3_entity, v3_version, v3_cursor in zip_longest(
v3_result.result, v3_result.version, v3_result.result_compiled_cursor):
v1_entity_result = v1_batch.entity_results.add()
self._entity_converter.v3_to_v1_entity(v3_entity,
v1_entity_result.entity)
if v3_version is not None:
v1_entity_result.version = v3_version
if v3_cursor is not None:
v1_entity_result.cursor = (
self._query_converter.v3_to_v1_compiled_cursor(v3_cursor))
def v4_to_v3_cursor(self, v4_query_handle, v3_cursor):
"""Converts a v4 cursor string to a v3 Cursor.
Args:
v4_query_handle: a string representing a v4 query handle
v3_cursor: a datastore_pb.Cursor to populate
"""
try:
v3_cursor.ParseFromString(v4_query_handle)
except message.DecodeError:
raise datastore_pbs.InvalidConversionError('Invalid query handle.')
return v3_cursor
def _v3_to_v4_query_handle(self, v3_cursor):
"""Converts a v3 Cursor to a v4 query handle string.
Args:
v3_cursor: a datastore_pb.Cursor
Returns:
a string representing a v4 cursor
"""
return v3_cursor.SerializeToString()
def v4_to_v3_txn(self, v4_txn, v3_txn):
"""Converts a v4 transaction string to a v3 Transaction.
Args:
v4_txn: a string representing a v4 transaction
v3_txn: a datastore_pb.Transaction() to populate
"""
try:
v3_txn.ParseFromString(v4_txn)
except message.DecodeError:
raise datastore_pbs.InvalidConversionError('Invalid transaction.')
return v3_txn
def _v3_to_v4_txn(self, v3_txn):
"""Converts a v3 Transaction to a v4 transaction string.
Args:
v3_txn: a datastore_pb.Transaction()
Returns:
a string representing a v4 transaction
"""
return v3_txn.SerializeToString()
def v4_to_v3_begin_transaction_req(self, app_id, v4_req):
"""Converts a v4 BeginTransactionRequest to a v3 BeginTransactionRequest.
Args:
app_id: app id
v4_req: a datastore_v4_pb2.BeginTransactionRequest
Returns:
a datastore_pb.BeginTransactionRequest
"""
v3_req = datastore_pb.BeginTransactionRequest()
v3_req.app = app_id
v3_req.allow_multiple_eg = v4_req.cross_group
return v3_req
def v3_to_v4_begin_transaction_resp(self, v3_resp):
"""Converts a v3 Transaction to a v4 BeginTransactionResponse.
Args:
v3_resp: a datastore_pb.Transaction()
Returns:
a datastore_v4_pb2.BeginTransactionResponse
"""
v4_resp = datastore_v4_pb2.BeginTransactionResponse()
v4_resp.transaction = self._v3_to_v4_txn(v3_resp)
return v4_resp
def v4_rollback_req_to_v3_txn(self, v4_req):
"""Converts a v4 RollbackRequest to a v3 Transaction.
Args:
v4_req: a datastore_v4_pb2.RollbackRequest
Returns:
a datastore_pb.Transaction()
"""
v3_txn = datastore_pb.Transaction()
self.v4_to_v3_txn(v4_req.transaction, v3_txn)
return v3_txn
def v4_commit_req_to_v3_txn(self, v4_req):
"""Converts a v4 CommitRequest to a v3 Transaction.
Args:
v4_req: a datastore_v4_pb2.CommitRequest
Returns:
a datastore_pb.Transaction()
"""
v3_txn = datastore_pb.Transaction()
self.v4_to_v3_txn(v4_req.transaction, v3_txn)
return v3_txn
def v4_run_query_req_to_v3_query(self, v4_req):
"""Converts a v4 RunQueryRequest to a v3 Query.
GQL is not supported.
Args:
v4_req: a datastore_v4_pb2.RunQueryRequest
Returns:
a datastore_pb.Query
"""
datastore_pbs.check_conversion(not v4_req.HasField('gql_query'),
'GQL not supported')
v3_query = datastore_pb.Query()
self._query_converter.v4_to_v3_query(v4_req.partition_id, v4_req.query,
v3_query)
if v4_req.HasField('suggested_batch_size'):
v3_query.count = v4_req.suggested_batch_size
read_options = v4_req.read_options
if read_options.HasField('transaction'):
self.v4_to_v3_txn(read_options.transaction, v3_query.transaction)
elif (read_options.read_consistency
== datastore_v4_pb2.ReadOptions.EVENTUAL):
v3_query.strong = False
v3_query.failover_ms = -1
elif read_options.read_consistency == datastore_v4_pb2.ReadOptions.STRONG:
v3_query.strong = True
if v4_req.HasField('min_safe_time_seconds'):
v3_query.min_safe_time_seconds = v4_req.min_safe_time_seconds
return v3_query
def v3_to_v4_run_query_req(self, v3_req):
"""Converts a v3 Query to a v4 RunQueryRequest.
Args:
v3_req: a datastore_pb.Query
Returns:
a datastore_v4_pb2.RunQueryRequest
"""
v4_req = datastore_v4_pb2.RunQueryRequest()
v4_partition_id = v4_req.partition_id
v4_partition_id.dataset_id = v3_req.app
if v3_req.name_space:
v4_partition_id.namespace = v3_req.name_space
if v3_req.HasField('count'):
v4_req.suggested_batch_size = v3_req.count
if v3_req.HasField('transaction'):
v4_req.read_options.transaction = (
self._v3_to_v4_txn(v3_req.transaction))
elif v3_req.strong:
v4_req.read_options.read_consistency = (
datastore_v4_pb2.ReadOptions.STRONG)
elif v3_req.HasField('strong'):
v4_req.read_options.read_consistency = (
datastore_v4_pb2.ReadOptions.EVENTUAL)
if v3_req.HasField('min_safe_time_seconds'):
v4_req.min_safe_time_seconds = v3_req.min_safe_time_seconds
self._query_converter.v3_to_v4_query(v3_req, v4_req.query)
return v4_req
def v4_run_query_resp_to_v3_query_result(self, v4_resp):
"""Converts a V4 RunQueryResponse to a v3 QueryResult.
Args:
v4_resp: a datastore_v4_pb2.QueryResult
Returns:
a datastore_pb.QueryResult
"""
v3_resp = self.v4_to_v3_query_result(v4_resp.batch)
if v4_resp.HasField('query_handle'):
self.v4_to_v3_cursor(v4_resp.query_handle, v3_resp.cursor)
return v3_resp
def v3_to_v4_run_query_resp(self, v3_resp):
"""Converts a v3 QueryResult to a V4 RunQueryResponse.
Args:
v3_resp: a datastore_pb.QueryResult
Returns:
a datastore_v4_pb2.RunQueryResponse
"""
v4_resp = datastore_v4_pb2.RunQueryResponse()
self.v3_to_v4_query_result_batch(v3_resp, v4_resp.batch)
if v3_resp.HasField('cursor'):
v4_resp.query_handle = (
self._query_converter.v3_to_v4_compiled_cursor(v3_resp.cursor))
return v4_resp
def v4_to_v3_next_req(self, v4_req):
"""Converts a v4 ContinueQueryRequest to a v3 NextRequest.
Args:
v4_req: a datastore_v4_pb2.ContinueQueryRequest
Returns:
a datastore_pb.NextRequest
"""
v3_req = datastore_pb.NextRequest()
v3_req.compile = True
self.v4_to_v3_cursor(v4_req.query_handle, v3_req.cursor)
return v3_req
def v3_to_v4_continue_query_resp(self, v3_resp):
"""Converts a v3 QueryResult to a v4 ContinueQueryResponse.
Args:
v3_resp: a datastore_pb.QueryResult
Returns:
a datastore_v4_pb2.ContinueQueryResponse
"""
v4_resp = datastore_v4_pb2.ContinueQueryResponse()
self.v3_to_v4_query_result_batch(v3_resp, v4_resp.batch)
return v4_resp
def v4_to_v3_get_req(self, v4_req):
"""Converts a v4 LookupRequest to a v3 GetRequest.
Args:
v4_req: a datastore_v4_pb2.LookupRequest
Returns:
a datastore_pb.GetRequest
"""
v3_req = datastore_pb.GetRequest()
v3_req.allow_deferred = True
if v4_req.read_options.HasField('transaction'):
self.v4_to_v3_txn(v4_req.read_options.transaction,
v3_req.transaction)
elif (v4_req.read_options.read_consistency
== datastore_v4_pb2.ReadOptions.EVENTUAL):
v3_req.strong = False
v3_req.failover_ms = -1
elif (v4_req.read_options.read_consistency
== datastore_v4_pb2.ReadOptions.STRONG):
v3_req.strong = True
for v4_key in v4_req.key:
self._entity_converter.v4_to_v3_reference(v4_key, v3_req.key.add())
return v3_req
def v3_to_v4_lookup_resp(self, v3_resp):
"""Converts a v3 GetResponse to a v4 LookupResponse.
Args:
v3_resp: a datastore_pb.GetResponse
Returns:
a datastore_v4_pb2.LookupResponse
"""
v4_resp = datastore_v4_pb2.LookupResponse()
for v3_ref in v3_resp.deferred:
self._entity_converter.v3_to_v4_key(v3_ref, v4_resp.deferred.add())
for v3_entity in v3_resp.entity:
if v3_entity.HasField('entity'):
self._entity_converter.v3_to_v4_entity(
v3_entity.entity,
v4_resp.found.add().entity)
if v3_entity.HasField('key'):
self._entity_converter.v3_to_v4_key(
v3_entity.key,
v4_resp.missing.add().entity.key)
return v4_resp
def v4_to_v3_query_result(self, v4_batch):
"""Converts a v4 QueryResultBatch to a v3 QueryResult.
Args:
v4_batch: a datastore_v4_pb2.QueryResultBatch
Returns:
a datastore_pb.QueryResult
"""
v3_result = datastore_pb.QueryResult()
v3_result.more_results = (
(v4_batch.more_results
== datastore_v4_pb2.QueryResultBatch.NOT_FINISHED))
if v4_batch.HasField('end_cursor'):
self._query_converter.v4_to_v3_compiled_cursor(
v4_batch.end_cursor, v3_result.compiled_cursor)
if v4_batch.HasField('skipped_cursor'):
self._query_converter.v4_to_v3_compiled_cursor(
v4_batch.skipped_cursor,
v3_result.skipped_results_compiled_cursor)
if v4_batch.entity_result_type == datastore_v4_pb2.EntityResult.PROJECTION:
v3_result.index_only = True
elif v4_batch.entity_result_type == datastore_v4_pb2.EntityResult.KEY_ONLY:
v3_result.keys_only = True
if v4_batch.HasField('skipped_results'):
v3_result.skipped_results = v4_batch.skipped_results
for v4_entity in v4_batch.entity_result:
v3_entity = v3_result.result.add()
self._entity_converter.v4_to_v3_entity(v4_entity.entity, v3_entity)
if v4_entity.HasField('cursor'):
cursor = v3_result.result_compiled_cursor.add()
self._query_converter.v4_to_v3_compiled_cursor(v4_entity.cursor,
cursor)
if v4_batch.entity_result_type != datastore_v4_pb2.EntityResult.FULL:
v3_entity.ClearField('entity_group')
return v3_result
def v3_to_v4_query_result_batch(self, v3_result, v4_batch):
"""Converts a v3 QueryResult to a v4 QueryResultBatch.
Args:
v3_result: a datastore_pb.QueryResult
v4_batch: a datastore_v4_pb2.QueryResultBatch to populate
"""
v4_batch.Clear()
if v3_result.more_results:
v4_batch.more_results = datastore_v4_pb2.QueryResultBatch.NOT_FINISHED
else:
v4_batch.more_results = (
datastore_v4_pb2.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT)
if v3_result.HasField('compiled_cursor'):
v4_batch.end_cursor = (
self._query_converter.v3_to_v4_compiled_cursor(
v3_result.compiled_cursor))
if v3_result.HasField('skipped_results_compiled_cursor'):
v4_batch.skipped_cursor = (
self._query_converter.v3_to_v4_compiled_cursor(
v3_result.skipped_results_compiled_cursor))
if v3_result.keys_only:
v4_batch.entity_result_type = datastore_v4_pb2.EntityResult.KEY_ONLY
elif v3_result.index_only:
v4_batch.entity_result_type = datastore_v4_pb2.EntityResult.PROJECTION
else:
v4_batch.entity_result_type = datastore_v4_pb2.EntityResult.FULL
if v3_result.HasField('skipped_results'):
v4_batch.skipped_results = v3_result.skipped_results
for v3_entity, v3_cursor in zip_longest(v3_result.result,
v3_result.result_compiled_cursor):
v4_entity_result = datastore_v4_pb2.EntityResult()
self._entity_converter.v3_to_v4_entity(v3_entity,
v4_entity_result.entity)
if v3_cursor is not None:
v4_entity_result.cursor = (
self._query_converter.v3_to_v4_compiled_cursor(v3_cursor))
v4_batch.entity_result.append(v4_entity_result)
def get_service_converter(id_resolver=None):
"""Returns a converter for v3 and v1 service request/response protos.
This converter is suitable for use in stubs but not for production.
Returns:
a StubServiceConverter
"""
query_converter = get_query_converter(id_resolver)
return StubServiceConverter(query_converter.get_entity_converter(),
query_converter)
def ReverseBitsInt64(v):
"""Reverse the bits of a 64-bit integer.
Args:
v: Input integer of type 'int' or 'long'.
Returns:
Bit-reversed input as 'int' on 64-bit machines or as 'long' otherwise.
"""
v = ((v >> 1) & 0x5555555555555555) | ((v & 0x5555555555555555) << 1)
v = ((v >> 2) & 0x3333333333333333) | ((v & 0x3333333333333333) << 2)
v = ((v >> 4) & 0x0F0F0F0F0F0F0F0F) | ((v & 0x0F0F0F0F0F0F0F0F) << 4)
v = ((v >> 8) & 0x00FF00FF00FF00FF) | ((v & 0x00FF00FF00FF00FF) << 8)
v = ((v >> 16) & 0x0000FFFF0000FFFF) | ((v & 0x0000FFFF0000FFFF) << 16)
v = int((v >> 32) | (v << 32) & 0xFFFFFFFFFFFFFFFF)
return v
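# Illustrative examples for ReverseBitsInt64 (editorial):
#
#   ReverseBitsInt64(1) == 1 << 63
#   ReverseBitsInt64(1 << 63) == 1
#   ReverseBitsInt64(ReverseBitsInt64(x)) == x  # reversal is an involution
#
# This is what spreads consecutive scattered counter values far apart in the
# 64-bit id space.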
def ToScatteredId(v):
"""Map counter value v to the scattered ID space.
Translate to scattered ID space, then reverse bits.
Args:
v: Counter value from which to produce ID.
Returns:
Integer ID.
Raises:
datastore_errors.BadArgumentError if counter value exceeds the range of
the scattered ID space.
"""
if v >= _MAX_SCATTERED_COUNTER:
raise datastore_errors.BadArgumentError('counter value too large (%d)' % v)
return _MAX_SEQUENTIAL_ID + 1 + long(ReverseBitsInt64(v << _SCATTER_SHIFT))
def IdToCounter(k):
"""Map ID k to the counter value from which it was generated.
Determine whether k is sequential or scattered ID.
Args:
k: ID from which to infer counter value.
Returns:
Tuple of integers (counter_value, id_space).
"""
if k > _MAX_SCATTERED_ID:
return 0, SCATTERED
elif k > _MAX_SEQUENTIAL_ID and k <= _MAX_SCATTERED_ID:
return int(ReverseBitsInt64(k) >> _SCATTER_SHIFT), SCATTERED
elif k > 0:
return int(k), SEQUENTIAL
else:
raise datastore_errors.BadArgumentError('invalid id (%d)' % k)
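# Editorial note: ToScatteredId and IdToCounter are intended to round-trip.
# Assuming the module's _SCATTER_SHIFT and _MAX_SEQUENTIAL_ID constants, for a
# counter value c below _MAX_SCATTERED_COUNTER:
#
#   IdToCounter(ToScatteredId(c)) == (c, SCATTERED)
#
# while a positive id s <= _MAX_SEQUENTIAL_ID maps to (s, SEQUENTIAL).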
def CompareEntityPbByKey(a, b):
"""Compare two entity protobuf's by key.
Args:
a: entity_pb2.EntityProto to compare
b: entity_pb2.EntityProto to compare
Returns:
<0 if a's key is before b's, =0 if they are the same key, and >0 otherwise.
"""
return cmp_compat.cmp(
datastore_types.Key._FromPb(a.key), datastore_types.Key._FromPb(b.key))
def NormalizeCursors(query, first_sort_direction):
"""Normalizes compiled cursors in place.
Any position specified in the position group is moved to either the
postfix_position or absolute_position field and the position group is
cleared.
If the cursor position does not specify before_ascending, populate it.
If before_ascending is already populated, use it and the sort direction
from the query to set an appropriate value for before.
Args:
query: datastore_pb.Query
first_sort_direction: first sort direction as returned by _GuessOrders
"""
_NormalizeCursor(query.compiled_cursor, first_sort_direction)
_NormalizeCursor(query.end_compiled_cursor, first_sort_direction)
def _GuessOrders(filters, orders):
"""Guess any implicit ordering.
The datastore gives a logical, but not necessarily predictable, ordering when
orders are not completely explicit. This function guesses at that ordering
  (which is better than always ordering by __key__ for tests).
Args:
filters: The datastore_pb.Query.Filter that have already been normalized and
checked.
orders: The datastore_pb.Query.Order that have already been normalized and
      checked. A copy is made; the input list is left unmodified.
  Returns:
    The orders, extended with any implied orders and always ending with an
    order on __key__.
  """
orders = orders[:]
if not orders:
for filter_pb in filters:
if filter_pb.op in datastore_index.INEQUALITY_OPERATORS:
order = datastore_pb.Query.Order()
order.property = filter_pb.property[0].name
orders.append(order)
break
exists_props = (
filter_pb.property[0].name
for filter_pb in filters
if filter_pb.op == datastore_pb.Query.Filter.EXISTS)
for prop in sorted(exists_props):
order = datastore_pb.Query.Order()
order.property = prop
orders.append(order)
if not orders or orders[-1].property != '__key__':
order = datastore_pb.Query.Order()
order.property = '__key__'
orders.append(order)
return orders
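# Illustrative example for _GuessOrders (editorial): a query with the single
# inequality filter `age > 18`, no EXISTS filters and no explicit orders gets
# the implicit ordering [age, __key__]; a query that already orders by `name`
# gets [name, __key__].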
def _MakeQuery(query_pb, filters, orders):
"""Make a datastore_query.Query for the given datastore_pb.Query.
Overrides filters and orders in query with the specified arguments.
Args:
query_pb: a datastore_pb.Query.
filters: the filters from query.
orders: the orders from query.
Returns:
A datastore_query.Query for the datastore_pb.Query."""
clone_pb = datastore_pb.Query()
clone_pb.CopyFrom(query_pb)
clone_pb.ClearField('filter')
clone_pb.ClearField('order')
clone_pb.filter.extend(filters)
clone_pb.order.extend(orders)
return datastore_query.Query._from_pb(clone_pb)
def _CreateIndexEntities(entity, postfix_props):
"""Creates entities for index values that would appear in prodcution.
This function finds all multi-valued properties listed in split_props, and
creates a new entity for each unique combination of values. The resulting
entities will only have a single value for each property listed in
split_props.
It reserves the right to include index data that would not be
seen in production, e.g. by returning the original entity when no splitting
is needed. LoadEntity will remove any excess fields.
This simulates the results seen by an index scan in the datastore.
Args:
entity: The entity_pb2.EntityProto to split.
    postfix_props: A set of property names to split on.
Returns:
A list of the split entity_pb2.EntityProtos.
"""
to_split = {}
split_required = False
base_props = []
for prop in entity.property:
if prop.name in postfix_props:
values = to_split.get(prop.name)
if values is None:
values = []
to_split[prop.name] = values
else:
split_required = True
if prop.value not in values:
values.append(prop.value)
else:
base_props.append(prop)
if not split_required:
return [entity]
clone = entity_pb2.EntityProto()
clone.CopyFrom(entity)
clone.ClearField('property')
clone.property.extend(base_props)
results = [clone]
for name, splits in six.iteritems(to_split):
if len(splits) == 1:
for result in results:
prop = result.property.add()
prop.name = name
prop.multiple = False
prop.meaning = entity_pb2.Property.INDEX_VALUE
prop.value.CopyFrom(splits[0])
continue
new_results = []
for result in results:
for split in splits:
clone = entity_pb2.EntityProto()
clone.CopyFrom(result)
prop = clone.property.add()
prop.name = name
prop.multiple = False
prop.meaning = entity_pb2.Property.INDEX_VALUE
prop.value.CopyFrom(split)
new_results.append(clone)
results = new_results
return results
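# Illustrative example for _CreateIndexEntities (editorial): splitting an
# entity with name='x' and tag=['a', 'b'] on postfix_props={'tag'} yields two
# index entities, one with tag='a' and one with tag='b'. Each keeps name='x',
# and the split tag property is single-valued and marked
# entity_pb2.Property.INDEX_VALUE.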
def _CreateIndexOnlyQueryResults(records, postfix_props):
"""Creates a result set similar to that returned by an index only query."""
new_records = []
for record in records:
index_entities = _CreateIndexEntities(record.entity, postfix_props)
new_records.extend([EntityRecord(e, record.metadata)
for e in index_entities])
return new_records
def _ExecuteQuery(results, query, filters, orders, index_list):
"""Executes the query on a superset of its results.
Args:
results: superset of results for query, list of EntityRecord.
query: a datastore_pb.Query.
filters: the filters from query.
orders: the orders from query.
index_list: the list of indexes used by the query.
Returns:
A ListCursor over the results of applying query to results.
"""
orders = _GuessOrders(filters, orders)
NormalizeCursors(query, orders[0].direction)
dsquery = _MakeQuery(query, filters, orders)
if query.property_name:
order_properties = set(order.property for order in orders)
results = _CreateIndexOnlyQueryResults(results, order_properties)
filtered_results = datastore_query.apply_query(dsquery, results,
lambda r: r.entity)
return ListCursor(query, dsquery, orders, index_list, filtered_results)
def _UpdateCost(cost, entity_writes, index_writes):
"""Updates the provided cost.
Args:
cost: Out param. The cost object to update.
entity_writes: The number of entity writes to add.
index_writes: The number of index writes to add.
"""
cost.entity_writes = cost.entity_writes + entity_writes
cost.index_writes = cost.index_writes + index_writes
def _PropertyListToMultimap(property_list):
"""Transforms a list of property protobufs into a multimap keyed by name.
The order of repeated properties is respected in the result. The returned
multimap can be compared directly for entity equality testing.
Args:
property_list: a list of datastore_pbs.Property protobufs.
Returns:
a map of property names to list of datastore_pbs.Property with that name.
"""
result = collections.defaultdict(list)
for prop in property_list:
result[prop.name].append(prop)
return result
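# Illustrative example for _PropertyListToMultimap (editorial): the property
# list (tag='a', name='x', tag='b') maps to {'tag': [a, b], 'name': [x]}.
# Grouping by name lets _IsNoOpWrite below ignore how properties of different
# names are interleaved, while the relative order of repeated 'tag' values is
# preserved.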
def _IsNoOpWrite(old_entity, new_entity):
"""Returns whether putting an entity is a no-op given its current value.
Args:
old_entity: the current entity in the datastore, or None if the entity does
not exist.
new_entity: the new entity to store, or None if we are deleting the entity.
Note that the two entities must share the same key.
Returns:
True iff both the previous and new entities are None or are equivalent (have
the same properties and property values).
"""
if old_entity is not None and new_entity is not None:
old_raw_properties = _PropertyListToMultimap(old_entity.raw_property)
new_raw_properties = _PropertyListToMultimap(new_entity.raw_property)
old_properties = _PropertyListToMultimap(old_entity.property)
new_properties = _PropertyListToMultimap(new_entity.property)
return (old_properties == new_properties
and old_raw_properties == new_raw_properties)
else:
return old_entity == new_entity
def _CalculateWriteOps(composite_indexes, old_entity, new_entity):
"""Determines number of entity and index writes needed to write new_entity.
We assume that old_entity represents the current state of the Datastore.
Args:
composite_indexes: The composite_indexes for the kind of the entities.
old_entity: Entity representing the current state in the Datastore.
new_entity: Entity representing the desired state in the Datastore.
Returns:
A tuple of size 2, where the first value is the number of entity writes and
the second value is the number of index writes.
"""
if _IsNoOpWrite(old_entity, new_entity):
return 0, 0
index_writes = _ChangedIndexRows(composite_indexes, old_entity, new_entity)
if old_entity is None:
index_writes += 1
return 1, index_writes
def _ChangedIndexRows(composite_indexes, old_entity, new_entity):
"""Determine the number of index rows that need to change.
We assume that old_entity represents the current state of the Datastore.
Args:
composite_indexes: The composite_indexes for the kind of the entities.
old_entity: Entity representing the current state in the Datastore.
new_entity: Entity representing the desired state in the Datastore
Returns:
The number of index rows that need to change.
"""
unique_old_properties = collections.defaultdict(set)
unique_new_properties = collections.defaultdict(set)
if old_entity is not None:
for old_prop in old_entity.property:
_PopulateUniquePropertiesSet(old_prop, unique_old_properties)
unchanged = collections.defaultdict(int)
for new_prop in new_entity.property:
new_prop_as_str = _PopulateUniquePropertiesSet(
new_prop, unique_new_properties)
if new_prop_as_str in unique_old_properties[new_prop.name]:
unchanged[new_prop.name] += 1
all_property_names = set(six.iterkeys(unique_old_properties))
  all_property_names.update(six.iterkeys(unique_new_properties))
all_property_names.update(six.iterkeys(unchanged))
all_indexes = _GetEntityByPropertyIndexes(all_property_names)
all_indexes.extend([comp.definition for comp in composite_indexes])
path_size = len(new_entity.key.path.element)
writes = 0
for index in all_indexes:
ancestor_multiplier = 1
if index.ancestor and len(index.property) > 1:
ancestor_multiplier = path_size
writes += (_CalculateWritesForCompositeIndex(
index, unique_old_properties, unique_new_properties, unchanged) *
ancestor_multiplier)
return writes
def _PopulateUniquePropertiesSet(prop, unique_properties):
"""Populates a set containing unique properties.
Args:
prop: An entity property.
unique_properties: Dictionary mapping property names to a set of unique
properties.
Returns:
The property pb in string (hashable) form.
"""
if prop.multiple:
prop = _CopyAndSetMultipleToFalse(prop)
prop_as_str = prop.SerializePartialToString()
unique_properties[prop.name].add(prop_as_str)
return prop_as_str
def _CalculateWritesForCompositeIndex(index, unique_old_properties,
unique_new_properties,
common_properties):
"""Calculate the number of writes required to maintain a specific Index.
Args:
index: The composite index.
unique_old_properties: Dictionary mapping property names to a set of props
present on the old entity.
unique_new_properties: Dictionary mapping property names to a set of props
present on the new entity.
common_properties: Dictionary mapping property names to the number of
properties with that name that are present on both the old and new
entities.
Returns:
    The number of writes required to maintain the provided index.
"""
old_count = 1
new_count = 1
common_count = 1
for prop in index.property:
old_count *= len(unique_old_properties[prop.name])
new_count *= len(unique_new_properties[prop.name])
common_count *= common_properties[prop.name]
return (old_count - common_count) + (new_count - common_count)
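# Worked example (illustrative numbers, not from the original source): for an index
# over properties (a, b), suppose the old entity has 2 unique values of a and 1 of b,
# the new entity has 3 of a and 1 of b, and exactly one value of each is shared.
def _CompositeIndexWritesExample():
  """Checks the (old - common) + (new - common) formula on hypothetical data."""
  index = entity_pb2.Index()
  for name in ('a', 'b'):
    prop = index.property.add()
    prop.name = name
  unique_old = {'a': {'a1', 'a2'}, 'b': {'b1'}}
  unique_new = {'a': {'a1', 'a3', 'a4'}, 'b': {'b1'}}
  common = {'a': 1, 'b': 1}
  writes = _CalculateWritesForCompositeIndex(index, unique_old, unique_new, common)
  # old_count = 2 * 1, new_count = 3 * 1, common_count = 1 * 1 -> (2 - 1) + (3 - 1)
  assert writes == 3
  return writes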
def _GetEntityByPropertyIndexes(all_property_names):
indexes = []
for prop_name in all_property_names:
indexes.append(
_SinglePropertyIndex(prop_name, entity_pb2.Index.Property.ASCENDING))
indexes.append(
_SinglePropertyIndex(prop_name, entity_pb2.Index.Property.DESCENDING))
return indexes
def _SinglePropertyIndex(prop_name, direction):
"""Creates a single property Index for the given name and direction.
Args:
prop_name: The name of the single property on the Index.
direction: The direction of the Index.
Returns:
A single property Index with the given property and direction.
"""
index = entity_pb2.Index()
prop = index.property.add()
prop.name = prop_name
prop.direction = direction
return index
def _CopyAndSetMultipleToFalse(prop):
"""Copy the provided Property and set its "multiple" attribute to False.
Args:
prop: The Property to copy.
Returns:
A copy of the given Property with its "multiple" attribute set to False.
"""
prop_copy = entity_pb2.Property()
prop_copy.MergeFrom(prop)
prop_copy.multiple = False
return prop_copy
def _NormalizeCursor(cursor, first_sort_direction):
"""Normalizes a compiled cursor in place.
Any position specified in the position group is moved to either the
postfix_position or absolute_position field and the position group is
cleared.
If the cursor position does not specify before_ascending, populate it.
If before_ascending is already populated, use it and the provided direction
to set an appropriate value for before.
Args:
cursor: datastore_pb.CompiledCursor
first_sort_direction: first sort direction as returned by _GuessOrders
"""
Check((cursor.HasField('position') + cursor.HasField('postfix_position') +
cursor.HasField('absolute_position')) <= 1,
('Cursor may specify at most one of position, postfix_position, '
'and absolute_position.'))
if cursor.HasField('position'):
pos = cursor.position
if pos.HasField('start_key'):
index_pos = cursor.absolute_position
index_pos.key = pos.start_key
if pos.HasField('start_inclusive'):
index_pos.before = pos.start_inclusive
if pos.HasField('before_ascending'):
index_pos.before_ascending = pos.before_ascending
elif pos.HasField('key') or pos.indexvalue:
postfix_pos = cursor.postfix_position
for value in pos.indexvalue:
index_value = postfix_pos.index_value.add()
index_value.property_name = value.property
index_value.value.MergeFrom(value.value)
if pos.HasField('key'):
postfix_pos.key.MergeFrom(pos.key)
if pos.HasField('start_inclusive'):
postfix_pos.before = pos.start_inclusive
if pos.HasField('before_ascending'):
postfix_pos.before_ascending = pos.before_ascending
cursor.ClearField('position')
pos = None
if cursor.HasField('absolute_position'):
pos = cursor.absolute_position
elif cursor.HasField('postfix_position'):
pos = cursor.postfix_position
if pos:
if pos.HasField('before_ascending'):
_SetBefore(pos, first_sort_direction)
else:
_SetBeforeAscending(pos, first_sort_direction)
def _SetBefore(position, first_direction):
"""Sets the before field in position.
Args:
position: an entity_pb2.IndexPosition or entity_pb2.IndexPostfix
first_direction: the first sort order from the query
(a datastore_pb.Query.Order) or None
"""
position.before = position.before_ascending != (
first_direction == datastore_pb.Query.Order.DESCENDING)
def _SetBeforeAscending(position, first_direction):
"""Sets the before_ascending field in position.
Args:
position: an entity_pb2.IndexPosition or entity_pb2.IndexPostfix
first_direction: the first sort order from the query
(a datastore_pb.Query.Order) or None
"""
position.before_ascending = position.before != (
first_direction == datastore_pb.Query.Order.DESCENDING)
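# Illustrative truth table (added for clarity): the two setters above are inverses.
# 'before' and 'before_ascending' agree when the first sort order is ASCENDING and
# differ when it is DESCENDING:
#   before_ascending  first_direction  ->  before
#   True              ASCENDING            True
#   True              DESCENDING           False
#   False             ASCENDING            False
#   False             DESCENDING           True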
def _CheckConsistencyPolicyForCloudEmulator(consistency_policy):
"""Check if a consistency policy is supported by GCD Emulator.
Args:
consistency_policy: An instance of PseudoRandomHRConsistencyPolicy or
MasterSlaveConsistencyPolicy.
Raises:
    TypeError: Consistency policy is not an instance
of the above two policies.
"""
if not isinstance(
consistency_policy, (
MasterSlaveConsistencyPolicy, PseudoRandomHRConsistencyPolicy)):
raise TypeError(
        'Using Cloud Datastore Emulator, consistency policy must be one of '
'(%s, %s), found %s instead' % (
MasterSlaveConsistencyPolicy.__name__,
PseudoRandomHRConsistencyPolicy.__name__,
consistency_policy.__class__))
def _BuildEmulatorConfigJson(auto_id_policy=None, consistency_policy=None):
"""Update the emulator config with its client side cache.
Args:
auto_id_policy: A string indicating how GCD Emulator assigns auto IDs,
should be either SEQUENTIAL or SCATTERED.
consistency_policy: An instance of PseudoRandomHRConsistencyPolicy or
MasterSlaveConsistencyPolicy.
Returns:
A dict representing emulator_config.
"""
emulator_config = {}
if auto_id_policy:
_CheckAutoIdPolicy(auto_id_policy)
emulator_config['idAllocationPolicy'] = {'policy': auto_id_policy.upper()}
if consistency_policy:
_CheckConsistencyPolicyForCloudEmulator(consistency_policy)
emulator_config['jobPolicy'] = {}
if isinstance(consistency_policy, MasterSlaveConsistencyPolicy):
emulator_config['jobPolicy']['forceStrongConsistency'] = True
else:
emulator_config['jobPolicy'] = {
'probabilityJobPolicy': {
'randomSeed': consistency_policy.random_seed,
'unappliedJobPercentage': 100.0 * (
1.0 - consistency_policy.probability)}
}
return emulator_config
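# Example result (illustrative): with auto_id_policy='scattered' and a pseudo-random
# policy whose probability is 0.5 and random_seed is 0, the returned dict is roughly:
#   {'idAllocationPolicy': {'policy': 'SCATTERED'},
#    'jobPolicy': {'probabilityJobPolicy': {'randomSeed': 0,
#                                           'unappliedJobPercentage': 50.0}}}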
def UpdateEmulatorConfig(
port, auto_id_policy=None, consistency_policy=None):
"""Update the cloud datastore emulator's config with its client side cache.
Args:
port: A integer indicating the port number of emulator.
auto_id_policy: A string indicating how GCD Emulator assigns auto IDs,
should be either SEQUENTIAL or SCATTERED.
consistency_policy: An instance of PseudoRandomHRConsistencyPolicy or
MasterSlaveConsistencyPolicy.
"""
emulator_config = _BuildEmulatorConfigJson(auto_id_policy, consistency_policy)
global _EMULATOR_CONFIG_CACHE
if not _EMULATOR_CONFIG_CACHE or _EMULATOR_CONFIG_CACHE != emulator_config:
connection = six.moves.http_client.HTTPConnection('localhost', port)
connection.request('PATCH', '/v1/config', json.dumps(emulator_config),
{'Content-Type': 'application/json'})
response = connection.getresponse()
response.read()
_EMULATOR_CONFIG_CACHE = emulator_config
| apache-2.0 | 632,228,481,035,689,100 | 30.409436 | 95 | 0.663829 | false |
charityscience/csh-sms | modules/text_processor.py | 1 | 10816 | import logging
import string
from datetime import datetime
from django.utils import timezone
from django.core.exceptions import MultipleObjectsReturned
from management.models import Contact, Group, Message
from modules.texter import Texter
from modules.utils import quote, add_contact_to_group, keywords_without_word
from modules.date_helper import date_is_valid, date_string_to_date
from modules.i18n import msg_subscribe, msg_unsubscribe, msg_placeholder_child, msg_failure, \
msg_failed_date, subscribe_keywords, msg_already_sub, hindi_born
class TextProcessor(object):
def __init__(self, phone_number):
self.phone_number = phone_number
self.set_language(default=None)
def set_language(self, default):
if self.get_contacts().exists():
self.language = self.contacts.first().language_preference or default
else:
self.language = None
def get_language(self, language, inferred_language, keyword):
        # Returns the inferred language if the keyword is in the subscribe keywords of the inferred language,
# ignoring "born"
subscribe_keys_without_born = keywords_without_word(language=inferred_language, word="born")
if keyword in subscribe_keys_without_born:
return inferred_language
return language
# self.get_contacts() is preferred to self.contact due to triggering a Django DB reload.
def get_contacts(self):
self.contacts = Contact.objects.filter(phone_number=self.phone_number)
return self.contacts
def create_contact(self, child_name, phone_number, date_of_birth, language, preg_update=False):
contact = Contact.objects.filter(name=child_name, phone_number=self.phone_number).first()
if contact:
if contact.cancelled or preg_update:
# Update and resubscribe
contact.cancelled = False
contact.language_preference = language
contact.date_of_birth = date_of_birth
contact.functional_date_of_birth = date_of_birth
contact.preg_update = preg_update
contact.save()
return True
elif Message.objects.filter(contact=contact,
direction="Outgoing",
body=msg_subscribe(language).format(name=contact.name)).exists():
# Already exists (error)
logging.error("Contact for {name} at {phone} was subscribed but already exists!".format(name=child_name, phone=self.phone_number))
return False
# Otherwise, create
update_dict = {"delay_in_days": 0,
"language_preference": self.language,
"date_of_birth": date_of_birth,
"functional_date_of_birth": date_of_birth,
"method_of_sign_up": "Text"}
contact, _ = Contact.objects.update_or_create(name=child_name,
phone_number=phone_number,
defaults=update_dict)
for group_name in ["Text Sign Ups",
"Text Sign Ups - " + self.language.title(),
"Everyone - " + self.language.title()]:
add_contact_to_group(contact, group_name)
self.get_contacts()
return True
def cancel_contacts(self):
for contact in self.contacts:
contact.cancelled = True
contact.save()
return True
def process_subscribe(self, child_name, date_of_birth, preg_update):
if self.create_contact(child_name=child_name,
phone_number=self.phone_number,
date_of_birth=date_of_birth,
language=self.language,
preg_update=preg_update):
return msg_subscribe(self.language).format(name=child_name)
else:
return msg_already_sub(self.language)
def process_unsubscribe(self, child_name, date_of_birth, preg_update=False):
if self.contacts.exists():
contact = self.get_contacts().first()
if contact.name is None or contact.date_of_birth is None or contact.language_preference is None:
logging.error(quote(self.phone_number) + " asked to be unsubscribed but some data is missing on the existing contact object.")
self.cancel_contacts()
else:
logging.error(quote(self.phone_number) + " asked to be unsubscribed but does not exist.")
return msg_unsubscribe(self.language or "English")
def process_failure(self, child_name, date_of_birth, preg_update=False):
return msg_failure(self.language)
def process_failed_date(self, child_name, date_of_birth, preg_update=False):
return msg_failed_date(self.language)
def get_data_from_message(self, message):
"""Get the keyword, child name, and the date from the message.
A text will look like `<KEYWORD> <CHILD> <DATE OF BIRTH>`, like
`REMIND NATHAN 25/11/2015`. Sometimes the child name is omitted."""
message = message.lower().split(" ")
if len(message) == 1:
keyword = message[0]
date = None
child_name = None
elif len(message) == 2:
keyword, date = message
child_name = None
else:
keyword, child_name, date = message[0:3]
date = date_string_to_date(date) if date and date_is_valid(date) else None
return (keyword, child_name, date)
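    # Illustrative parses (added for clarity; exact date parsing depends on
    # date_string_to_date, assumed here to read day/month/year):
    #   "REMIND NATHAN 25/11/2015" -> ("remind", "nathan", date(2015, 11, 25))
    #   "REMIND 25/11/2015"        -> ("remind", None, date(2015, 11, 25))
    #   "END"                      -> ("end", None, None)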
def write_to_database(self, message, date):
keyword, child_name, date_entered = self.get_data_from_message(message)
inferred_language = "Hindi" if keyword and keyword[0] not in string.ascii_lowercase else "English"
language = self.language or inferred_language
if language != inferred_language:
language = self.get_language(language=language,
inferred_language=inferred_language,
keyword=keyword)
if not child_name and self.get_contacts():
contact = self.get_contacts().first()
child_name = contact.name
if child_name:
child_name = child_name.title()
incoming = self.create_message_object(child_name=child_name,
phone_number=self.phone_number,
language=language,
body=message,
direction="Incoming")
contact = Contact.objects.get(pk=incoming.contact.id)
contact.last_heard_from = incoming.created_at
incoming.received_at = date
incoming.save()
contact.save()
self.get_contacts()
return incoming
def process(self, message):
"""This is the main function that is run on an message to process it."""
contact = Contact.objects.get(pk=message.contact.id)
keyword, child_name, date = self.get_data_from_message(message.body)
preg_update = False
if keyword in subscribe_keywords("English"):
self.set_language(default="English")
if keyword == "born":
preg_update = True
action = self.process_subscribe
elif keyword in subscribe_keywords("Hindi"):
self.set_language(default="Hindi")
if keyword == hindi_born():
preg_update = True
action = self.process_subscribe
elif keyword == "end":
self.set_language(default="English")
action = self.process_unsubscribe
else:
self.set_language(default="English")
logging.error("Keyword " + quote(keyword) + " in message " + quote(message.body) +
" was not understood by the system.")
action = self.process_failure
if action == self.process_subscribe:
if child_name is None:
# If a child name is not found, we call them "your child".
child_name = msg_placeholder_child(self.language)
else:
child_name = child_name.title()
if len(child_name) > 50:
action = self.process_failure
if date is None:
logging.error("Date in message " + quote(message.body) + " is invalid.")
action = self.process_failed_date
if action == self.process_subscribe:
logging.info("Subscribing " + quote(message.body) + "...")
elif action == self.process_unsubscribe:
logging.info("Unsubscribing " + quote(contact.phone_number) + "...")
response_text_message = action(child_name=child_name,
date_of_birth=date,
preg_update=preg_update)
outgoing = self.create_message_object(child_name=contact.name,
phone_number=contact.phone_number,
language=self.language,
body=response_text_message,
direction="Outgoing")
contact = Contact.objects.get(pk=outgoing.contact.id)
contact.last_contacted = outgoing.created_at
contact.save()
self.get_contacts()
Texter().send(message=response_text_message,
phone_number=self.phone_number)
outgoing.is_processed = True
outgoing.sent_at = datetime.now().replace(tzinfo=timezone.get_default_timezone())
outgoing.save()
message.is_processed = True
message.save()
return response_text_message
def create_message_object(self, child_name, phone_number, language, body, direction):
if not child_name or len(child_name) > 50:
if not language:
language = "English"
child_name = msg_placeholder_child(language)
try:
contact, _ = Contact.objects.get_or_create(name=child_name,
phone_number=phone_number)
except MultipleObjectsReturned:
contact = Contact.objects.filter(name=child_name,
phone_number=phone_number).first()
contact.language_preference = language
contact.save()
return Message.objects.create(contact=contact, direction=direction, body=body)
| gpl-3.0 | 1,797,699,433,873,005,000 | 43.879668 | 146 | 0.571283 | false |
yoseforb/lollypop | src/selectionlist.py | 1 | 13370 | #!/usr/bin/python
# Copyright (c) 2014-2015 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, GLib, GObject, Pango, cairo
from time import time
from _thread import start_new_thread
from lollypop.utils import translate_artist_name, format_artist_name
from lollypop.define import Type, Lp
class SelectionPopover(Gtk.Popover):
"""
Init popover
"""
def __init__(self):
Gtk.Popover.__init__(self)
self.set_modal(False)
self._label = Gtk.Label()
self._label.set_property('halign', Gtk.Align.CENTER)
self._label.set_property('valign', Gtk.Align.CENTER)
self._label.show()
self.get_style_context().add_class('osd-popover')
self.set_property('width-request', 100)
self.set_property('height-request', 50)
self.add(self._label)
"""
Set popover text
@param text as string
"""
def set_text(self, text):
self._label.set_markup('<span size="large"><b>%s</b></span>' % text)
"""
Ignore
"""
def do_grab_focus(self):
pass
# Keep track of last motion event coordinates
class MotionEvent:
x = 0.0
y = 0.0
# A selection list is a artists or genres scrolled treeview
class SelectionList(Gtk.ScrolledWindow):
__gsignals__ = {
'item-selected': (GObject.SignalFlags.RUN_FIRST, None, (int,)),
'populated': (GObject.SignalFlags.RUN_FIRST, None, ()),
}
"""
Init Selection list ui
"""
def __init__(self):
Gtk.ScrolledWindow.__init__(self)
self.set_policy(Gtk.PolicyType.NEVER,
Gtk.PolicyType.AUTOMATIC)
self._last_motion_event = MotionEvent()
self._previous_motion_y = 0.0
self._loading = False
self._timeout = None
self._to_select_id = Type.NONE
self._updating = False # Sort disabled if False
self._is_artists = False # for string translation
self._pop_time = 0.0 # Keep track of time when starting populate
self._popover = SelectionPopover()
builder = Gtk.Builder()
builder.add_from_resource('/org/gnome/Lollypop/SelectionList.ui')
builder.connect_signals(self)
self._model = builder.get_object('model')
self._model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self._model.set_sort_func(0, self._sort_items)
self._view = builder.get_object('view')
self._view.set_row_separator_func(self._row_separator_func)
renderer0 = Gtk.CellRendererText()
renderer0.set_property('ellipsize-set', True)
renderer0.set_property('ellipsize', Pango.EllipsizeMode.END)
renderer1 = Gtk.CellRendererPixbuf()
column = Gtk.TreeViewColumn('')
column.pack_start(renderer0, True)
column.pack_start(renderer1, True)
column.add_attribute(renderer0, 'text', 1)
column.add_attribute(renderer1, 'icon-name', 2)
column.set_expand(True)
column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
self._view.append_column(column)
self._view.connect('motion_notify_event', self._on_motion_notify)
self.add(self._view)
adj = self.get_vadjustment()
adj.connect('value_changed', self._on_scroll)
"""
Mark list as artists list
@param is_artists as bool
"""
def mark_as_artists(self, is_artists):
self._is_artists = is_artists
"""
Return True if list is marked as artists
"""
def is_marked_as_artists(self):
return self._is_artists
"""
Populate view with values
@param [(int, str)], will be deleted
@thread safe
"""
def populate(self, values):
self._pop_time = time()
start_new_thread(self._populate, (values,))
"""
Remove row from model
@param object id as int
"""
def remove(self, object_id):
for item in self._model:
if item[0] == object_id:
self._model.remove(item.iter)
break
"""
Add item to list
@param value as (int, str)
"""
def add_value(self, value):
self._updating = True
self._add_value(value)
self._updating = False
"""
Update view with values
@param [(int, str)]
"""
def update_values(self, values):
self._updating = True
for item in self._model:
found = False
for value in values:
if item[1] == value[1]:
found = True
break
# Remove not found items but not devices
if not found and item[0] > Type.DEVICES:
self._model.remove(item.iter)
for value in values:
self._add_value(value)
self._updating = False
"""
Return True if list will select an item on populate
@return selected as bool
"""
def will_be_selected(self):
return self._to_select_id != Type.NONE
"""
Make treeview select first default item
@param object id as int
"""
def select_id(self, object_id):
self._to_select_id = Type.NONE
try:
selected = None
for item in self._model:
if item[0] == object_id:
selected = item.iter
# Select later
if selected is None:
self._to_select_id = object_id
else:
path = self._model.get_path(selected)
self._view.set_cursor(path, None, False)
except:
self._to_select_id = object_id
"""
Get id at current position
@return id as int
"""
def get_selected_id(self):
selected_id = Type.NONE
(path, column) = self._view.get_cursor()
if path is not None:
iterator = self._model.get_iter(path)
if iterator is not None:
selected_id = self._model.get_value(iterator, 0)
return selected_id
"""
Return true if view is being populated
"""
def is_populating(self):
return self._pop_time != 0
"""
Clear treeview
"""
def clear(self):
self._updating = True
self._model.clear()
self._updating = False
#######################
# PRIVATE #
#######################
"""
Add value to the model
@param value as [int, str]
"""
def _add_value(self, value):
found = False
for item in self._model:
if value[0] == item[0]:
found = True
break
if not found:
if self._is_artists:
string = translate_artist_name(value[1])
else:
string = value[1]
self._model.append([value[0],
string,
self._get_icon_name(value[0])])
if value[0] == self._to_select_id:
self.select_id(self._to_select_id)
"""
Populate view with values
@param [(int, str)], will be deleted
@thread safe
"""
def _populate(self, values):
if len(self._model) > 0:
self._updating = True
GLib.idle_add(self._add_values, values, self._pop_time)
"""
Add values to the list
@param items as [(int,str)]
@param time as float
"""
def _add_values(self, values, time):
if time != self._pop_time:
del values
values = None
return
elif not values:
self.emit("populated")
self._updating = False
del values
values = None
self._pop_time = 0
return
value = values.pop(0)
self._add_value(value)
GLib.idle_add(self._add_values, values, time)
"""
Return pixbuf for id
        @param object_id as id
"""
def _get_icon_name(self, object_id):
icon = ''
if object_id >= 0:
icon = 'go-next-symbolic'
elif object_id == Type.POPULARS:
icon = 'emblem-favorite-symbolic'
elif object_id == Type.PLAYLISTS:
icon = 'emblem-documents-symbolic'
elif object_id == Type.ALL:
if self._is_artists:
icon = 'media-optical-cd-audio-symbolic'
else:
icon = 'avatar-default-symbolic'
elif object_id == Type.COMPILATIONS:
icon = 'system-users-symbolic'
elif object_id == Type.RECENTS:
icon = 'document-open-recent-symbolic'
elif object_id == Type.RADIOS:
icon = 'audio-input-microphone-symbolic'
elif object_id < Type.DEVICES:
icon = 'multimedia-player-symbolic'
elif object_id == Type.RANDOMS:
icon = 'media-playlist-shuffle-symbolic'
return icon
"""
Sort model
"""
def _sort_items(self, model, itera, iterb, data):
if not self._updating:
return False
a_index = model.get_value(itera, 0)
b_index = model.get_value(iterb, 0)
a = format_artist_name(model.get_value(itera, 1))
b = format_artist_name(model.get_value(iterb, 1))
# Static vs static
if a_index < 0 and b_index < 0:
return a_index < b_index
# Static entries always on top
elif b_index < 0:
return True
# Static entries always on top
if a_index < 0:
return False
        # String comparison for non-static entries
else:
return a.lower() > b.lower()
"""
Draw a separator if needed
@param model as Gtk.TreeModel
@param iterator as Gtk.TreeIter
"""
def _row_separator_func(self, model, iterator):
return model.get_value(iterator, 0) == Type.SEPARATOR
"""
Forward "cursor-changed" as "item-selected" with item id as arg
@param view as Gtk.TreeView
"""
def _on_cursor_changed(self, view):
selected_id = self.get_selected_id()
if not self._updating and selected_id != Type.NONE:
self.emit('item-selected', selected_id)
"""
Disable shortcuts
@param widget as Gtk.widget
@param event as GdK.Event
"""
def _on_focus_in_event(self, widget, event):
Lp.window.enable_global_shorcuts(False)
"""
Enable shortcuts
@param widget as Gtk.widget
@param event as GdK.Event
"""
def _on_focus_out_event(self, widget, event):
Lp.window.enable_global_shorcuts(True)
"""
Hide popover
@param widget as Gtk.widget
@param event as GdK.Event
"""
def _on_leave_event(self, widget=None, event=None):
self._popover.hide()
self._timeout = None
"""
Set motion event
@param widget as Gtk.widget
@param event as GdK.Event
"""
def _on_motion_notify(self, widget, event):
if self._timeout is None:
self._timeout = GLib.timeout_add(500,
self._on_leave_event)
if event.x < 0.0 or event.y < 0.0:
return
self._last_motion_event.x = event.x
self._last_motion_event.y = event.y
"""
Show a popover with current letter
@param adj as Gtk.Adjustement
"""
def _on_scroll(self, adj):
# Only show if scrolled window is huge
if adj.get_upper() < adj.get_page_size() * 3:
return
if self._last_motion_event is None:
return
if self._timeout is not None:
GLib.source_remove(self._timeout)
self._timeout = None
dest_row = self._view.get_dest_row_at_pos(self._last_motion_event.x,
self._last_motion_event.y)
if dest_row is None:
return
row = dest_row[0]
if row is None:
return
row_iter = self._model.get_iter(row)
if row_iter is None or self._model.get_value(row_iter, 0) < 0:
return
text = self._model.get_value(row_iter, 1)
if self._is_artists:
text = format_artist_name(text)
self._popover.set_text(" %s " % text[0].upper())
self._popover.set_relative_to(self)
r = cairo.RectangleInt()
r.x = self.get_allocated_width()
r.y = self._last_motion_event.y
r.width = 1
r.height = 1
self._popover.set_pointing_to(r)
self._popover.set_position(Gtk.PositionType.RIGHT)
self._popover.show()
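# Minimal usage sketch (illustrative; the artist names are placeholders and a running
# Gtk main loop is assumed, since populate() finishes asynchronously via GLib.idle_add):
def _selection_list_example():
    selection_list = SelectionList()
    selection_list.mark_as_artists(True)
    # (id, name) pairs; ids >= 0 are regular rows, negative ids are static entries
    selection_list.populate([(1, "Artist One"), (2, "Artist Two")])
    selection_list.select_id(1)
    return selection_list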
| gpl-3.0 | 6,226,525,266,078,298,000 | 29.806452 | 77 | 0.553328 | false |
MagiChau/ZonBot | extensions/util.py | 1 | 1563 | import asyncio
from discord.ext import commands
import checks
import discord
class Util():
def __init__(self, bot):
self.bot = bot
@commands.command(name='eval', pass_context=True)
@checks.is_owner()
async def eval(self, ctx, *, code : str):
"""Evaluates code
Usage: !eval <code>
"""
try:
f = eval(code)
if asyncio.iscoroutine(f):
f = await f
except Exception as e:
f = e
try:
await self.bot.say("```py\n{}```".format(f))
except discord.errors.HTTPException:
splits = int(len(f) / 2000)
f = str(f)
for i in range(0, splits):
await self.bot.say(f[i*2000:(i*2000)+1999])
@commands.command(pass_context=True)
@checks.is_owner()
async def exec(self, ctx, *, code : str):
"""Executes code
Usage: !exec <code>
"""
try:
exec(code)
except Exception as e:
try:
self.bot.say("```py\n{}```".format(e))
except Exception as ex:
self.bot.say("```py\n{}```".format(ex))
@commands.command(name='setstatus', help ="Changes bot game status.")
@checks.is_owner()
async def set_status(self, *, game : str):
game = game.strip()
await self.bot.change_status(discord.Game(name=game))
@commands.command()
@checks.is_owner()
async def logout(self):
await self.bot.logout()
def setup(bot):
bot.add_cog(Util(bot)) | mit | -4,751,165,528,354,734,000 | 25.965517 | 73 | 0.523353 | false |
vpv11110000/pyss | examples/1_example_3_15/1_example_3_15.py | 1 | 5595 | # #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
# Example 3.15
B.Ya. Sovetov, S.A. Yakovlev, Modelirovanie sistem. Praktikum (Modeling of Systems: Practicum), 2nd ed., Moscow: Vysshaya Shkola, 2003, 295 pp.
A stream of requests arrives at a storage with a permitted capacity of 3 units, uniformly every 5+/-1 min. If the first channel (facility) is busy, the request goes to the second channel for processing. The processing time of the first channel is 9+/-1 min, of the second 13+/-1 min.
Simulate the processing of 100 requests.
"""
import sys
import os
import random
import unittest
DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))))) + os.sep
sys.path.append(DIRNAME_MODULE)
sys.path.append(DIRNAME_MODULE + "pyss" + os.sep)
from pyss import pyssobject
from pyss.pyss_model import PyssModel
from pyss.segment import Segment
from pyss import generate
from pyss.generate import Generate
from pyss.terminate import Terminate
from pyss import logger
from pyss.table import Table
from pyss.assemble import Assemble
from pyss.qtable import Qtable
from pyss.handle import Handle
from pyss.enter import Enter
from pyss.leave import Leave
from pyss.storage import Storage
from pyss.advance import Advance
from pyss.assign import Assign
from pyss.preempt import Preempt
from pyss.g_return import GReturn
from pyss.facility import Facility
from pyss.seize import Seize
from pyss.release import Release
from pyss.transfer import Transfer
from pyss.tabulate import Tabulate
from pyss.test import Test
from pyss.queue import Queue
from pyss.depart import Depart
from pyss.split import Split
from pyss.test import Test
from pyss.bprint import Bprint
from pyss.gate import Gate
from pyss.pyss_const import *
from pyss.func_discrete import FuncDiscrete
from pyss.func_exponential import Exponential
from pyss.func_normal import Normal
from pyss.plot_func import PlotFunc
from pyss.simpleobject import SimpleObject
V_FACILITY = "V_FACILITY"
def buildModel():
"""
    A stream of requests arrives at a storage with a permitted capacity of 3 units, uniformly every 5+/-1 min. If the first channel (facility) is busy, the request goes to the second channel for processing. The processing time of the first channel is 9+/-1 min, of the second 13+/-1 min.
"""
logger.info("-------------------------------------")
### MODEL ----------------------------------
m = PyssModel()
sgm = Segment(m)
#
m[OPTIONS].setAllFalse()
m[OPTIONS].printResult = True
# Stock STORAGE 4000 ;Warehouse holds 4000 units
NAK3 = "NAK3"
nak3 = Storage(m, storageName=NAK3, storageSize=3, initBusySize=0)
Q_ENTER="Q_ENTER"
    # this is the name of a queue
QUE2 = "QUE2"
    # this is a block label
FACIL2 = "FACIL2"
    # these are the facility names
F1 = "F1"
F2 = "F2"
#-----------------------------
Generate(sgm,
med_value=5.0, modificatorFunc=lambda o, c:random.uniform(-1.0, 1.0),
first_tx=0.0,
max_amount=100,
priority=1)
Queue(sgm, queueName=Q_ENTER)
Enter(sgm, storageName=NAK3)
Depart(sgm, queueName=Q_ENTER)
    # if facility F1 is free, the transact falls through; otherwise it jumps to block QUE2
Gate(sgm, condition=GATE_FACILITY_NOT_USED, objectName=F1, nextBlockLabel=QUE2)
Q1 = "Q1"
Queue(sgm, queueName=Q1)
Seize(sgm, facilityName=F1)
Leave(sgm, storageName=NAK3, funcBusySize=1)
Depart(sgm, queueName=Q1)
    # mark that the transact is being processed by F1
def h(o, t):
t[V_FACILITY] = F1
Handle(sgm, handlerFunc=h)
Advance(sgm, meanTime=9, modificatorFunc=1)
Release(sgm, facilityName=F1)
Terminate(sgm, deltaTerminate=0)
#
Q2 = "Q2"
Queue(sgm, QUE2, queueName=Q2)
Seize(sgm, FACIL2, facilityName=F2)
Leave(sgm, storageName=NAK3, funcBusySize=1)
Depart(sgm, queueName=Q2)
def h2(o, t):
t[V_FACILITY] = F2
Handle(sgm, handlerFunc=h2)
Advance(sgm, meanTime=13, modificatorFunc=1)
Release(sgm, facilityName=F2)
Terminate(sgm, deltaTerminate=0)
return m
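# Flow sketch of the segment built above (illustrative summary):
#   GENERATE(5+/-1) -> QUEUE(Q_ENTER) -> ENTER(NAK3) -> DEPART(Q_ENTER)
#   -> GATE: if F1 is free fall through, otherwise jump to label QUE2
#   F1 branch  : QUEUE(Q1) -> SEIZE(F1) -> LEAVE(NAK3) -> DEPART(Q1)
#                -> ADVANCE(9+/-1)  -> RELEASE(F1) -> TERMINATE
#   QUE2 branch: QUEUE(Q2) -> SEIZE(F2) -> LEAVE(NAK3) -> DEPART(Q2)
#                -> ADVANCE(13+/-1) -> RELEASE(F2) -> TERMINATE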
def main():
logger.info("-------------------------------------")
#-------------------------------
    # Simulation time limit
MAX_TIME = 10000
### MODEL ----------------------------------
m = buildModel()
    ### PLOTS ----------------------
    # tables
m.initPlotTable()
m.initPlotQueueLifeLine()
    # annotate each transact with the facility that processed it
m.initPlotTransactLifeLine(funcAnnotate=lambda t:"%d-%s" % (t[NUM], t[V_FACILITY]))
m.initPlotFacilityLifeLine()
m.initPlotStorageLifeLine()
    # RUN --------------------------
m.start(terminationCount=100, maxTime=MAX_TIME)
    # SHOW PLOTS ------------------------
m.plotByModulesAndSave("")
m.plotByModulesAndShow()
if __name__ == '__main__':
main()
| mit | -7,208,947,746,968,788,000 | 29.666667 | 254 | 0.656071 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/types/detailed_demographic_service.py | 1 | 1267 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={"GetDetailedDemographicRequest",},
)
class GetDetailedDemographicRequest(proto.Message):
r"""Request message for
[DetailedDemographicService.GetDetailedDemographic][google.ads.googleads.v8.services.DetailedDemographicService.GetDetailedDemographic].
Attributes:
resource_name (str):
Required. Resource name of the
DetailedDemographic to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
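# Minimal usage sketch (illustrative; the resource name below, including its id
# values, is an assumed example and not taken from the library docs):
#   request = GetDetailedDemographicRequest(
#       resource_name="customers/1234567890/detailedDemographics/123456")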
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 829,231,411,696,871,400 | 31.487179 | 140 | 0.726914 | false |
ortoloco/jordbruksmark | jordbruksmark/admin.py | 1 | 1460 | # -*- coding: utf-8 -*-
from django.contrib import admin, messages
from jordbruksmark.models import *
class ParzellenAdmin(admin.ModelAdmin):
list_display = ["__unicode__", "name"]
search_fields = ["id", "code", "name"]
class DuengungsstufenAdmin(admin.ModelAdmin):
list_display = ["__unicode__"]
search_fields = ["id", "name"]
class DuengerAdmin(admin.ModelAdmin):
list_display = ["__unicode__"]
search_fields = ["id", "name"]
class FamilienAdmin(admin.ModelAdmin):
list_display = ["__unicode__"]
search_fields = ["id", "name"]
class KulturenAdmin(admin.ModelAdmin):
list_display = ["__unicode__", "familie_name"]
search_fields = ["id", "name", "familie__name"]
def familie_name(self, obj):
return obj.familie.name
familie_name.admin_order_field = 'familie__name'
class WochenMengenAdmin(admin.ModelAdmin):
list_display = ["__unicode__", "menge"]
search_fields = ["id", "woche", "kultur__name"]
class SaetzeAdmin(admin.ModelAdmin):
list_display = ["__unicode__", "sorte"]
search_fields = ["id", "sorte", "kultur__name", "nummer"]
admin.site.register(Parzelle, ParzellenAdmin)
admin.site.register(Duengungsstufe, DuengungsstufenAdmin)
admin.site.register(Duenger, DuengerAdmin)
admin.site.register(Familie, FamilienAdmin)
admin.site.register(Kultur, KulturenAdmin)
admin.site.register(WochenMenge, WochenMengenAdmin)
admin.site.register(Satz,SaetzeAdmin)
| gpl-3.0 | 5,250,728,470,260,427,000 | 32.953488 | 61 | 0.677397 | false |
mirestrepo/voxels-at-lems | boxm/fill_internal_nodes.py | 1 | 1379 | import boxm_batch;
import os;
import optparse;
boxm_batch.register_processes();
boxm_batch.register_datatypes();
class dbvalue:
def __init__(self, index, type):
self.id = index # unsigned integer
self.type = type # string
print("Filling internal nodes");
#Parse inputs
parser = optparse.OptionParser(description='Fill Internal Nodes');
parser.add_option('--model_dir', action="store", dest="model_dir", type="string", default="");
parser.add_option('--model_name', action="store", dest="model_name", type="string",default="");
options, args = parser.parse_args()
model_dir = options.model_dir;
model_name = options.model_name;
if len(model_dir) == 0:
print "Missing Model Dir"
sys.exit(-1);
if len(model_name) == 0:
print "Missing Model Name"
sys.exit(-1);
print("Creating a Scene");
boxm_batch.init_process("boxmCreateSceneProcess");
boxm_batch.set_input_string(0, model_dir +"/" + str(model_name) + ".xml");
boxm_batch.run_process();
(scene_id, scene_type) = boxm_batch.commit_output(0);
scene = dbvalue(scene_id, scene_type);
print("*************************************");
print("Filling internal nodes");
boxm_batch.init_process("boxm_fill_internal_cells_process");
boxm_batch.set_input_from_db(0, scene);
boxm_batch.run_process();
(scene_id, scene_type) = boxm_batch.commit_output(0);
filled_scene = dbvalue(scene_id, scene_type);
| bsd-2-clause | -807,465,746,511,493,400 | 24.537037 | 95 | 0.677302 | false |
bdang2012/taiga-back-casting | taiga/base/exceptions.py | 1 | 6717 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This code is partially taken from django-rest-framework:
# Copyright (c) 2011-2015, Tom Christie
"""
Handled exceptions raised by REST framework.
In addition Django's built in 403 and 404 exceptions are handled.
(`django.http.Http404` and `django.core.exceptions.PermissionDenied`)
"""
from django.core.exceptions import PermissionDenied as DjangoPermissionDenied
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.http import Http404
from . import response
from . import status
import math
class APIException(Exception):
"""
Base class for REST framework exceptions.
Subclasses should provide `.status_code` and `.default_detail` properties.
"""
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_detail = ""
def __init__(self, detail=None):
self.detail = detail or self.default_detail
class ParseError(APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_detail = _("Malformed request.")
class AuthenticationFailed(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_detail = _("Incorrect authentication credentials.")
class NotAuthenticated(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_detail = _("Authentication credentials were not provided.")
class PermissionDenied(APIException):
status_code = status.HTTP_403_FORBIDDEN
default_detail = _("You do not have permission to perform this action.")
class MethodNotAllowed(APIException):
status_code = status.HTTP_405_METHOD_NOT_ALLOWED
default_detail = _("Method '%s' not allowed.")
def __init__(self, method, detail=None):
self.detail = (detail or self.default_detail) % method
class NotAcceptable(APIException):
status_code = status.HTTP_406_NOT_ACCEPTABLE
default_detail = _("Could not satisfy the request's Accept header")
def __init__(self, detail=None, available_renderers=None):
self.detail = detail or self.default_detail
self.available_renderers = available_renderers
class UnsupportedMediaType(APIException):
status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
default_detail = _("Unsupported media type '%s' in request.")
def __init__(self, media_type, detail=None):
self.detail = (detail or self.default_detail) % media_type
class Throttled(APIException):
status_code = status.HTTP_429_TOO_MANY_REQUESTS
default_detail = _("Request was throttled.")
extra_detail = _("Expected available in %d second%s.")
def __init__(self, wait=None, detail=None):
if wait is None:
self.detail = detail or self.default_detail
self.wait = None
else:
format = "%s%s" % ((detail or self.default_detail), self.extra_detail)
self.detail = format % (wait, wait != 1 and "s" or "")
self.wait = math.ceil(wait)
class BaseException(APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_detail = _("Unexpected error")
def __init__(self, detail=None):
self.detail = detail or self.default_detail
class NotFound(BaseException, Http404):
"""
Exception used for not found objects.
"""
status_code = status.HTTP_404_NOT_FOUND
default_detail = _("Not found.")
class NotSupported(BaseException):
status_code = status.HTTP_405_METHOD_NOT_ALLOWED
default_detail = _("Method not supported for this endpoint.")
class BadRequest(BaseException):
"""
Exception used on bad arguments detected
on api view.
"""
default_detail = _("Wrong arguments.")
class WrongArguments(BadRequest):
"""
Exception used on bad arguments detected
on service. This is same as `BadRequest`.
"""
default_detail = _("Wrong arguments.")
class RequestValidationError(BadRequest):
default_detail = _("Data validation error")
class PermissionDenied(PermissionDenied):
"""
Compatibility subclass of restframework `PermissionDenied`
exception.
"""
pass
class IntegrityError(BadRequest):
default_detail = _("Integrity Error for wrong or invalid arguments")
class PreconditionError(BadRequest):
"""
Error raised on precondition method on viewset.
"""
default_detail = _("Precondition error")
class NotAuthenticated(NotAuthenticated):
"""
Compatibility subclass of restframework `NotAuthenticated`
exception.
"""
pass
def format_exception(exc):
if isinstance(exc.detail, (dict, list, tuple,)):
detail = exc.detail
else:
class_name = exc.__class__.__name__
class_module = exc.__class__.__module__
detail = {
"_error_message": force_text(exc.detail),
"_error_type": "{0}.{1}".format(class_module, class_name)
}
return detail
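# Illustrative example (not part of the original module): formatting a
# NotFound("User does not exist") instance yields roughly
#   {"_error_message": "User does not exist",
#    "_error_type": "taiga.base.exceptions.NotFound"}
# which is the JSON payload returned next to the 404 status code.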
def exception_handler(exc):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's builtin `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, APIException):
headers = {}
if getattr(exc, "auth_header", None):
headers["WWW-Authenticate"] = exc.auth_header
if getattr(exc, "wait", None):
headers["X-Throttle-Wait-Seconds"] = "%d" % exc.wait
detail = format_exception(exc)
return response.Response(detail, status=exc.status_code, headers=headers)
elif isinstance(exc, Http404):
return response.NotFound({'_error_message': str(exc)})
elif isinstance(exc, DjangoPermissionDenied):
return response.Forbidden({"_error_message": str(exc)})
# Note: Unhandled exceptions will raise a 500 error.
return None
| agpl-3.0 | -6,705,860,471,032,478,000 | 29.247748 | 82 | 0.689352 | false |
quarkslab/irma | frontend/tests/api/tasks/test_braintasks.py | 1 | 4380 | from unittest import TestCase
from mock import MagicMock, patch
import api.tasks.braintasks as module
from api.tasks.braintasks import BRAIN_SCAN_TASKS
from irma.common.base.exceptions import IrmaCoreError, IrmaTaskError
from irma.common.base.utils import IrmaReturnCode
class TestModuleBraintasks(TestCase):
def setUp(self):
self.old_timeout, self.old_brain_app = module.timeout, module.brain_app
self.m_timeout, self.m_brain_app = MagicMock(), MagicMock()
module.timeout = self.m_timeout
module.brain_app = self.m_brain_app
def tearDown(self):
module.timeout = self.old_timeout
module.brain_app = self.old_brain_app
del self.m_brain_app
del self.m_timeout
@patch("api.tasks.braintasks.sync_call")
def test001_probe_list_raise_task(self, m_sync_call):
expected = "test"
m_sync_call.return_value = (IrmaReturnCode.error, expected)
with self.assertRaises(IrmaTaskError) as context:
module.probe_list()
self.assertEqual(str(context.exception), expected)
m_sync_call.assert_called_once()
@patch("api.tasks.braintasks.sync_call")
def test002_probe_list_raise_core(self, m_sync_call):
expected = "no probe available"
m_sync_call.return_value = (IrmaReturnCode.success, list())
with self.assertRaises(IrmaCoreError) as context:
module.probe_list()
self.assertEqual(str(context.exception), expected)
m_sync_call.assert_called_once()
@patch("api.tasks.braintasks.sync_call")
def test003_probe_list_ok(self, m_sync_call):
expected = ["test"]
m_sync_call.return_value = (IrmaReturnCode.success, expected)
self.assertEqual(module.probe_list(), expected)
@patch("api.tasks.braintasks.sync_call")
def test004_scan_progress(self, m_sync_call):
arg = "test"
result = module.scan_progress(arg)
m_sync_call.assert_called_once_with(self.m_brain_app,
BRAIN_SCAN_TASKS,
"scan_progress",
self.m_timeout,
args=[arg])
self.assertEqual(result, m_sync_call())
@patch("api.tasks.braintasks.sync_call")
def test005_scan_cancel(self, m_sync_call):
arg = "test"
result = module.scan_cancel(arg)
m_sync_call.assert_called_once_with(self.m_brain_app,
BRAIN_SCAN_TASKS,
"scan_cancel",
self.m_timeout,
args=[arg])
self.assertEqual(result, m_sync_call())
@patch("api.tasks.braintasks.async_call")
def test006_scan_launch(self, m_async_call):
args = ["test1", "test2", "test3"]
result = module.scan_launch(*args)
m_async_call.assert_called_once_with(self.m_brain_app,
BRAIN_SCAN_TASKS,
"scan",
args=args)
self.assertEqual(result, m_async_call())
@patch("api.tasks.braintasks.async_call")
def test007_scan_flush(self, m_async_call):
arg = "test"
result = module.scan_flush(arg)
m_async_call.assert_called_once_with(self.m_brain_app,
BRAIN_SCAN_TASKS,
"scan_flush",
args=[arg])
self.assertEqual(result, m_async_call())
@patch("api.tasks.braintasks.sync_call")
def test008_mimetype_filter_raise_task(self, m_sync_call):
expected = "test"
m_sync_call.return_value = (IrmaReturnCode.error, expected)
with self.assertRaises(IrmaTaskError) as context:
module.mimetype_filter_scan_request("whatever")
self.assertEqual(str(context.exception), expected)
@patch("api.tasks.braintasks.sync_call")
def test009_mimetype_filter_ok(self, m_sync_call):
expected = "test"
m_sync_call.return_value = (IrmaReturnCode.success, expected)
res = module.mimetype_filter_scan_request("whatever")
self.assertEqual(res, expected)
| apache-2.0 | -8,905,123,432,380,491,000 | 41.115385 | 79 | 0.573059 | false |
WZQ1397/automatic-repo | python/batch_url_alert/url_monitor/backend/smtpconnection.py | 1 | 2527 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import logging
import smtplib
import socket
from email.mime.text import MIMEText
from pony_monitor.conf import settings
class SMTPConnection(object):
"""
    HTML e-mail support to be added later; for now only plain-text messages are sent.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False):
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
self.username = username or settings.EMAIL_HOST_USER
self.password = password or settings.EMAIL_HOST_PASSWORD
self.use_tls = (use_tls is not None) and use_tls or settings.EMAIL_USE_TLS
self.fail_silently = fail_silently
self.connection = None
def open(self):
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
try:
self.connection = smtplib.SMTP(self.host, self.port)
if self.use_tls:
self.connection.ehlo()
self.connection.starttls()
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except:
if not self.fail_silently:
raise
def close(self):
"""Closes the connection to the email server."""
try:
try:
self.connection.quit()
except socket.sslerror:
# This happens when calling quit() on a TLS connection
# sometimes.
self.connection.close()
except:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send(self, recipients, from_email, subject, message):
"""A helper method that does the actual sending."""
# Create a text/plain message
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = from_email
msg['To'] = ','.join(recipients)
try:
self.connection.sendmail(from_email, recipients, msg.as_string())
except Exception, e:
logging.error(e)
if not self.fail_silently:
raise
return False
return True
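# Minimal usage sketch (illustrative; the addresses and URL are placeholders and the
# EMAIL_* values are assumed to be configured in pony_monitor.conf.settings):
def _send_alert_example():
    conn = SMTPConnection(fail_silently=True)
    conn.open()
    sent = conn.send(recipients=['[email protected]'],
                     from_email='[email protected]',
                     subject='URL check failed',
                     message='http://example.com returned HTTP 500')
    conn.close()
    return sent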
| lgpl-3.0 | -3,479,046,907,493,722,000 | 30.024691 | 82 | 0.559889 | false |
Berserker66/omnitool | omnitool/Language/english.py | 1 | 3177 | __author__ = "Berserker66"
langversion = 1
langname = "English"
##updater
# text construct: "Version "+version+available+changelog
#example: Version 3 available, click here to download, or for changelog click here
available = " available, click here to download"
changelog = ", or for changelog click here"
##world gen
worldify = "from image"
planetoids = "Planetoids & Terra"
arena = "Dungeon Arena"
flat = "Flatworld"
new = "New world:"
##mainmenu
#omnitool
settings = "Settings"
report_issue = "Report Issue"
exit = "Exit"
#start
start = "Start"
terraria = "Terraria"
steamfree = "Terraria Steamfree"
#open
open = "Open"
imagefolder = "World Images"
backupfolder = "World Backups"
themes = "Omnitool Themes"
#visit
visit = "Visit"
donate = "Donate"
homepage = "Omnitool"
TO = "Terraria Online"
wiki = "Terraria Wiki"
##world thumbnail
label = "World: "
##settings menu
warning = "Some changes require a restart to take effect"
none = "None"
tiny = "Tiny" #unused
small = "Small"
medium = "Medium"
large = "Large"
very_large = "XXL"
theme_select = "Theme select:"
thumbsize = "World Thumbnail size:"
mk_backups = "Make Backups:"
world_columns = "World Columns:"
##world interaction menu
wa_worldactionmenu = "Action for {}:"
wa_imageopen = "Open Image"
wa_renderopen = "Display World"
wa_teditopen = "Open in TEdit"
wa_update = "Update Image"
wa_super = "Generate Super-Image"
##planetoids & terra
pt_start = 'Start generation!'
pt_name = "Name: "
pt_mode = "Mode: "
pt_small = "small Planetoids"
pt_medium = "medium Planetoids"
pt_large = "large Planetoids"
pt_square = "square Planetoids"
pt_both = "large Planetoids & Terra"
pt_square_terra = "square Terra"
pt_start_sel = "Start: "
pt_morning = "Morning"
pt_day = "Day"
pt_night = "Night"
pt_bloodmoon = "Bloodmoon"
pt_extras = "Extras: "
pt_sun = "Sun: "
pt_atlantis = "Atlantis: "
pt_merchant = "Merchant: "
pt_lloot = "Less Loot: "
pt_mirror = "Mirror Mode: "
pt_pre = "Item Prefixes: "
##worldify
w_start = "Start worldification!"
w_cont = "Continue"
w_name = "Name: "
w_rgb = "RGB"
w_hsv = "weighted HSV"
w_method = "Method: "
w_priority = "Priority selection"
w_hue = "Hue: "
w_saturation = "Saturation: "
w_brightness = "Brightness: "
##arena
a_start = "Start generation!"
a_name = "Name: "
a_rooms = "Rooms: "
a_sidelen = "Room sidelength: "
a_corlen = "Corridor length: "
a_chest = "Chest: "
a_itemchest = "Items per chest: "
a_light = "Lighting: "
a_chances = "Room chances: "
a_standard = "Standard: "
a_cross = "Cross Corridor: "
##torch
at_chances = "Color chances:"
at_full = "Full spectrum"
at_blue = "Blue"
at_red = "Red"
at_green = "Green"
at_pink = "Demon"
at_white = "White"
at_yellow = "Yellow"
at_purple = "Purple"
at_lime = "Cursed"
##plugins
pl_start = "Start plugin"
pl_rec = "Select a world to be received"
pl_mod = "Select a world to be modified"
pl_trans = "Select two worlds to be used for a transfer"
pl_trans_source = "Source"
pl_trans_target = "Target"
##flatworld
fw_size = "World size:"
fw_tiny = "tiny"
fw_square = "square"
fw_small = "small"
fw_medium = "medium"
fw_large = "large"
fw_tile = "Tile type:"
fw_wall = "Wall type:"
fw_surf = "Surface type:"
| mit | -2,946,727,446,511,887,000 | 19.235669 | 83 | 0.678628 | false |
BestSonny/examples | center_loss/face_model.py | 1 | 1338 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import BasicBlock, ResNet, model_urls
import math
class FaceModel(nn.Module):
def __init__(self,num_classes, pretrained=False, **kwargs):
super(FaceModel, self).__init__()
self.model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
parameters = model_zoo.load_url(model_urls['resnet18'])
self.model.load_state_dict(parameters)
self.model.avgpool = None
self.model.fc1 = nn.Linear(512*3*4, 512)
self.model.fc2 = nn.Linear(512, 512)
self.model.classifier = nn.Linear(512, num_classes)
self.register_buffer('centers', torch.zeros(num_classes, 512))
self.num_classes = num_classes
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = x.view(x.size(0), -1)
x = self.model.fc1(x)
#feature for center loss
x = self.model.fc2(x)
self.features = x
x = self.model.classifier(x)
return F.log_softmax(x)
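# Minimal usage sketch (illustrative; the 96x128 input size is an assumption chosen
# so the final conv feature map is 512x3x4 and matches fc1's 512*3*4 input features):
def _face_model_example():
    model = FaceModel(num_classes=10, pretrained=False)
    images = torch.randn(2, 3, 96, 128)    # batch of 2 RGB images
    log_probs = model(images)               # (2, 10) class log-probabilities
    embeddings = model.features             # (2, 512) features used by the center loss
    return log_probs, embeddings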
| bsd-3-clause | 1,860,480,812,853,672,400 | 35.162162 | 70 | 0.603886 | false |
codedecde/ImageQA | Src/TheanoModel/Code/mlstm_theano.py | 1 | 19880 | #!/usr/bin/env python
import theano
import theano.tensor as T
import numpy
import numpy as np
import pickle as pkl
from collections import OrderedDict
import cPickle as pickle
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
floatX = config.floatX
def shared_to_cpu(shared_params, params):
for k, v in shared_params.iteritems():
params[k] = v.get_value()
def cpu_to_shared(params, shared_params):
for k, v in params.iteritems():
shared_params[k].set_value(v)
def save_model(filename, options, params, shared_params=None):
if not shared_params == None:
shared_to_cpu(shared_params, params);
model = OrderedDict()
model['options'] = options
model['params'] = params
pickle.dump(model, open(filename, 'w'))
def load_model(filename):
model = pickle.load(open(filename, 'rb'))
options = model['options']
params = model['params']
shared_params = init_shared_params(params)
return options, params, shared_params
# return options, params, shared_params
def ortho_weight(ndim):
"""
Random orthogonal weights, we take
the right matrix in the SVD.
Remember in SVD, u has the same # rows as W
and v has the same # of cols as W. So we
are ensuring that the rows are
orthogonal.
"""
W = numpy.random.randn(ndim, ndim)
u, _, _ = numpy.linalg.svd(W)
return u.astype('float32')
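# Quick check (illustrative): because W is square, the returned matrix is fully
# orthogonal, so multiplying it by its transpose gives (numerically) the identity.
def _ortho_weight_check(ndim=4):
    W = ortho_weight(ndim)
    assert np.allclose(W.dot(W.T), np.eye(ndim), atol=1e-4)
    return W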
# activation function for ff layer
def tanh(x):
return T.tanh(x)
def relu(x):
return T.maximum(x, np.float32(0.))
def linear(x):
return x
def init_fflayer(params, nin, nout, options, prefix='ff'):
''' initialize ff layer
'''
params[prefix + '_w'] = init_weight(nin, nout, options)
params[prefix + '_b'] = np.zeros(nout, dtype='float32')
return params
def init_weight(n, d, options):
''' initialize weight matrix
options['init_type'] determines
gaussian or uniform initlizaiton
'''
if options['init_type'] == 'gaussian':
return (numpy.random.randn(n, d).astype(floatX)) * options['std']
elif options['init_type'] == 'uniform':
# [-range, range]
return ((numpy.random.rand(n, d) * 2 - 1) * \
options['range']).astype(floatX)
elif options['init_type'] == 'glorot uniform':
low = -1.0 * np.sqrt(6.0/(n + d))
high = 1.0 * np.sqrt(6.0/(n + d))
return numpy.random.uniform(low,high,(n,d)).astype(floatX)
layers = {'ff': ('init_fflayer', 'fflayer'),
'lstm': ('init_lstm_layer', 'lstm_layer'),
'lstm_append': (None, 'lstm_append_layer')}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
# initialize the parameters
def init_params(options):
''' Initialize all the parameters
'''
params = OrderedDict()
n_words = options['n_words']
n_emb = options['n_emb']
n_dim = options['n_dim']
n_image_feat = options['n_image_feat']
n_common_feat = options['n_common_feat']
n_output = options['n_output']
n_attention = options['n_attention']
# embedding weights
if 'embedding_file' in options and options['embedding_file'] != '':
embedding_matrix = pkl.load(open(options['embedding_file'], 'r'))[1:].astype(floatX)
params['w_emb'] = embedding_matrix
else:
## use the same initialization as BOW
params['w_emb'] = ((numpy.random.rand(n_words, n_emb) * 2 - 1) * 0.5).astype(floatX)
params = init_fflayer(params, n_image_feat, n_dim, options,
prefix='image_mlp')
# attention model based parameters
params = init_fflayer(params, n_dim, n_output, options,
prefix='scale_to_softmax')
# lstm layer
params = init_lstm_layer(params, n_emb, n_dim, options, prefix='sent_lstm')
# mlstm layer
params = init_mlstm_layer(params,2*n_dim, n_dim, options)
return params
def init_shared_params(params):
''' return a shared version of all parameters
'''
shared_params = OrderedDict()
for k, p in params.iteritems():
shared_params[k] = theano.shared(params[k], name = k)
return shared_params
def fflayer(shared_params, x, options, prefix='ff', act_func='tanh'):
''' fflayer: multiply weight then add bias
'''
return eval(act_func)(T.dot(x, shared_params[prefix + '_w']) +
shared_params[prefix + '_b'])
def dropout_layer(x, dropout, trng, drop_ratio=0.5):
''' dropout layer
'''
x_drop = T.switch(dropout,
(x * trng.binomial(x.shape,
p = 1 - drop_ratio,
n = 1,
dtype = x.dtype) \
/ (numpy.float32(1.0) - drop_ratio)),
x)
return x_drop
def init_lstm_layer(params, nin, ndim, options, prefix='lstm'):
    ''' initialize lstm layer
'''
params[prefix + '_w_x'] = init_weight(nin, 4 * ndim, options)
    # use svd trick to initialize
if options['init_lstm_svd']:
params[prefix + '_w_h'] = np.concatenate([ortho_weight(ndim),
ortho_weight(ndim),
ortho_weight(ndim),
ortho_weight(ndim)],
axis=1)
else:
params[prefix + '_w_h'] = init_weight(ndim, 4 * ndim, options)
params[prefix + '_b_h'] = np.zeros(4 * ndim, dtype='float32')
# set forget bias to be positive
params[prefix + '_b_h'][ndim : 2*ndim] = np.float32(options.get('forget_bias', 0))
return params
def init_wbw_att_layer(params, nin, ndim, options, prefix='wbw_attention'):
'''
Word by Word layer
'''
params[prefix + '_w_y'] = init_weight(ndim,ndim,options)
params[prefix + '_w_h'] = init_weight(ndim,ndim,options)
params[prefix + '_w_r'] = init_weight(ndim,ndim,options)
params[prefix + '_w_alpha'] = init_weight(ndim,1,options)
params[prefix + '_w_t'] = init_weight(ndim,ndim,options)
return params
def init_mlstm_layer(params, nin, ndim, options, prefix='mlstm'):
'''initialize mlstm layer
'''
# Additional Parameters
params[prefix + '_w_s'] = init_weight(ndim, ndim, options)
params[prefix + '_w_m'] = init_weight(ndim, ndim, options)
params[prefix + '_w_t'] = init_weight(ndim, ndim, options)
params[prefix + '_w_alpha'] = init_weight(ndim,1, options)
# LSTM parameters
params[prefix + '_w_x'] = init_weight(nin, 4 * ndim, options)
if options['init_lstm_svd']:
params[prefix + '_w_h'] = np.concatenate([ortho_weight(ndim),
ortho_weight(ndim),
ortho_weight(ndim),
ortho_weight(ndim)],
axis=1)
else:
params[prefix + '_w_h'] = init_weight(ndim, 4 * ndim, options)
params[prefix + '_b_h'] = np.zeros(4 * ndim, dtype='float32')
# set forget bias to be positive
params[prefix + '_b_h'][ndim : 2*ndim] = np.float32(options.get('forget_bias', 0))
return params
def wbw_attention_layer(shared_params, image, question, mask, r_0, options, prefix='wbw_attention',return_final=False):
''' wbw attention layer:
:param shared_params: shared parameters
:param image: batch_size x num_regions x n_dim
:param question : T x batch_size x n_dim
:param r_0 : batch_size x n_dim
:param mask: mask for x, T x batch_size
'''
wbw_w_y = shared_params[prefix + '_w_y'] # n_dim x n_dim
wbw_w_h = shared_params[prefix + '_w_h'] # n_dim x n_dim
wbw_w_r = shared_params[prefix + '_w_r'] # n_dim x n_dim
wbw_w_alpha = shared_params[prefix + '_w_alpha'] # n_dim x 1
wbw_w_t = shared_params[prefix + '_w_t'] # n_dim x n_dim
def recurrent(h_t, mask_t, r_tm1, Y):
# h_t : bt_sz x n_dim
wht = T.dot(h_t, wbw_w_h) # bt_sz x n_dim
# r_tm1 : bt_sz x n_dim
wrtm1 = T.dot(r_tm1, wbw_w_r) # bt_sz x n_dim
tmp = (wht + wrtm1)[:,None,:] # bt_sz x num_regions x n_dim
WY = T.dot(Y, wbw_w_y) # bt_sz x num_regions x n_dim
Mt = tanh(WY + tmp) # bt_sz x num_regions x n_dim
WMt = T.dot(Mt, wbw_w_alpha).flatten(2) # bt_sz x num_regions
alpha_t = T.nnet.softmax(WMt) # bt_sz x num_region
alpha_t = alpha_t.dimshuffle((0,'x',1)) # bt_sz x 1 x num_region
Y_alpha_t = T.batched_dot(alpha_t, Y)[:,0,:] # bt_sz x n_dim
r_t = Y_alpha_t + T.dot(r_tm1, wbw_w_t) # bt_sz x n_dim
r_t = mask_t[:, None] * r_t + (numpy.float32(1.0) - mask_t[:, None]) * r_tm1
return r_t
r, updates = theano.scan(fn = recurrent,
sequences = [question, mask],
non_sequences=[image],
outputs_info = r_0[:question.shape[1]],
n_steps = question.shape[0]
)
if return_final:
return r[-1]
return r
def mlstm_layer(shared_params, Y, x, mask, h_0,c_0, options, prefix='mlstm', ):
'''mlstm layer:
:param shared_params: shared parameters
:param h: input, T x batch_size x n_dim
:param mask: mask for x, T x batch_size
:param Y: image layer: batch_size x num_regions x n_dim
'''
n_dim = options['n_dim']
mlstm_w_x = shared_params[prefix + '_w_x'] # 2 * n_dim x 4 * n_dim
# weight matrix for h, (2*n_dim) x 4*n_dim (ifoc)
mlstm_w_h = shared_params[prefix + '_w_h'] # n_dim x 4 * n_dim
mlstm_b_h = shared_params[prefix + '_b_h'] # 4 * n_dim
mlstm_w_s = shared_params[prefix + '_w_s'] # n_dim x n_dim
mlstm_w_m = shared_params[prefix + '_w_m'] # n_dim x n_dim
mlstm_w_t = shared_params[prefix + '_w_t'] # n_dim x n_dim
mlstm_w_alpha = shared_params[prefix + '_w_alpha'] # n_dim x 1
def recurrent(x_t, mask_t, h_tm1, c_tm1, Y):
'''
x_t : bt x n_dim
mask_t : bt x 1
h_tm1 : bt x n_dim
c_tm1 : bt x ndim
Y : bt x num_regions x n_dim
'''
WY = T.dot(Y, mlstm_w_s)
wh_tm1 = T.dot(h_tm1, mlstm_w_m)
wx_t = T.dot(x_t, mlstm_w_t)
r = (wh_tm1 + wx_t)[:,None,:]
e = T.tanh(WY + r)
we = T.dot(e, mlstm_w_alpha).flatten(2)
alpha_t = T.nnet.softmax(we)
alpha_t = alpha_t.dimshuffle((0,'x',1))
a_t = T.batched_dot(alpha_t, Y)[:,0,:]
m_t = T.concatenate([a_t, x_t], axis=1)
ifoc = T.dot(m_t, mlstm_w_x) + T.dot(h_tm1, mlstm_w_h) + mlstm_b_h
# 0:3*n_dim: input forget and output gate
i_gate = T.nnet.sigmoid(ifoc[:, 0 : n_dim])
f_gate = T.nnet.sigmoid(ifoc[:, n_dim : 2*n_dim])
o_gate = T.nnet.sigmoid(ifoc[:, 2*n_dim : 3*n_dim])
# 3*n_dim : 4*n_dim c_temp
c_temp = T.tanh(ifoc[:, 3*n_dim : 4*n_dim])
# c_t = input_gate * c_temp + forget_gate * c_tm1
c_t = i_gate * c_temp + f_gate * c_tm1
if options['use_tanh']:
h_t = o_gate * T.tanh(c_t)
else:
h_t = o_gate * c_t
# masking
h_t = mask_t[:, None] * h_t + \
(numpy.float32(1.0) - mask_t[:, None]) * h_tm1
c_t = mask_t[:, None] * c_t + \
(numpy.float32(1.0) - mask_t[:, None]) * c_tm1
return h_t, c_t
[h, c], updates = theano.scan(fn = recurrent,
sequences = [x, mask],
outputs_info = [h_0[:x.shape[1]],
c_0[:x.shape[1]]],
non_sequences = [Y],
n_steps = x.shape[0])
return h, c
def lstm_layer(shared_params, x, mask, h_0, c_0, options, prefix='lstm'):
''' lstm layer:
:param shared_params: shared parameters
:param x: input, T x batch_size x n_emb
:param mask: mask for x, T x batch_size
'''
    # batch_size = options['batch_size']
n_dim = options['n_dim']
# weight matrix for x, n_emb x 4*n_dim (ifoc)
lstm_w_x = shared_params[prefix + '_w_x']
# weight matrix for h, n_dim x 4*n_dim
lstm_w_h = shared_params[prefix + '_w_h']
lstm_b_h = shared_params[prefix + '_b_h']
def recurrent(x_t, mask_t, h_tm1, c_tm1):
ifoc = T.dot(x_t, lstm_w_x) + T.dot(h_tm1, lstm_w_h) + lstm_b_h
# 0:3*n_dim: input forget and output gate
i_gate = T.nnet.sigmoid(ifoc[:, 0 : n_dim])
f_gate = T.nnet.sigmoid(ifoc[:, n_dim : 2*n_dim])
o_gate = T.nnet.sigmoid(ifoc[:, 2*n_dim : 3*n_dim])
# 3*n_dim : 4*n_dim c_temp
c_temp = T.tanh(ifoc[:, 3*n_dim : 4*n_dim])
# c_t = input_gate * c_temp + forget_gate * c_tm1
c_t = i_gate * c_temp + f_gate * c_tm1
if options['use_tanh']:
h_t = o_gate * T.tanh(c_t)
else:
h_t = o_gate * c_t
# if mask = 0, then keep the previous c and h
h_t = mask_t[:, None] * h_t + \
(numpy.float32(1.0) - mask_t[:, None]) * h_tm1
c_t = mask_t[:, None] * c_t + \
(numpy.float32(1.0) - mask_t[:, None]) * c_tm1
return h_t, c_t
[h, c], updates = theano.scan(fn = recurrent,
sequences = [x, mask],
outputs_info = [h_0[:x.shape[1]],
c_0[:x.shape[1]]],
n_steps = x.shape[0])
return h, c
def lstm_append_layer_fast(shared_params, x, mask, h_0, c_0, options,
prefix='lstm'):
    ''' lstm append layer fast: the h_0 and c_0 are not updated during computation
:param shared_params: shared parameters
:param x: input, T x batch_size x n_emb
:param mask: mask for x, T x batch_size
'''
n_dim = options['n_dim']
# weight matrix for x, n_emb x 4*n_dim (ifoc)
lstm_w_x = shared_params[prefix + '_w_x']
# weight matrix for h, n_dim x 4*n_dim
lstm_w_h = shared_params[prefix + '_w_h']
lstm_b_h = shared_params[prefix + '_b_h']
# T x batch_size x dim
ifoc = T.dot(x, lstm_w_x) + T.dot(h_0, lstm_w_h) + lstm_b_h
# 0:3*n_dim: input forget and output gate
i_gate = T.nnet.sigmoid(ifoc[:, :, 0 : n_dim])
f_gate = T.nnet.sigmoid(ifoc[:, :, n_dim : 2*n_dim])
o_gate = T.nnet.sigmoid(ifoc[:, :, 2*n_dim : 3*n_dim])
# 3*n_dim : 4*n_dim c_temp
c_temp = T.tanh(ifoc[:, :, 3*n_dim : 4*n_dim])
# c_t = input_gate * c_temp + forget_gate * c_tm1
c_t = i_gate * c_temp + f_gate * c_0
if options['use_tanh']:
h_t = o_gate * T.tanh(c_t)
else:
h_t = o_gate * c_t
return h_t, c_t
def lstm_append_layer(shared_params, x, mask, h_0, c_0, options, prefix='lstm'):
    ''' lstm append layer: the h_0 and c_0 are not updated during computation
:param shared_params: shared parameters
:param x: input, T x batch_size x n_emb
:param mask: mask for x, T x batch_size
'''
n_dim = options['n_dim']
# weight matrix for x, n_emb x 4*n_dim (ifoc)
lstm_w_x = shared_params[prefix + '_w_x']
# weight matrix for h, n_dim x 4*n_dim
lstm_w_h = shared_params[prefix + '_w_h']
lstm_b_h = shared_params[prefix + '_b_h']
def recurrent(x_t, mask_t, h_0, c_0):
ifoc = T.dot(x_t, lstm_w_x) + T.dot(h_0, lstm_w_h) + lstm_b_h
# 0:3*n_dim: input forget and output gate
i_gate = T.nnet.sigmoid(ifoc[:, 0 : n_dim])
f_gate = T.nnet.sigmoid(ifoc[:, n_dim : 2*n_dim])
o_gate = T.nnet.sigmoid(ifoc[:, 2*n_dim : 3*n_dim])
# 3*n_dim : 4*n_dim c_temp
c_temp = T.tanh(ifoc[:, 3*n_dim : 4*n_dim])
# c_t = input_gate * c_temp + forget_gate * c_tm1
c_t = i_gate * c_temp + f_gate * c_0
if options['use_tanh']:
h_t = o_gate * T.tanh(c_t)
else:
h_t = o_gate * c_t
# if mask = 0, then keep the previous c and h
h_t = mask_t[:, None] * h_t + \
(numpy.float32(1.0) - mask_t[:, None]) * h_0
c_t = mask_t[:, None] * c_t + \
(numpy.float32(1.0) - mask_t[:, None]) * c_0
return h_t, c_t
[h, c], updates = theano.scan(fn = recurrent,
sequences = [x, mask],
outputs_info = None,
non_sequences = [h_0[:x.shape[1]], c_0[:x.shape[1]]],
n_steps = x.shape[0])
return h, c
def similarity_layer(feat, feat_seq):
def _step(x, y):
return T.sum(x*y, axis=1) / (T.sqrt(T.sum(x*x, axis=1) * \
T.sum(y*y, axis=1))
+ np.float(1e-7))
similarity, updates = theano.scan(fn = _step,
sequences = [feat_seq],
outputs_info = None,
non_sequences = [feat],
n_steps = feat_seq.shape[0])
return similarity
def build_model(shared_params, options):
trng = RandomStreams(1234)
drop_ratio = options['drop_ratio']
batch_size = options['batch_size']
n_dim = options['n_dim']
w_emb = shared_params['w_emb']
dropout = theano.shared(numpy.float32(0.))
image_feat = T.ftensor3('image_feat')
# T x batch_size
input_idx = T.imatrix('input_idx')
input_mask = T.matrix('input_mask')
# label is the TRUE label
label = T.ivector('label')
empty_word = theano.shared(value=np.zeros((1, options['n_emb']),
dtype='float32'),
name='empty_word')
w_emb_extend = T.concatenate([empty_word, shared_params['w_emb']],
axis=0)
input_emb = w_emb_extend[input_idx]
# get the transformed image feature
h_0 = theano.shared(numpy.zeros((batch_size, n_dim), dtype='float32'))
c_0 = theano.shared(numpy.zeros((batch_size, n_dim), dtype='float32'))
if options['sent_drop']:
input_emb = dropout_layer(input_emb, dropout, trng, drop_ratio)
h_from_lstm, c_encode = lstm_layer(shared_params, input_emb, input_mask,
h_0, c_0, options, prefix='sent_lstm')
# pick the last one as encoder
Y = fflayer(shared_params, image_feat, options,
prefix='image_mlp',
act_func=options.get('image_mlp_act',
'tanh'))
hm_0 = theano.shared(numpy.zeros((batch_size, n_dim), dtype='float32'))
cm_0 = theano.shared(numpy.zeros((batch_size, n_dim), dtype='float32'))
h_star,_ = mlstm_layer(shared_params, Y, h_from_lstm, input_mask, hm_0,cm_0, options, prefix='mlstm')
h_star = h_star[-1]
# h_star = T.tanh( T.dot(r, shared_params['W_p_w']) + T.dot(h_from_lstm[-1], shared_params['W_x_w'] ) )
combined_hidden = fflayer(shared_params, h_star, options,
prefix='scale_to_softmax',
act_func='tanh')
# drop the image output
prob = T.nnet.softmax(combined_hidden)
prob_y = prob[T.arange(prob.shape[0]), label]
pred_label = T.argmax(prob, axis=1)
# sum or mean?
cost = -T.mean(T.log(prob_y))
accu = T.mean(T.eq(pred_label, label))
return image_feat, input_idx, input_mask, \
label, dropout, cost, accu
# return image_feat, input_idx, input_mask, \
# label, dropout, cost, accu, pred_label
# h_encode, c_encode, h_decode, c_decode
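
# A minimal sketch of compiling the model into Theano functions with plain SGD.
# The learning-rate value and the choice to update every shared parameter are
# assumptions; the original training script may use a different optimizer.
def compile_functions(options, lr=0.01):
    params = init_params(options)
    shared_params = init_shared_params(params)
    image_feat, input_idx, input_mask, label, dropout, cost, accu = \
        build_model(shared_params, options)
    weights = list(shared_params.values())
    grads = T.grad(cost, wrt=weights)
    updates = [(w, w - numpy.float32(lr) * g) for w, g in zip(weights, grads)]
    f_train = theano.function([image_feat, input_idx, input_mask, label],
                              [cost, accu], updates=updates)
    f_eval = theano.function([image_feat, input_idx, input_mask, label],
                             [cost, accu])
    return f_train, f_eval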
| mit | -1,631,124,483,685,562,000 | 37.15739 | 119 | 0.526408 | false |
shahabsaf1/Python | plugins/torrents.py | 1 | 2317 | # -*- coding: utf-8 -*-
from __main__ import *
from utils import *
commands = [
'^torrent',
'^t ',
'^kickass'
]
parameters = {('query', True)}
description = 'Search Kickass Torrents. Results may be NSFW.'
action = 'typing'
def get_category_icon(category):
if category == 'Anime':
return u'🇯🇵'
elif category == 'Applications':
return u'📱'
elif category == 'Books':
return u'📖'
elif category == 'Games':
return u'🎮'
elif category == 'Movies':
return u'🎞'
elif category == 'Music':
return u'💽'
elif category == 'TV':
return u'🎞'
elif category == 'XXX':
return u'🔞'
else:
return u'❔'
def run(msg):
input = get_input(msg['text'])
if not input:
doc = get_doc(commands, parameters, description)
return send_message(msg['chat']['id'], doc, parse_mode="Markdown")
url = 'http://kat.cr/json.php'
params = {
'q': input
}
jdat = send_request(url, params)
if not jdat:
return send_error(msg, 'connection')
if jdat['total_results'] == 0:
return send_error(msg, 'results')
	limit = 6
	if jdat['total_results'] < limit:
		limit = jdat['total_results']
	# Keep only torrents that have at least one seeder
	jdat['list'] = [v for v in jdat['list'] if v['seeds'] != 0]
	if len(jdat['list']) == 0:
		return send_error(msg, 'results')
	limit = min(limit, len(jdat['list']))
message = '*Kickass Search*: "_' + input + '_"\n\n'
for i in range(0, limit):
message += get_category_icon(jdat['list'][i]['category']) + ' [' + delete_markup(jdat['list'][i]['title']) + '](' + get_short_url(jdat['list'][i]['torrentLink']) + ')'
if jdat['list'][i]['verified'] == 0:
message += u' ❗️'
size, unit = get_size(jdat['list'][i]['size'])
message += '\n\t *' + size + ' ' + unit + 'B* | '
size, unit = get_size(jdat['list'][i]['seeds'])
message += '*' + size + unit + '* Seeds'
size, unit = get_size(jdat['list'][i]['votes'])
message += ' | ' + size + unit + u' 👍\n\n'
message = message.replace('&', '&')
send_message(msg['chat']['id'], message, parse_mode="Markdown")
| gpl-2.0 | 6,125,521,446,916,499,000 | 24.523256 | 175 | 0.495397 | false |
MicroMagnum/MicroMagnum | src/magnum/micromagnetics/simple_field.py | 1 | 1577 | # Copyright 2012, 2013 by the Micromagnum authors.
#
# This file is part of MicroMagnum.
#
# MicroMagnum is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MicroMagnum is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MicroMagnum. If not, see <http://www.gnu.org/licenses/>.
import magnum.module as module
from magnum.mesh import VectorField
from magnum.logger import logger
class SimpleVectorField(module.Module):
def __init__(self, var_id):
super(SimpleVectorField, self).__init__()
self.__var_id = var_id
def params(self):
return [self.__var_id]
def initialize(self, system):
logger.info("%s: Providing parameters %s" % (self.name(), ", ".join(self.params())))
A = VectorField(system.mesh)
A.clear()
setattr(self, self.__var_id, A)
# This module is for use as an external field term in the LLG equation
class SimpleExternalField(SimpleVectorField):
def __init__(self, var_id):
super(SimpleExternalField, self).__init__(var_id)
self.__var_id = var_id
def properties(self):
return {'EFFECTIVE_FIELD_TERM': self.__var_id}
| gpl-3.0 | -1,001,538,965,820,108,400 | 34.044444 | 92 | 0.696259 | false |
LazerTrace/LazerTrace | vendor/openctm/bindings/python/openctm.py | 1 | 6310 | #------------------------------------------------------------------------------
# Product: OpenCTM
# File: openctm.py
# Description: Python API bindings (tested with Python 2.5.2 and Python 3.0)
#------------------------------------------------------------------------------
# Copyright (c) 2009-2010 Marcus Geelnard
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not
# be misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source
# distribution.
#------------------------------------------------------------------------------
import os
import ctypes
from ctypes import *
from ctypes.util import find_library
# Types
CTMfloat = c_float
CTMint = c_int32
CTMuint = c_uint32
CTMcontext = c_void_p
CTMenum = c_uint32
# Constants
CTM_API_VERSION = 0x00000100
CTM_TRUE = 1
CTM_FALSE = 0
# CTMenum
CTM_NONE = 0x0000
CTM_INVALID_CONTEXT = 0x0001
CTM_INVALID_ARGUMENT = 0x0002
CTM_INVALID_OPERATION = 0x0003
CTM_INVALID_MESH = 0x0004
CTM_OUT_OF_MEMORY = 0x0005
CTM_FILE_ERROR = 0x0006
CTM_BAD_FORMAT = 0x0007
CTM_LZMA_ERROR = 0x0008
CTM_INTERNAL_ERROR = 0x0009
CTM_UNSUPPORTED_FORMAT_VERSION = 0x000A
CTM_IMPORT = 0x0101
CTM_EXPORT = 0x0102
CTM_METHOD_RAW = 0x0201
CTM_METHOD_MG1 = 0x0202
CTM_METHOD_MG2 = 0x0203
CTM_VERTEX_COUNT = 0x0301
CTM_TRIANGLE_COUNT = 0x0302
CTM_HAS_NORMALS = 0x0303
CTM_UV_MAP_COUNT = 0x0304
CTM_ATTRIB_MAP_COUNT = 0x0305
CTM_VERTEX_PRECISION = 0x0306
CTM_NORMAL_PRECISION = 0x0307
CTM_COMPRESSION_METHOD = 0x0308
CTM_FILE_COMMENT = 0x0309
CTM_NAME = 0x0501
CTM_FILE_NAME = 0x0502
CTM_PRECISION = 0x0503
CTM_INDICES = 0x0601
CTM_VERTICES = 0x0602
CTM_NORMALS = 0x0603
CTM_UV_MAP_1 = 0x0700
CTM_UV_MAP_2 = 0x0701
CTM_UV_MAP_3 = 0x0702
CTM_UV_MAP_4 = 0x0703
CTM_UV_MAP_5 = 0x0704
CTM_UV_MAP_6 = 0x0705
CTM_UV_MAP_7 = 0x0706
CTM_UV_MAP_8 = 0x0707
CTM_ATTRIB_MAP_1 = 0x0800
CTM_ATTRIB_MAP_2 = 0x0801
CTM_ATTRIB_MAP_3 = 0x0802
CTM_ATTRIB_MAP_4 = 0x0803
CTM_ATTRIB_MAP_5 = 0x0804
CTM_ATTRIB_MAP_6 = 0x0805
CTM_ATTRIB_MAP_7 = 0x0806
CTM_ATTRIB_MAP_8 = 0x0807
# Load the OpenCTM shared library
if os.name == 'nt':
_lib = WinDLL('openctm.dll')
else:
_libName = find_library('openctm')
if not _libName:
raise Exception('Could not find the OpenCTM shared library.')
_lib = CDLL(_libName)
if not _lib:
raise Exception('Could not open the OpenCTM shared library.')
# Functions
ctmNewContext = _lib.ctmNewContext
ctmNewContext.argtypes = [CTMenum]
ctmNewContext.restype = CTMcontext
ctmFreeContext = _lib.ctmFreeContext
ctmFreeContext.argtypes = [CTMcontext]
ctmGetError = _lib.ctmGetError
ctmGetError.argtypes = [CTMcontext]
ctmGetError.restype = CTMenum
ctmErrorString = _lib.ctmErrorString
ctmErrorString.argtypes = [CTMenum]
ctmErrorString.restype = c_char_p
ctmGetInteger = _lib.ctmGetInteger
ctmGetInteger.argtypes = [CTMcontext, CTMenum]
ctmGetInteger.restype = CTMint
ctmGetFloat = _lib.ctmGetFloat
ctmGetFloat.argtypes = [CTMcontext, CTMenum]
ctmGetFloat.restype = CTMfloat
ctmGetIntegerArray = _lib.ctmGetIntegerArray
ctmGetIntegerArray.argtypes = [CTMcontext, CTMenum]
ctmGetIntegerArray.restype = POINTER(CTMuint)
ctmGetFloatArray = _lib.ctmGetFloatArray
ctmGetFloatArray.argtypes = [CTMcontext, CTMenum]
ctmGetFloatArray.restype = POINTER(CTMfloat)
ctmGetNamedUVMap = _lib.ctmGetNamedUVMap
ctmGetNamedUVMap.argtypes = [CTMcontext, c_char_p]
ctmGetNamedUVMap.restype = CTMenum
ctmGetUVMapString = _lib.ctmGetUVMapString
ctmGetUVMapString.argtypes = [CTMcontext, CTMenum, CTMenum]
ctmGetUVMapString.restype = c_char_p
ctmGetUVMapFloat = _lib.ctmGetUVMapFloat
ctmGetUVMapFloat.argtypes = [CTMcontext, CTMenum, CTMenum]
ctmGetUVMapFloat.restype = CTMfloat
ctmGetNamedAttribMap = _lib.ctmGetNamedAttribMap
ctmGetNamedAttribMap.argtypes = [CTMcontext, c_char_p]
ctmGetNamedAttribMap.restype = CTMenum
ctmGetAttribMapString = _lib.ctmGetAttribMapString
ctmGetAttribMapString.argtypes = [CTMcontext, CTMenum, CTMenum]
ctmGetAttribMapString.restype = c_char_p
ctmGetAttribMapFloat = _lib.ctmGetAttribMapFloat
ctmGetAttribMapFloat.argtypes = [CTMcontext, CTMenum, CTMenum]
ctmGetAttribMapFloat.restype = CTMfloat
ctmGetString = _lib.ctmGetString
ctmGetString.argtypes = [CTMcontext, CTMenum]
ctmGetString.restype = c_char_p
ctmCompressionMethod = _lib.ctmCompressionMethod
ctmCompressionMethod.argtypes = [CTMcontext, CTMenum]
ctmCompressionLevel = _lib.ctmCompressionLevel
ctmCompressionLevel.argtypes = [CTMcontext, CTMuint]
ctmVertexPrecision = _lib.ctmVertexPrecision
ctmVertexPrecision.argtypes = [CTMcontext, CTMfloat]
ctmVertexPrecisionRel = _lib.ctmVertexPrecisionRel
ctmVertexPrecisionRel.argtypes = [CTMcontext, CTMfloat]
ctmNormalPrecision = _lib.ctmNormalPrecision
ctmNormalPrecision.argtypes = [CTMcontext, CTMfloat]
ctmUVCoordPrecision = _lib.ctmUVCoordPrecision
ctmUVCoordPrecision.argtypes = [CTMcontext, CTMenum, CTMfloat]
ctmAttribPrecision = _lib.ctmAttribPrecision
ctmAttribPrecision.argtypes = [CTMcontext, CTMenum, CTMfloat]
ctmFileComment = _lib.ctmFileComment
ctmFileComment.argtypes = [CTMcontext, c_char_p]
ctmDefineMesh = _lib.ctmDefineMesh
ctmDefineMesh.argtypes = [CTMcontext, POINTER(CTMfloat), CTMuint, POINTER(CTMuint), CTMuint, POINTER(CTMfloat)]
ctmAddUVMap = _lib.ctmAddUVMap
ctmAddUVMap.argtypes = [CTMcontext, POINTER(CTMfloat), c_char_p, c_char_p]
ctmAddUVMap.restype = CTMenum
ctmAddAttribMap = _lib.ctmAddAttribMap
ctmAddAttribMap.argtypes = [CTMcontext, POINTER(CTMfloat), c_char_p]
ctmAddAttribMap.restype = CTMenum
ctmLoad = _lib.ctmLoad
ctmLoad.argtypes = [CTMcontext, c_char_p]
ctmSave = _lib.ctmSave
ctmSave.argtypes = [CTMcontext, c_char_p]
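
# A short usage sketch built only on the bindings declared above: import a CTM
# file and copy its vertex coordinates out of the ctypes buffer. Error handling
# here is deliberately minimal and the helper name is arbitrary.
def read_ctm_vertices(filename):
    ctm = ctmNewContext(CTM_IMPORT)
    try:
        ctmLoad(ctm, filename)
        err = ctmGetError(ctm)
        if err != CTM_NONE:
            raise Exception('OpenCTM error: %s' % ctmErrorString(err))
        vertex_count = ctmGetInteger(ctm, CTM_VERTEX_COUNT)
        vertices = ctmGetFloatArray(ctm, CTM_VERTICES)
        # Three consecutive floats per vertex: (x, y, z)
        return [(vertices[i * 3], vertices[i * 3 + 1], vertices[i * 3 + 2])
                for i in range(vertex_count)]
    finally:
        ctmFreeContext(ctm)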
| mit | 5,132,959,107,381,138,000 | 29.931373 | 111 | 0.750238 | false |
harshavardhana/tweepy | tweepy/parsers.py | 1 | 2602 | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from models import ModelFactory
from common import import_simplejson
from error import TweepError
class Parser(object):
def parse(self, method, payload):
"""
Parse the response payload and return the result.
Returns a tuple that contains the result data and the cursors
(or None if not present).
"""
raise NotImplementedError
def parse_error(self, payload):
"""
Parse the error message from payload.
If unable to parse the message, throw an exception
and default error message will be used.
"""
raise NotImplementedError
class RawParser(Parser):
def __init__(self):
pass
def parse(self, method, payload):
return payload
def parse_error(self, payload):
return payload
class JSONParser(Parser):
payload_format = 'json'
def __init__(self):
self.json_lib = import_simplejson()
def parse(self, method, payload):
try:
json = self.json_lib.loads(payload)
except Exception, e:
raise TweepError('Failed to parse JSON payload: %s' % e)
needsCursors = method.parameters.has_key('cursor')
if needsCursors and isinstance(json, dict) and 'previous_cursor' in json and 'next_cursor' in json:
cursors = json['previous_cursor'], json['next_cursor']
return json, cursors
else:
return json
def parse_error(self, payload):
error = self.json_lib.loads(payload)
if error.has_key('error'):
return error['error']
else:
return error['errors']
class ModelParser(JSONParser):
def __init__(self, model_factory=None):
JSONParser.__init__(self)
self.model_factory = model_factory or ModelFactory
def parse(self, method, payload):
try:
if method.payload_type is None: return
model = getattr(self.model_factory, method.payload_type)
except AttributeError:
raise TweepError('No model for this payload type: %s' % method.payload_type)
json = JSONParser.parse(self, method, payload)
if isinstance(json, tuple):
json, cursors = json
else:
cursors = None
if method.payload_list:
result = model.parse_list(method.api, json)
else:
result = model.parse(method.api, json)
if cursors:
return result, cursors
else:
return result
| apache-2.0 | -8,879,343,879,898,656,000 | 25.824742 | 107 | 0.604919 | false |
shigh/dg-tinker | dg.py | 1 | 5209 |
import numpy as np
from numpy import newaxis
import scipy.sparse as sps
from scipy.sparse.linalg import spsolve
from pyfem.topo import Interval
from pyfem.poly import gll_points
from pyfem.sem import SEMhat
from pyfem.poly import eval_lagrange_d0 as eval_phi1d
from poly import eval_P
from utils import minmod
class Mesh(object):
def __init__(self, K, N, L=1.0, periodic=False):
self.K, self.N, self.L = K, N, L
self.periodic = periodic
semh = SEMhat(N)
n_dofs = K*(N+1)
vertices = np.linspace(0, L, K+1)
EtoV = np.zeros((K, 2), dtype=np.int)
EtoV[:,0] = np.arange(K)
EtoV[:,1] = np.arange(K)+1
self.vertices = vertices
self.EtoV = EtoV
topo = Interval()
xq = topo.ref_to_phys(vertices[EtoV], semh.xgll)
jacb_det = topo.calc_jacb(vertices[EtoV])[0]
dx = np.min(xq[0,1:]-xq[0,:-1])
self.dx, self.xq = dx, xq
if periodic:
EtoV[-1,-1] = EtoV[0,0]
# Restriction operator
# Removes contribution to dirch points from flux operators
R = sps.eye(n_dofs).tocsr()
if not periodic:
R[0,0] = R[-1,-1] = 0.0
self.R = R
# Make elem to dof map
EtoD = np.arange(K*(N+1))
EtoD = EtoD.reshape((K, -1))
self.EtoD = EtoD
dof_phys = xq.ravel()
self.dof_phys = dof_phys
# Averaging operator
rows = EtoD[:,[0,-1]].ravel()
cols = EtoV.ravel()
vals = np.ones_like(cols)
FtoD = sps.coo_matrix((vals, (rows, cols))).tocsr()
AVG = FtoD.dot(FtoD.T)/2.0
self.AVG = AVG
# Extract face DOFS
vals = np.ones(len(rows))
FD = sps.coo_matrix((vals, (rows, rows))).tocsr()
# Set face signs
vals[::2] = -1
SD = sps.coo_matrix((vals, (rows, rows))).tocsr()
self.FD, self.SD = FD, SD
# Jump operator
JUMP = FtoD.dot(SD.dot(FtoD).T)
self.JUMP = JUMP
# Build Advection operator
C = sps.kron(sps.eye(K), semh.Ch).tocsr()
self.C = C
# Build full elemental mass matrix
x, w = topo.get_quadrature(N+1)
P = eval_phi1d(semh.xgll, x).T
G = sps.dia_matrix((w, 0), shape=(len(x), len(x)))
Bf = P.T.dot(G.dot(P))*jacb_det
Bfinv = np.linalg.inv(Bf)
# Using trick from book
V = eval_P(N, semh.xgll).T
Vinv = np.linalg.inv(V)
Minv = V.dot(V.T)/jacb_det
Binv = sps.kron(sps.eye(K), Minv).tocsr()
self.Binv = Binv
self.Vinv, self.V = Vinv, V
class LimiterMUSCL(object):
def __init__(self, mesh):
self.mesh = mesh
N = mesh.N
# elem midpoints
x0 = (mesh.xq[:,0]+mesh.xq[:,-1])/2.0
# elem widths (assuming uniform)
h = (mesh.xq[:,-1]-mesh.xq[:,0])[0]
self.x0, self.h = x0, h
# Remove scale factors built into V
# (b.c. of the def used in nodal-dg)
nv = np.arange(N+1)
gam = 2.0/(2.0*nv+1)
G = sps.dia_matrix((1.0/np.sqrt(gam),0),
shape=mesh.Vinv.shape)
G = G.dot(mesh.Vinv)
self.G = G
def slope_limit(self, U):
x0, h, G = self.x0, self.h, self.G
mesh = self.mesh
N = mesh.N
periodic = mesh.periodic
if not periodic:
u = U
else:
u = np.zeros(U.shape[0]+2*(N+1))
u[(N+1):-(N+1)] = U
us = u.reshape((-1,N+1)).T
if periodic:
us[:,0] = us[:,-2]
us[:,-1] = us[:,1]
avg, slope = G.dot(us)[[0,1]]
# The two comes from the domain of Legendre polys
slope *= 2.0/h
u = u.reshape((-1,N+1))
h2 = h/2.0
h2 = h
m = minmod(slope[1:-1],
(avg[2:]-avg[1:-1])/h2,
(avg[1:-1]-avg[:-2])/h2)
# xq has shape (n_elem, n_dof_per_elem)
# This is why the rest of the arrays need to use newaxis
xq = mesh.xq
if periodic:
u[1:-1] = avg[1:-1,newaxis]+(xq-x0[:,newaxis])*m[:,newaxis]
else:
u[1:-1] = avg[1:-1,newaxis]+(xq-x0[:,newaxis])[1:-1]*m[:,newaxis]
if periodic:
U[:] = u[1:-1].reshape(U.shape)
def apply_limiter(self, u):
self.slope_limit(u[:,0])
self.slope_limit(u[:,1])
self.slope_limit(u[:,2])
class LimiterNULL(object):
def __init__(self, mesh):
pass
def apply_limiter(self, u):
pass
class EqnSetEuler(object):
def __init__(self, gamma=7.0/5.0):
self.gamma = gamma
def calc_flux(self, u):
gamma = self.gamma
f = np.zeros_like(u)
p = (gamma-1)*(u[:,2]-.5*u[:,1]**2/u[:,0])
f[:,0] = u[:,1]
f[:,1] = u[:,1]**2/u[:,0]+p
f[:,2] = (u[:,2]+p)*u[:,1]/u[:,0]
return f
def calc_eig(self, u):
p = self.calc_p(u)
gamma = self.gamma
eig = np.abs(u[:,1]/u[:,0])
eig += np.sqrt(gamma*p/u[:,0])
return eig
def calc_p(self, u):
gamma = self.gamma
p = (gamma-1)*(u[:,2]-.5*u[:,1]**2/u[:,0])
return p
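
# A small construction sketch showing how the classes above fit together; the
# uniform initial state and the resolution are arbitrary illustrative choices.
def build_uniform_setup(K=20, N=4):
    mesh = Mesh(K, N, L=1.0, periodic=False)
    limiter = LimiterMUSCL(mesh)
    eqn = EqnSetEuler()
    # Conserved variables (rho, rho*u, E) at every DOF
    u = np.zeros((K * (N + 1), 3))
    u[:, 0] = 1.0                        # density
    u[:, 1] = 0.0                        # momentum
    u[:, 2] = 1.0 / (eqn.gamma - 1.0)    # total energy for p = 1, u = 0
    limiter.apply_limiter(u)
    return mesh, limiter, eqn, u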
| mit | -2,655,844,388,797,042,700 | 25.441624 | 77 | 0.489153 | false |
kiall/designate-py3 | functionaltests/common/datagen.py | 1 | 4319 | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
from functionaltests.api.v2.models.zone_model import ZoneModel
from functionaltests.api.v2.models.recordset_model import RecordsetModel
from functionaltests.api.v2.models.pool_model import PoolModel
def random_ip():
return ".".join(str(random.randrange(0, 256)) for _ in range(4))
def random_ipv6():
def hexes(n):
return "".join(random.choice("1234567890abcdef") for _ in range(n))
result = ":".join(hexes(4) for _ in range(8))
return result.replace("0000", "0")
def random_string(prefix='rand', n=8, suffix=''):
"""Return a string containing random digits
:param prefix: the exact text to start the string. Defaults to "rand"
:param n: the number of random digits to generate
:param suffix: the exact text to end the string
"""
digits = "".join(str(random.randrange(0, 10)) for _ in range(n))
return prefix + digits + suffix
def random_zone_data(name=None, email=None, ttl=None, description=None):
"""Generate random zone data, with optional overrides
:return: A ZoneModel
"""
if name is None:
name = random_string(prefix='testdomain', suffix='.com.')
if email is None:
email = ("admin@" + name).strip('.')
if description is None:
description = random_string(prefix='Description ')
    if ttl is None:
        ttl = random.randint(1200, 8400)
    return ZoneModel.from_dict({
        'name': name,
        'email': email,
        'ttl': ttl,
        'description': description})
def random_recordset_data(record_type, zone_name, name=None, records=None,
ttl=None):
"""Generate random recordset data, with optional overrides
:return: A RecordsetModel
"""
if name is None:
name = random_string(prefix=record_type, suffix='.' + zone_name)
if records is None:
records = [random_ip()]
if ttl is None:
ttl = random.randint(1200, 8400)
return RecordsetModel.from_dict({
'type': record_type,
'name': name,
'records': records,
'ttl': ttl})
def random_a_recordset(zone_name, ip=None, **kwargs):
if ip is None:
ip = random_ip()
return random_recordset_data('A', zone_name, records=[ip], **kwargs)
def random_aaaa_recordset(zone_name, ip=None, **kwargs):
if ip is None:
ip = random_ipv6()
return random_recordset_data('AAAA', zone_name, records=[ip], **kwargs)
def random_cname_recordset(zone_name, cname=None, **kwargs):
if cname is None:
cname = zone_name
return random_recordset_data('CNAME', zone_name, records=[cname], **kwargs)
def random_mx_recordset(zone_name, pref=None, host=None, **kwargs):
if pref is None:
pref = str(random.randint(0, 65535))
if host is None:
host = random_string(prefix='mail', suffix='.' + zone_name)
data = "{0} {1}".format(pref, host)
return random_recordset_data('MX', zone_name, records=[data], **kwargs)
def random_pool_data():
ns_zone = random_zone_data().name
data = {
"name": random_string(),
}
ns_records = []
for i in range(0, 2):
ns_records.append("ns%s.%s" % (i, ns_zone))
data["ns_records"] = []
return PoolModel.from_dict(data)
def random_zonefile_data(name=None, ttl=None):
"""Generate random zone data, with optional overrides
:return: A ZoneModel
"""
zone_base = ('$ORIGIN &\n& # IN SOA ns.& nsadmin.& # # # # #\n'
'& # IN NS ns.&\n& # IN MX 10 mail.&\nns.& 360 IN A 1.0.0.1')
if name is None:
name = random_string(prefix='testdomain', suffix='.com.')
if ttl is None:
ttl = str(random.randint(1200, 8400))
return zone_base.replace('&', name).replace('#', ttl)
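
# A short usage sketch: build a zone and two recordsets that belong to it. The
# zone-name literal is arbitrary; only functions defined in this module are used.
def example_zone_and_records(zone_name='testdomain123.com.'):
    zone = random_zone_data(name=zone_name)
    a_recordset = random_a_recordset(zone_name)
    mx_recordset = random_mx_recordset(zone_name)
    return zone, a_recordset, mx_recordset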
| apache-2.0 | -2,687,240,077,220,593,000 | 30.525547 | 79 | 0.639963 | false |
pelletier2017/ChessGame | tests/test_game.py | 1 | 2166 | import unittest
import sys
sys.path.append("../")
from chess.game import ChessGame
import chess.player as player
class TestGame(unittest.TestCase):
def test_first_move(self):
p1 = player.RandomComputer()
p2 = player.OldMinimax()
game1 = ChessGame(p1, p2, pause=0, first_move=1)
expected1 = "1 rnbkqbnr pppppppp ........ ........ ........ ........ PPPPPPPP RNBKQBNR"
self.assertEqual(expected1, repr(game1._board))
game2 = ChessGame(p1, p2, pause=0, first_move=2)
expected2 = "2 rnbkqbnr pppppppp ........ ........ ........ ........ PPPPPPPP RNBKQBNR"
self.assertEqual(expected2, repr(game2._board))
def test_get_player(self):
p1 = player.RandomComputer()
p2 = player.OldMinimax()
game = ChessGame(p1, p2)
self.assertEqual(p1, game.player1)
self.assertEqual(p2, game.player2)
def test_get_other_player(self):
p1 = player.RandomComputer()
p2 = player.OldMinimax()
game = ChessGame(p1, p2)
self.assertEqual(p2, game.get_other_player(p1))
self.assertEqual(p1, game.get_other_player(p2))
def test_draw(self):
board_str = "k....... .R...... .R...... ........ ........ ........ ........ .......K"
p1 = player.RandomComputer()
p2 = player.OldMinimax()
game = ChessGame(p1, p2, first_move=1, board=board_str, verbosity=0)
result = game.play()
self.assertEqual(None, result["winner"])
class TestAttributes(unittest.TestCase):
def setUp(self):
p1 = player.RandomComputer()
p2 = player.OldMinimax()
self.game = ChessGame(p1, p2)
def test_pieces(self):
p1_pieces_dict = {'k': 1, 'q': 1, 'b': 2, 'n': 2, 'r': 2, 'p': 8}
p2_pieces_dict = {'K': 1, 'Q': 1, 'B': 2, 'N': 2, 'R': 2, 'P': 8}
pieces_dict = p1_pieces_dict.copy()
pieces_dict.update(p2_pieces_dict)
self.assertDictEqual(self.game.pieces, pieces_dict)
self.assertDictEqual(self.game.p1_pieces, p1_pieces_dict)
self.assertDictEqual(self.game.p2_pieces, p2_pieces_dict)
if __name__ == "__main__":
unittest.main() | mit | 959,988,971,757,558,800 | 33.951613 | 95 | 0.573869 | false |
tschaume/ccsgp_get_started | ccsgp_get_started/examples/utils.py | 1 | 6069 | import sys, os, itertools, inspect, logging, math
import numpy as np
from uncertainties import ufloat
from uncertainties.umath import fsum
from decimal import Decimal
mass_titles = [ 'pi0', 'LMR', 'omphi', 'IMR' ]
eRanges = np.array([ Decimal(str(e)) for e in [ 0, 0.4, 0.75, 1.1, 3. ] ])
def getWorkDirs():
"""get input/output dirs (same input/output layout as for package)"""
# get caller module
caller_fullurl = inspect.stack()[1][1]
caller_relurl = os.path.relpath(caller_fullurl)
caller_modurl = os.path.splitext(caller_relurl)[0]
# split caller_url & append 'Dir' to package name
dirs = caller_modurl.split('/')
dirs[0] = 'data' # TODO de-hardcode
# get, check and create outdir
outDir = os.path.join(*(['output'] + dirs[1:]))
if not os.path.exists(outDir): os.makedirs(outDir)
# get and check indir
dirs.append('input')
inDir = os.path.join(*dirs)
if not os.path.exists(inDir):
logging.critical('create input dir %s to continue!' % inDir)
sys.exit(1)
return inDir, outDir
def getUArray(npArr):
"""uncertainty array multiplied by binwidth (col2 = dx)"""
ufloats = []
for dp in npArr:
u = ufloat(dp[1], abs(dp[3]), 'stat')
v = ufloat(dp[1], abs(dp[4]), 'syst')
r = (u+v)/2.*dp[2]*2.
ufloats.append(r)
# NOTE: center value ok, but both error contribs half!
# see getErrorComponent()
return np.array(ufloats)
def getErrorComponent(result, tag):
"""get total error contribution for component with specific tag"""
return math.sqrt(sum(
(error*2)**2
for (var, error) in result.error_components().items()
if var.tag == tag
))
def getEdges(npArr):
"""get np array of bin edges"""
edges = np.concatenate(([0], npArr[:,0] + npArr[:,2]))
return np.array([Decimal(str(i)) for i in edges])
def getMaskIndices(mask):
"""get lower and upper index of mask"""
return [
list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)
]
def enumzipEdges(eArr):
"""zip and enumerate edges into pairs of lower and upper limits"""
return enumerate(zip(eArr[:-1], eArr[1:]))
def getCocktailSum(e0, e1, eCocktail, uCocktail):
"""get the cocktail sum for a given data bin range"""
# get mask and according indices
mask = (eCocktail >= e0) & (eCocktail <= e1)
# data bin range wider than single cocktail bin
if np.any(mask):
idx = getMaskIndices(mask)
# determine coinciding flags
eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
# get cocktail sum in data bin (always w/o last bin)
uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
logging.debug(' sum: {}'.format(uCocktailSum))
# get correction for non-coinciding edges
if not_coinc_low:
eCl_bw = eCl - eCocktail[idx[0]-1]
corr_low = (eCl - e0) / eCl_bw
abs_corr_low = float(corr_low) * uCocktail[idx[0]-1]
uCocktailSum += abs_corr_low
logging.debug((' low: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e0, eCl, eCl - e0, eCl_bw, corr_low
)).format(abs_corr_low, uCocktailSum))
if not_coinc_upp:
if idx[1]+1 < len(eCocktail):
eCu_bw = eCocktail[idx[1]+1] - eCu
corr_upp = (e1 - eCu) / eCu_bw
abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
else:# catch last index (quick fix!)
abs_corr_upp = eCu_bw = corr_upp = 0
uCocktailSum += abs_corr_upp
logging.debug((' upp: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e1, eCu, e1 - eCu, eCu_bw, corr_upp
)).format(abs_corr_upp, uCocktailSum))
else:
mask = (eCocktail >= e0)
idx = getMaskIndices(mask) # only use first index
# catch if already at last index
if idx[0] == idx[1] and idx[0] == len(eCocktail)-1:
corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0]-1])
uCocktailSum = float(corr) * uCocktail[idx[0]-1]
else: # default case
corr = (e1 - e0) / (eCocktail[idx[0]+1] - eCocktail[idx[0]])
uCocktailSum = float(corr) * uCocktail[idx[0]]
logging.debug(' sum: {}'.format(uCocktailSum))
return uCocktailSum
def getMassRangesSums(
indata, suffix = "", customRanges = None,
onlyLMR = False, systLMR = False, singleRange = False
):
eRangesSyst = [ eRanges if customRanges is None else customRanges ]
if systLMR:
step_size, nsteps, rangeOffsetsLMR = 0.05, 6, [0.15, 0.5]
eEdgesSyst = [ [ # all lower & upper edges for LMR syst. study
Decimal(str(rangeOffsetsLMR[j]+i*step_size))
for i in xrange(nsteps)
] for j in xrange(2) ]
# all combos of lower and upper LMR edges
eRangesSyst = [ [ le, ue ] for ue in eEdgesSyst[1] for le in eEdgesSyst[0] ]
onlyLMR = False # flag meaningless in this case
uInData = getUArray(indata)
eInData = getEdges(indata)
uSums = {}
for erngs in eRangesSyst:
for i, (e0, e1) in enumzipEdges(erngs):
if onlyLMR and i != 1: continue
uSum = getCocktailSum(e0, e1, eInData, uInData)
if (not systLMR) and (onlyLMR or singleRange): return uSum
logging.debug('%g - %g: %r' % (e0, e1, uSum))
key = mass_titles[1 if systLMR else i] + suffix
if systLMR: key += '_%s-%s' % (e0,e1)
uSums[key] = uSum
return uSums
def getEnergy4Key(energy):
if energy == '19': return '19.6'
if energy == '62': return '62.4'
return energy
def particleLabel4Key(k):
if k == 'pion': return '{/Symbol \160}^0 {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'eta': return '{/Symbol \150} {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'etap': return '{/Symbol \150}\' {/Symbol \256} e^{+}e^{-}{/Symbol \147}'
if k == 'rho': return '{/Symbol \162} {/Symbol \256} e^{+}e^{-}'
if k == 'omega': return '{/Symbol \167} {/Symbol \256} e^{+}e^{-}({/Symbol \160})'
if k == 'phi': return '{/Symbol \146} {/Symbol \256} e^{+}e^{-}({/Symbol \150})'
if k == 'jpsi': return 'J/{/Symbol \171} {/Symbol \256} e^{+}e^{-}'
if k == 'ccbar':
return 'c@^{/=18-}c {/Symbol \256} D/{/Symbol \514} {/Symbol \256} e^{+}e^{-}'
return k
| mit | 1,884,759,156,905,486,000 | 38.409091 | 86 | 0.605536 | false |
radiasoft/pykern | tests/pkunit_test.py | 1 | 4240 | # -*- coding: utf-8 -*-
u"""PyTest for :mod:`pykern.pkunit`
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
def test_assert_object_with_json():
from pykern import pkunit
pkunit.empty_work_dir()
pkunit.assert_object_with_json('assert1', {'a': 1})
with pytest.raises(AssertionError):
pkunit.assert_object_with_json('assert1', {'b': 1})
def test_data_dir():
import py.path
from pykern import pkunit
expect = _expect('pkunit_data')
d = pkunit.data_dir()
assert isinstance(d, type(py.path.local())), \
'Verify type of data_dir is same as returned by py.path.local'
assert d == expect, \
'Verify data_dir has correct return value'
def test_data_yaml():
from pykern import pkunit
y = pkunit.data_yaml('t1')
assert 'v1' == y['k1'], \
'YAML is read from file in data_dir'
def test_empty_work_dir():
from pykern import pkunit
import py.path
import os
expect = _expect('pkunit_work')
if os.path.exists(str(expect)):
expect.remove(rec=1)
assert not os.path.exists(str(expect)), \
'Ensure directory was removed'
d = pkunit.empty_work_dir()
assert isinstance(d, type(py.path.local())), \
'Verify type of empty_work_dir is same as returned by py.path.local'
assert expect == d, \
'Verify empty_work_dir has correct return value'
assert os.path.exists(str(d)), \
'Ensure directory was created'
def test_import_module_from_data_dir(monkeypatch):
from pykern import pkunit
real_data_dir = pkunit.data_dir()
fake_data_dir = None
def mock_data_dir():
return fake_data_dir
monkeypatch.setattr(pkunit, 'data_dir', mock_data_dir)
fake_data_dir = str(real_data_dir.join('import1'))
assert 'imp1' == pkunit.import_module_from_data_dir('p1').v, \
'import1/p1 should be from "imp1"'
fake_data_dir = str(real_data_dir.join('import2'))
assert 'imp2' == pkunit.import_module_from_data_dir('p1').v, \
'import2/p1 should be from "imp2"'
def test_pkexcept():
import re, inspect
from pykern.pkunit import pkexcept, pkfail
with pkexcept(KeyError, 'should see a KeyError'):
{}['not found']
with pkexcept('KeyError.*xyzzy'):
{}['xyzzy']
try:
lineno = inspect.currentframe().f_lineno + 2
with pkexcept(KeyError, 'xyzzy'):
pass
except AssertionError as e:
assert 'xyzzy' in str(e.args)
assert 'pkunit_test.py:{}:test_pkexcept'.format(lineno) in str(e.args)
except Exception as e:
pkfail('{}: got exception, but not AssertionError', e)
else:
pkfail('did not raise AssertionError')
try:
with pkexcept(KeyError):
raise NameError('whatever')
except AssertionError as e:
assert re.search(r'exception was raised.*but expected.*KeyError', str(e.args))
except Exception as e:
pkfail('{}: got exception, but not AssertionError', e)
else:
pkfail('did not raise AssertionError')
try:
lineno = inspect.currentframe().f_lineno + 2
with pkexcept('any pattern'):
pass
except AssertionError as e:
assert 'pkunit_test.py:{}:test_pkexcept'.format(lineno) in str(e.args)
assert 'was not raised' in str(e.args)
except Exception as e:
pkfail('{}: got exception, but not AssertionError', e)
else:
pkfail('did not raise AssertionError')
def test_pkok():
from pykern.pkunit import pkok
import inspect
assert 1 == pkok(1, 'should not see this'), \
'Result of a successful ok is the condition value'
lineno = inspect.currentframe().f_lineno + 2
try:
pkok(0, 'xyzzy {} {k1}', '333', k1='abc')
except AssertionError as e:
# May not match exactly, because depends on start directory
assert 'pkunit_test.py:{}:test_pkok xyzzy 333 abc'.format(lineno) in str(e.args)
def _expect(base):
import py.path
d = py.path.local(__file__).dirname
return py.path.local(d).join(base).realpath()
| apache-2.0 | -796,621,001,125,067,100 | 30.641791 | 88 | 0.633726 | false |
borjam/exabgp | src/exabgp/configuration/neighbor/api.py | 3 | 4755 | # encoding: utf-8
"""
parse_process.py
Created by Thomas Mangin on 2015-06-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
import time
import copy
from collections import defaultdict
from exabgp.configuration.core import Section
from exabgp.configuration.parser import boolean
from exabgp.configuration.neighbor.parser import processes
class _ParseDirection(Section):
action = {
'parsed': 'set-command',
'packets': 'set-command',
'consolidate': 'set-command',
'open': 'set-command',
'update': 'set-command',
'notification': 'set-command',
'keepalive': 'set-command',
'refresh': 'set-command',
'operational': 'set-command',
}
known = {
'parsed': boolean,
'packets': boolean,
'consolidate': boolean,
'open': boolean,
'update': boolean,
'notification': boolean,
'keepalive': boolean,
'refresh': boolean,
'operational': boolean,
}
default = {
'parsed': True,
'packets': True,
'consolidate': True,
'open': True,
'update': True,
'notification': True,
'keepalive': True,
'refresh': True,
'operational': True,
}
syntax = '{\n %s;\n}' % ';\n '.join(default.keys())
def __init__(self, tokeniser, scope, error):
Section.__init__(self, tokeniser, scope, error)
def clear(self):
pass
def pre(self):
return True
def post(self):
return True
class ParseSend(_ParseDirection):
syntax = 'send %s' % _ParseDirection.syntax
name = 'api/send'
class ParseReceive(_ParseDirection):
syntax = 'receive %s' % _ParseDirection.syntax
name = 'api/receive'
class ParseAPI(Section):
syntax = (
'process {\n'
' processes [ name-of-processes ];\n'
' neighbor-changes;\n'
' %s\n'
' %s\n'
'}' % ('\n '.join(ParseSend.syntax.split('\n')), '\n '.join(ParseReceive.syntax.split('\n')))
)
known = {
'processes': processes,
'neighbor-changes': boolean,
'negotiated': boolean,
'fsm': boolean,
'signal': boolean,
}
action = {
'processes': 'set-command',
'neighbor-changes': 'set-command',
'negotiated': 'set-command',
'fsm': 'set-command',
'signal': 'set-command',
}
default = {
'neighbor-changes': True,
'negotiated': True,
'fsm': True,
'signal': True,
}
DEFAULT_API = {
'neighbor-changes': [],
'negotiated': [],
'fsm': [],
'signal': [],
'processes': [],
}
name = 'api'
def __init__(self, tokeniser, scope, error):
Section.__init__(self, tokeniser, scope, error)
self.api = {}
self.named = ''
@classmethod
def _empty(cls):
return copy.deepcopy(cls.DEFAULT_API)
def clear(self):
self.api = {}
self.named = ''
Section.clear(self)
def pre(self):
named = self.tokeniser.iterate()
self.named = named if named else 'auto-named-%d' % int(time.time() * 1000000)
self.check_name(self.named)
self.scope.enter(self.named)
self.scope.to_context()
return True
def post(self):
self.scope.leave()
self.scope.to_context()
return True
@classmethod
def flatten(cls, apis):
built = cls._empty()
for api in apis.values():
procs = api.get('processes', [])
built.setdefault('processes', []).extend(procs)
for command in ('neighbor-changes', 'negotiated', 'fsm', 'signal'):
built.setdefault(command, []).extend(procs if api.get(command, False) else [])
for direction in ('send', 'receive'):
data = api.get(direction, {})
for action in (
'parsed',
'packets',
'consolidate',
'open',
'update',
'notification',
'keepalive',
'refresh',
'operational',
):
built.setdefault("%s-%s" % (direction, action), []).extend(procs if data.get(action, False) else [])
return built
for way in ('send', 'receive'):
for name in (
'parsed',
'packets',
'consolidate',
'open',
'update',
'notification',
'keepalive',
'refresh',
'operational',
):
ParseAPI.DEFAULT_API["%s-%s" % (way, name)] = []
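
# A small sketch of what ParseAPI.flatten produces for one named api section;
# the section contents are illustrative rather than taken from a real
# configuration.
def _flatten_example():
    apis = {
        'syslog-1': {
            'processes': ['dump-to-syslog'],
            'neighbor-changes': True,
            'receive': {'parsed': True, 'update': True},
        },
    }
    # Each enabled flag expands into the list of processes it applies to,
    # e.g. built['receive-update'] == ['dump-to-syslog'].
    return ParseAPI.flatten(apis)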
| bsd-3-clause | 2,026,195,772,931,803,600 | 23.384615 | 120 | 0.515037 | false |