ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a4a31eaaef6e8749cf0cb40f28cadb72f0b22bc | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .connectors import Connection, StageConnectionDescription, create_connection_description
__all__ = [
'Connection',
'StageConnectionDescription',
'create_connection_description'
]
|
py | 1a4a323bda5ab34c0c2e7900f106dbeaa26da40e | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest.mock import patch
import pandas as pd
from ax.core.base_trial import BaseTrial, TrialStatus
from ax.core.data import Data
from ax.core.generator_run import GeneratorRun, GeneratorRunType
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import get_arms, get_experiment, get_objective
TEST_DATA = Data(
df=pd.DataFrame(
[
{
"arm_name": "0_0",
"metric_name": get_objective().metric.name,
"mean": 1.0,
"sem": 2.0,
"trial_index": 0,
}
]
)
)
class TrialTest(TestCase):
def setUp(self):
self.experiment = get_experiment()
self.trial = self.experiment.new_trial()
self.arm = get_arms()[0]
self.trial.add_arm(self.arm)
def test_eq(self):
new_trial = self.experiment.new_trial()
self.assertNotEqual(self.trial, new_trial)
def test_basic_properties(self):
self.assertEqual(self.experiment, self.trial.experiment)
self.assertEqual(self.trial.index, 0)
self.assertEqual(self.trial.status, TrialStatus.CANDIDATE)
self.assertIsNotNone(self.trial.time_created)
self.assertEqual(self.trial.arms_by_name["0_0"], self.trial.arm)
self.assertEqual(self.trial.arms, [self.arm])
self.assertEqual(self.trial.abandoned_arms, [])
self.assertEqual(
self.trial.generator_run.generator_run_type, GeneratorRunType.MANUAL.name
)
# Test empty arms
with self.assertRaises(AttributeError):
self.experiment.new_trial().arm_weights
self.trial._status = TrialStatus.COMPLETED
self.assertTrue(self.trial.status.is_completed)
self.assertTrue(self.trial.completed_successfully)
def test_adding_new_trials(self):
new_arm = get_arms()[1]
new_trial = self.experiment.new_trial(
generator_run=GeneratorRun(arms=[new_arm])
)
with self.assertRaises(ValueError):
self.experiment.new_trial(generator_run=GeneratorRun(arms=get_arms()))
self.assertEqual(new_trial.arms_by_name["1_0"], new_arm)
with self.assertRaises(KeyError):
self.trial.arms_by_name["1_0"]
def test_add_trial_same_arm(self):
# Check that adding new arm w/out name works correctly.
new_trial1 = self.experiment.new_trial(
generator_run=GeneratorRun(arms=[self.arm.clone(clear_name=True)])
)
self.assertEqual(new_trial1.arm.name, self.trial.arm.name)
self.assertFalse(new_trial1.arm is self.trial.arm)
# Check that adding new arm with name works correctly.
new_trial2 = self.experiment.new_trial(
generator_run=GeneratorRun(arms=[self.arm.clone()])
)
self.assertEqual(new_trial2.arm.name, self.trial.arm.name)
self.assertFalse(new_trial2.arm is self.trial.arm)
arm_wrong_name = self.arm.clone(clear_name=True)
arm_wrong_name.name = "wrong_name"
with self.assertRaises(ValueError):
new_trial2 = self.experiment.new_trial(
generator_run=GeneratorRun(arms=[arm_wrong_name])
)
def test_abandonment(self):
self.assertFalse(self.trial.status.is_abandoned)
self.trial.mark_abandoned(reason="testing")
self.assertTrue(self.trial.status.is_abandoned)
self.assertFalse(self.trial.status.is_failed)
self.assertTrue(self.trial.did_not_complete)
@patch(
f"{BaseTrial.__module__}.{BaseTrial.__name__}.fetch_data",
return_value=TEST_DATA,
)
def test_objective_mean(self, _mock):
self.assertEqual(self.trial.objective_mean, 1.0)
@patch(
f"{BaseTrial.__module__}.{BaseTrial.__name__}.fetch_data", return_value=Data()
)
def test_objective_mean_empty_df(self, _mock):
with self.assertRaisesRegex(ValueError, "No data was retrieved for trial"):
self.assertIsNone(self.trial.objective_mean)
def testRepr(self):
repr_ = (
"Trial(experiment_name='test', index=0, "
"status=TrialStatus.CANDIDATE, arm=Arm(name='0_0', "
"parameters={'w': 0.85, 'x': 1, 'y': 'baz', 'z': False}))"
)
self.assertEqual(str(self.trial), repr_)
|
py | 1a4a32467d49a12b8e0e88b1c51b9fa627715020 | # Copyright 2015 PLUMgrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import MutableMapping
import ctypes as ct
import multiprocessing
import os
from .libbcc import lib, _RAW_CB_TYPE
from .perf import Perf
from subprocess import check_output
BPF_MAP_TYPE_HASH = 1
BPF_MAP_TYPE_ARRAY = 2
BPF_MAP_TYPE_PROG_ARRAY = 3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4
BPF_MAP_TYPE_PERCPU_HASH = 5
BPF_MAP_TYPE_PERCPU_ARRAY = 6
BPF_MAP_TYPE_STACK_TRACE = 7
BPF_MAP_TYPE_CGROUP_ARRAY = 8
BPF_MAP_TYPE_LRU_HASH = 9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 10
stars_max = 40
log2_index_max = 65
linear_index_max = 1025
# helper functions, consider moving these to a utils module
def _stars(val, val_max, width):
i = 0
text = ""
while (1):
if (i > (width * val / val_max) - 1) or (i > width - 1):
break
text += "*"
i += 1
if val > val_max:
text = text[:-1] + "+"
return text
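# Example (illustrative values): _stars(3, 10, 40) returns a bar of 12 "*"
# characters; a value above val_max, e.g. _stars(15, 10, 40), is capped at
# the full width and its last character is replaced by "+".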
def _print_log2_hist(vals, val_type):
global stars_max
log2_dist_max = 64
idx_max = -1
val_max = 0
for i, v in enumerate(vals):
if v > 0: idx_max = i
if v > val_max: val_max = v
if idx_max <= 32:
header = " %-19s : count distribution"
body = "%10d -> %-10d : %-8d |%-*s|"
stars = stars_max
else:
header = " %-29s : count distribution"
body = "%20d -> %-20d : %-8d |%-*s|"
stars = int(stars_max / 2)
if idx_max > 0:
print(header % val_type);
for i in range(1, idx_max + 1):
low = (1 << i) >> 1
high = (1 << i) - 1
if (low == high):
low -= 1
val = vals[i]
print(body % (low, high, val, stars,
_stars(val, val_max, stars)))
def _print_linear_hist(vals, val_type):
global stars_max
log2_dist_max = 64
idx_max = -1
val_max = 0
for i, v in enumerate(vals):
if v > 0: idx_max = i
if v > val_max: val_max = v
header = " %-13s : count distribution"
body = " %-10d : %-8d |%-*s|"
stars = stars_max
if idx_max >= 0:
print(header % val_type);
for i in range(0, idx_max + 1):
val = vals[i]
print(body % (i, val, stars,
_stars(val, val_max, stars)))
def Table(bpf, map_id, map_fd, keytype, leaftype, **kwargs):
"""Table(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
Create a python object out of a reference to a bpf table handle"""
ttype = lib.bpf_table_type_id(bpf.module, map_id)
t = None
if ttype == BPF_MAP_TYPE_HASH:
t = HashTable(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_ARRAY:
t = Array(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_PROG_ARRAY:
t = ProgArray(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_PERF_EVENT_ARRAY:
t = PerfEventArray(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_PERCPU_HASH:
t = PerCpuHash(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
elif ttype == BPF_MAP_TYPE_PERCPU_ARRAY:
t = PerCpuArray(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
elif ttype == BPF_MAP_TYPE_STACK_TRACE:
t = StackTrace(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_LRU_HASH:
t = LruHash(bpf, map_id, map_fd, keytype, leaftype)
elif ttype == BPF_MAP_TYPE_LRU_PERCPU_HASH:
t = LruPerCpuHash(bpf, map_id, map_fd, keytype, leaftype)
    if t is None:
raise Exception("Unknown table type %d" % ttype)
return t
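# Note: table handles are normally obtained through BPF's table accessor
# (e.g. b["counts"]), which resolves the key/leaf ctypes from the BPF C
# source and calls Table() internally; direct calls are rarely needed.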
class TableBase(MutableMapping):
def __init__(self, bpf, map_id, map_fd, keytype, leaftype):
self.bpf = bpf
self.map_id = map_id
self.map_fd = map_fd
self.Key = keytype
self.Leaf = leaftype
self.ttype = lib.bpf_table_type_id(self.bpf.module, self.map_id)
self.flags = lib.bpf_table_flags_id(self.bpf.module, self.map_id)
self._cbs = {}
def key_sprintf(self, key):
key_p = ct.pointer(key)
buf = ct.create_string_buffer(ct.sizeof(self.Key) * 8)
res = lib.bpf_table_key_snprintf(self.bpf.module, self.map_id,
buf, len(buf), key_p)
if res < 0:
raise Exception("Could not printf key")
return buf.value
def leaf_sprintf(self, leaf):
leaf_p = ct.pointer(leaf)
buf = ct.create_string_buffer(ct.sizeof(self.Leaf) * 8)
res = lib.bpf_table_leaf_snprintf(self.bpf.module, self.map_id,
buf, len(buf), leaf_p)
if res < 0:
raise Exception("Could not printf leaf")
return buf.value
def key_scanf(self, key_str):
key = self.Key()
key_p = ct.pointer(key)
res = lib.bpf_table_key_sscanf(self.bpf.module, self.map_id,
key_str, key_p)
if res < 0:
raise Exception("Could not scanf key")
return key
def leaf_scanf(self, leaf_str):
leaf = self.Leaf()
leaf_p = ct.pointer(leaf)
res = lib.bpf_table_leaf_sscanf(self.bpf.module, self.map_id,
leaf_str, leaf_p)
if res < 0:
raise Exception("Could not scanf leaf")
return leaf
def __getitem__(self, key):
key_p = ct.pointer(key)
leaf = self.Leaf()
leaf_p = ct.pointer(leaf)
res = lib.bpf_lookup_elem(self.map_fd,
ct.cast(key_p, ct.c_void_p),
ct.cast(leaf_p, ct.c_void_p))
if res < 0:
raise KeyError
return leaf
def __setitem__(self, key, leaf):
key_p = ct.pointer(key)
leaf_p = ct.pointer(leaf)
res = lib.bpf_update_elem(self.map_fd,
ct.cast(key_p, ct.c_void_p),
ct.cast(leaf_p, ct.c_void_p), 0)
if res < 0:
errstr = os.strerror(ct.get_errno())
raise Exception("Could not update table: %s" % errstr)
# override the MutableMapping's implementation of these since they
# don't handle KeyError nicely
def itervalues(self):
for key in self:
# a map entry may be deleted in between discovering the key and
# fetching the value, suppress such errors
try:
yield self[key]
except KeyError:
pass
def iteritems(self):
for key in self:
try:
yield (key, self[key])
except KeyError:
pass
def items(self):
return [item for item in self.iteritems()]
def values(self):
return [value for value in self.itervalues()]
def clear(self):
# default clear uses popitem, which can race with the bpf prog
for k in self.keys():
self.__delitem__(k)
def zero(self):
# Even though this is not very efficient, we grab the entire list of
# keys before enumerating it. This helps avoid a potential race where
# the leaf assignment changes a hash table bucket that is being
# enumerated by the same loop, and may lead to a hang.
for k in list(self.keys()):
self[k] = self.Leaf()
def __iter__(self):
return TableBase.Iter(self, self.Key)
def iter(self): return self.__iter__()
def keys(self): return self.__iter__()
class Iter(object):
def __init__(self, table, keytype):
self.Key = keytype
self.table = table
k = self.Key()
kp = ct.pointer(k)
# if 0 is a valid key, try a few alternatives
if k in table:
ct.memset(kp, 0xff, ct.sizeof(k))
if k in table:
ct.memset(kp, 0x55, ct.sizeof(k))
if k in table:
raise Exception("Unable to allocate iterator")
self.key = k
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
self.key = self.table.next(self.key)
return self.key
def next(self, key):
next_key = self.Key()
next_key_p = ct.pointer(next_key)
key_p = ct.pointer(key)
res = lib.bpf_get_next_key(self.map_fd,
ct.cast(key_p, ct.c_void_p),
ct.cast(next_key_p, ct.c_void_p))
if res < 0:
raise StopIteration()
return next_key
def print_log2_hist(self, val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None):
"""print_log2_hist(val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None)
Prints a table as a log2 histogram. The table must be stored as
log2. The val_type argument is optional, and is a column header.
If the histogram has a secondary key, multiple tables will print
and section_header can be used as a header description for each.
If section_print_fn is not None, it will be passed the bucket value
to format into a string as it sees fit. If bucket_fn is not None,
it will be used to produce a bucket value for the histogram keys.
The maximum index allowed is log2_index_max (65), which will
accommodate any 64-bit integer in the histogram.
"""
if isinstance(self.Key(), ct.Structure):
tmp = {}
f1 = self.Key._fields_[0][0]
f2 = self.Key._fields_[1][0]
for k, v in self.items():
bucket = getattr(k, f1)
if bucket_fn:
bucket = bucket_fn(bucket)
vals = tmp[bucket] = tmp.get(bucket, [0] * log2_index_max)
slot = getattr(k, f2)
vals[slot] = v.value
for bucket, vals in tmp.items():
if section_print_fn:
print("\n%s = %s" % (section_header,
section_print_fn(bucket)))
else:
print("\n%s = %r" % (section_header, bucket))
_print_log2_hist(vals, val_type)
else:
vals = [0] * log2_index_max
for k, v in self.items():
vals[k.value] = v.value
_print_log2_hist(vals, val_type)
def print_linear_hist(self, val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None):
"""print_linear_hist(val_type="value", section_header="Bucket ptr",
section_print_fn=None, bucket_fn=None)
Prints a table as a linear histogram. This is intended to span integer
ranges, eg, from 0 to 100. The val_type argument is optional, and is a
column header. If the histogram has a secondary key, multiple tables
will print and section_header can be used as a header description for
each. If section_print_fn is not None, it will be passed the bucket
value to format into a string as it sees fit. If bucket_fn is not None,
it will be used to produce a bucket value for the histogram keys.
The maximum index allowed is linear_index_max (1025), which is hoped
to be sufficient for integer ranges spanned.
"""
if isinstance(self.Key(), ct.Structure):
tmp = {}
f1 = self.Key._fields_[0][0]
f2 = self.Key._fields_[1][0]
for k, v in self.items():
bucket = getattr(k, f1)
if bucket_fn:
bucket = bucket_fn(bucket)
vals = tmp[bucket] = tmp.get(bucket, [0] * linear_index_max)
slot = getattr(k, f2)
vals[slot] = v.value
for bucket, vals in tmp.items():
if section_print_fn:
print("\n%s = %s" % (section_header,
section_print_fn(bucket)))
else:
print("\n%s = %r" % (section_header, bucket))
_print_linear_hist(vals, val_type)
else:
vals = [0] * linear_index_max
for k, v in self.items():
try:
vals[k.value] = v.value
except IndexError:
# Improve error text. If the limit proves a nuisance, this
# function can be rewritten to avoid having one.
raise IndexError(("Index in print_linear_hist() of %d " +
"exceeds max of %d.") % (k.value, linear_index_max))
_print_linear_hist(vals, val_type)
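# Minimal usage sketch (assumes a BPF program that declares
# BPF_HISTOGRAM(dist) and fills it with dist.increment(bpf_log2l(value))):
#
#   b = BPF(text=prog)            # 'prog' holds the BPF C source (not shown)
#   ...                           # attach probes and let events accumulate
#   b["dist"].print_log2_hist("usecs")
#
# print_linear_hist() is called the same way for linearly indexed histograms.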
class HashTable(TableBase):
def __init__(self, *args, **kwargs):
super(HashTable, self).__init__(*args, **kwargs)
def __len__(self):
i = 0
for k in self: i += 1
return i
def __delitem__(self, key):
key_p = ct.pointer(key)
res = lib.bpf_delete_elem(self.map_fd, ct.cast(key_p, ct.c_void_p))
if res < 0:
raise KeyError
class LruHash(HashTable):
def __init__(self, *args, **kwargs):
super(LruHash, self).__init__(*args, **kwargs)
class ArrayBase(TableBase):
def __init__(self, *args, **kwargs):
super(ArrayBase, self).__init__(*args, **kwargs)
self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module,
self.map_id))
def _normalize_key(self, key):
if isinstance(key, int):
if key < 0:
key = len(self) + key
key = self.Key(key)
if not isinstance(key, ct._SimpleCData):
raise IndexError("Array index must be an integer type")
if key.value >= len(self):
raise IndexError("Array index out of range")
return key
def __len__(self):
return self.max_entries
def __getitem__(self, key):
key = self._normalize_key(key)
return super(ArrayBase, self).__getitem__(key)
def __setitem__(self, key, leaf):
key = self._normalize_key(key)
super(ArrayBase, self).__setitem__(key, leaf)
def __delitem__(self, key):
key = self._normalize_key(key)
key_p = ct.pointer(key)
# Deleting from array type maps does not have an effect, so
# zero out the entry instead.
leaf = self.Leaf()
leaf_p = ct.pointer(leaf)
res = lib.bpf_update_elem(self.map_fd, ct.cast(key_p, ct.c_void_p),
ct.cast(leaf_p, ct.c_void_p), 0)
if res < 0:
raise Exception("Could not clear item")
def __iter__(self):
return ArrayBase.Iter(self, self.Key)
class Iter(object):
def __init__(self, table, keytype):
self.Key = keytype
self.table = table
self.i = -1
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
self.i += 1
if self.i == len(self.table):
raise StopIteration()
return self.Key(self.i)
class Array(ArrayBase):
def __init__(self, *args, **kwargs):
super(Array, self).__init__(*args, **kwargs)
class ProgArray(ArrayBase):
def __init__(self, *args, **kwargs):
super(ProgArray, self).__init__(*args, **kwargs)
def __setitem__(self, key, leaf):
if isinstance(leaf, int):
leaf = self.Leaf(leaf)
if isinstance(leaf, self.bpf.Function):
leaf = self.Leaf(leaf.fd)
super(ProgArray, self).__setitem__(key, leaf)
class PerfEventArray(ArrayBase):
class Event(object):
def __init__(self, typ, config):
self.typ = typ
self.config = config
HW_CPU_CYCLES = Event(Perf.PERF_TYPE_HARDWARE, 0)
HW_INSTRUCTIONS = Event(Perf.PERF_TYPE_HARDWARE, 1)
HW_CACHE_REFERENCES = Event(Perf.PERF_TYPE_HARDWARE, 2)
HW_CACHE_MISSES = Event(Perf.PERF_TYPE_HARDWARE, 3)
HW_BRANCH_INSTRUCTIONS = Event(Perf.PERF_TYPE_HARDWARE, 4)
HW_BRANCH_MISSES = Event(Perf.PERF_TYPE_HARDWARE, 5)
HW_BUS_CYCLES = Event(Perf.PERF_TYPE_HARDWARE, 6)
HW_STALLED_CYCLES_FRONTEND = Event(Perf.PERF_TYPE_HARDWARE, 7)
HW_STALLED_CYCLES_BACKEND = Event(Perf.PERF_TYPE_HARDWARE, 8)
HW_REF_CPU_CYCLES = Event(Perf.PERF_TYPE_HARDWARE, 9)
# not yet supported, wip
#HW_CACHE_L1D_READ = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|0<<8|0<<16)
#HW_CACHE_L1D_READ_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|0<<8|1<<16)
#HW_CACHE_L1D_WRITE = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|1<<8|0<<16)
#HW_CACHE_L1D_WRITE_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|1<<8|1<<16)
#HW_CACHE_L1D_PREF = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|2<<8|0<<16)
#HW_CACHE_L1D_PREF_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 0<<0|2<<8|1<<16)
#HW_CACHE_L1I_READ = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|0<<8|0<<16)
#HW_CACHE_L1I_READ_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|0<<8|1<<16)
#HW_CACHE_L1I_WRITE = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|1<<8|0<<16)
#HW_CACHE_L1I_WRITE_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|1<<8|1<<16)
#HW_CACHE_L1I_PREF = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|2<<8|0<<16)
#HW_CACHE_L1I_PREF_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 1<<0|2<<8|1<<16)
#HW_CACHE_LL_READ = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|0<<8|0<<16)
#HW_CACHE_LL_READ_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|0<<8|1<<16)
#HW_CACHE_LL_WRITE = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|1<<8|0<<16)
#HW_CACHE_LL_WRITE_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|1<<8|1<<16)
#HW_CACHE_LL_PREF = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|2<<8|0<<16)
#HW_CACHE_LL_PREF_MISS = Event(Perf.PERF_TYPE_HW_CACHE, 2<<0|2<<8|1<<16)
def __init__(self, *args, **kwargs):
super(PerfEventArray, self).__init__(*args, **kwargs)
def __delitem__(self, key):
super(PerfEventArray, self).__delitem__(key)
self.close_perf_buffer(key)
def open_perf_buffer(self, callback):
"""open_perf_buffers(callback)
Opens a set of per-cpu ring buffer to receive custom perf event
data from the bpf program. The callback will be invoked for each
event submitted from the kernel, up to millions per second.
"""
for i in range(0, multiprocessing.cpu_count()):
self._open_perf_buffer(i, callback)
def _open_perf_buffer(self, cpu, callback):
fn = _RAW_CB_TYPE(lambda _, data, size: callback(cpu, data, size))
reader = lib.bpf_open_perf_buffer(fn, None, -1, cpu)
if not reader:
raise Exception("Could not open perf buffer")
fd = lib.perf_reader_fd(reader)
self[self.Key(cpu)] = self.Leaf(fd)
self.bpf._add_kprobe((id(self), cpu), reader)
# keep a refcnt
self._cbs[cpu] = fn
def close_perf_buffer(self, key):
reader = self.bpf.open_kprobes.get((id(self), key))
if reader:
lib.perf_reader_free(reader)
self.bpf._del_kprobe((id(self), key))
del self._cbs[key]
def _open_perf_event(self, cpu, typ, config):
fd = lib.bpf_open_perf_event(typ, config, -1, cpu)
if fd < 0:
raise Exception("bpf_open_perf_event failed")
try:
self[self.Key(cpu)] = self.Leaf(fd)
finally:
# the fd is kept open in the map itself by the kernel
os.close(fd)
def open_perf_event(self, ev):
"""open_perf_event(ev)
Configures the table such that calls from the bpf program to
table.perf_read(bpf_get_smp_processor_id()) will return the hardware
counter denoted by event ev on the local cpu.
"""
if not isinstance(ev, self.Event):
raise Exception("argument must be an Event, got %s", type(ev))
for i in range(0, multiprocessing.cpu_count()):
self._open_perf_event(i, ev.typ, ev.config)
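# Usage sketch (assumes a BPF program that declares
# BPF_PERF_ARRAY(cycles, MAX_CPUS) and reads it with cycles.perf_read(cpu)):
#
#   b["cycles"].open_perf_event(PerfEventArray.HW_CPU_CYCLES)
#
# After this call, perf_read() on the BPF side returns the raw hardware
# cycle counter for the local CPU.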
class PerCpuHash(HashTable):
def __init__(self, *args, **kwargs):
self.reducer = kwargs.pop("reducer", None)
super(PerCpuHash, self).__init__(*args, **kwargs)
self.sLeaf = self.Leaf
self.total_cpu = multiprocessing.cpu_count()
# Per-cpu leaf values must be a multiple of 8 bytes, as hard-coded in the
# Linux kernel.
self.alignment = ct.sizeof(self.sLeaf) % 8
if self.alignment == 0:
self.Leaf = self.sLeaf * self.total_cpu
else:
# Currently Float, Char, un-aligned structs are not supported
if self.sLeaf == ct.c_uint:
self.Leaf = ct.c_uint64 * self.total_cpu
elif self.sLeaf == ct.c_int:
self.Leaf = ct.c_int64 * self.total_cpu
else:
raise IndexError("Leaf must be aligned to 8 bytes")
def getvalue(self, key):
result = super(PerCpuHash, self).__getitem__(key)
if self.alignment == 0:
ret = result
else:
ret = (self.sLeaf * self.total_cpu)()
for i in range(0, self.total_cpu):
ret[i] = result[i]
return ret
def __getitem__(self, key):
if self.reducer:
return reduce(self.reducer, self.getvalue(key))
else:
return self.getvalue(key)
def __setitem__(self, key, leaf):
super(PerCpuHash, self).__setitem__(key, leaf)
def sum(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default sum functions")
return self.sLeaf(reduce(lambda x,y: x+y, self.getvalue(key)))
def max(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default max functions")
return self.sLeaf(max(self.getvalue(key)))
def average(self, key):
result = self.sum(key)
result.value/=self.total_cpu
return result
class LruPerCpuHash(PerCpuHash):
def __init__(self, *args, **kwargs):
super(LruPerCpuHash, self).__init__(*args, **kwargs)
class PerCpuArray(ArrayBase):
def __init__(self, *args, **kwargs):
self.reducer = kwargs.pop("reducer", None)
super(PerCpuArray, self).__init__(*args, **kwargs)
self.sLeaf = self.Leaf
self.total_cpu = multiprocessing.cpu_count()
# Per-cpu leaf values must be a multiple of 8 bytes, as hard-coded in the
# Linux kernel.
self.alignment = ct.sizeof(self.sLeaf) % 8
if self.alignment == 0:
self.Leaf = self.sLeaf * self.total_cpu
else:
# Currently Float, Char, un-aligned structs are not supported
if self.sLeaf == ct.c_uint:
self.Leaf = ct.c_uint64 * self.total_cpu
elif self.sLeaf == ct.c_int:
self.Leaf = ct.c_int64 * self.total_cpu
else:
raise IndexError("Leaf must be aligned to 8 bytes")
def getvalue(self, key):
result = super(PerCpuArray, self).__getitem__(key)
if self.alignment == 0:
ret = result
else:
ret = (self.sLeaf * self.total_cpu)()
for i in range(0, self.total_cpu):
ret[i] = result[i]
return ret
def __getitem__(self, key):
if (self.reducer):
return reduce(self.reducer, self.getvalue(key))
else:
return self.getvalue(key)
def __setitem__(self, key, leaf):
super(PerCpuArray, self).__setitem__(key, leaf)
def sum(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default sum functions")
return self.sLeaf(reduce(lambda x,y: x+y, self.getvalue(key)))
def max(self, key):
if isinstance(self.Leaf(), ct.Structure):
raise IndexError("Leaf must be an integer type for default max functions")
return self.sLeaf(max(self.getvalue(key)))
def average(self, key):
result = self.sum(key)
result.value/=self.total_cpu
return result
class StackTrace(TableBase):
MAX_DEPTH = 127
def __init__(self, *args, **kwargs):
super(StackTrace, self).__init__(*args, **kwargs)
class StackWalker(object):
def __init__(self, stack, resolve=None):
self.stack = stack
self.n = -1
self.resolve = resolve
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
self.n += 1
if self.n == StackTrace.MAX_DEPTH:
raise StopIteration()
addr = self.stack.ip[self.n]
if addr == 0 :
raise StopIteration()
return self.resolve(addr) if self.resolve else addr
def walk(self, stack_id, resolve=None):
return StackTrace.StackWalker(self[self.Key(stack_id)], resolve)
def __len__(self):
i = 0
for k in self: i += 1
return i
def __delitem__(self, key):
key_p = ct.pointer(key)
res = lib.bpf_delete_elem(self.map_fd, ct.cast(key_p, ct.c_void_p))
if res < 0:
raise KeyError
def clear(self):
pass
|
py | 1a4a326c9c33331c1540ffed26fbfdd42ae6d2cd | # (C) Copyright 2017 IBM Corp.
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Click command definition for the server command group which includes
cmds for inspection and management of the objects defined by the pywbem
server class including namespaces, WBEMServer information, and profile
information.
NOTE: Commands are ordered in help display by their order in this file.
"""
from __future__ import absolute_import, print_function
import os
import sys
import click
import six
from pywbem import Error, MOFCompiler, ModelError
from pywbem._mof_compiler import MOFWBEMConnection, MOFCompileError
from pywbem._nocasedict import NocaseDict
from nocaselist import NocaseList
from .pywbemcli import cli
from ._common import pywbem_error_exception, parse_version_value, \
is_experimental_class
from ._common_options import namespace_option
from ._cmd_namespace import cmd_namespace_list, cmd_namespace_interop
from .._utils import pywbemtools_warn
from .._click_extensions import PywbemtoolsGroup, PywbemtoolsCommand, \
CMD_OPTS_TXT, GENERAL_OPTS_TXT, SUBCMD_HELP_TXT
from .._options import add_options, help_option
from .._output_formatting import validate_output_format, format_table, \
display_text, fold_strings
# NOTE: A number of the options use a double-dash as the short form. In those
# cases, a third definition of the option without the double-dash defines
# the corresponding option name, e.g. 'include_qualifiers'. It should be
# defined with an underscore, not a dash.
# Issue 224 - Exception in prompt-toolkit with Python 2.7: with
# prompt-toolkit 2+, the completer requires unicode, but click_repl does not
# pass option help text as unicode.
# NOTE: Ensure that all option help attributes are unicode to get around
# this issue.
#
# Common option definitions for server group
#
mof_include_option = [ # pylint: disable=invalid-name
click.option('--include', '-I', metavar='INCLUDEDIR', multiple=True,
help=u'Path name of a MOF include directory. '
'May be specified multiple times.')]
mof_dry_run_option = [ # pylint: disable=invalid-name
click.option('--dry-run', '-d', is_flag=True, default=False,
help=u'Enable dry-run mode: Don\'t actually modify the '
'server. Connection to the server is still required for '
'reading.')]
@cli.group('server', cls=PywbemtoolsGroup, options_metavar=GENERAL_OPTS_TXT,
subcommand_metavar=SUBCMD_HELP_TXT)
@add_options(help_option)
def server_group():
"""
Command group for WBEM servers.
This command group defines commands to inspect and manage core components
of a WBEM server including server attributes, namespaces, compiling MOF,
the Interop namespace and schema information.
In addition to the command-specific options shown in this help text, the
general options (see 'pywbemcli --help') can also be specified before the
'server' keyword.
"""
pass # pylint: disable=unnecessary-pass
@server_group.command('namespaces', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(help_option)
@click.pass_obj
def server_namespaces(context):
"""
List the namespaces of the server (deprecated).
The Interop namespace must exist on the server.
Deprecated: The 'server namespaces' command is deprecated and will be
removed in a future version. Use the 'namespace list' command instead.
"""
pywbemtools_warn(
"The 'server namespaces' command is deprecated and will be removed in "
"a future version. Use the 'namespace list' command instead.",
DeprecationWarning)
context.execute_cmd(lambda: cmd_namespace_list(context))
@server_group.command('interop', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(help_option)
@click.pass_obj
def server_interop(context):
"""
Get the Interop namespace of the server (deprecated).
The Interop namespace must exist on the server.
Deprecated: The 'server interop' command is deprecated and will be removed
in a future version. Use the 'namespace interop' command instead.
"""
pywbemtools_warn(
"The 'server interop' command is deprecated and will be removed in "
"a future version. Use the 'namespace interop' command instead.",
DeprecationWarning)
context.execute_cmd(lambda: cmd_namespace_interop(context))
@server_group.command('brand', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(help_option)
@click.pass_obj
def server_brand(context):
"""
Get the brand of the server.
Brand information is defined by the server implementor and may or may
not be available. Pywbem attempts to collect the brand information from
multiple sources.
"""
# pylint: disable=too-many-function-args
context.execute_cmd(lambda: cmd_server_brand(context))
@server_group.command('info', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(help_option)
@click.pass_obj
def server_info(context):
"""
Get information about the server.
The information includes CIM namespaces and server brand.
"""
context.execute_cmd(lambda: cmd_server_info(context))
@server_group.command('add-mof', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@click.argument('moffiles', metavar='MOFFILE', type=click.Path(),
nargs=-1, required=True)
@add_options(namespace_option)
@add_options(mof_include_option)
@add_options(mof_dry_run_option)
@add_options(help_option)
@click.pass_obj
def server_add_mof(context, **options):
"""
Compile MOF and add/update CIM objects in the server.
The MOF files are specified with the MOFFILE argument, which may be
specified multiple times. The minus sign ('-') specifies the standard
input.
Initially, the target namespace is the namespace specified with the
--namespace option or if not specified the default namespace of the
connection. If the MOF contains '#pragma namespace' directives, the target
namespace will be changed accordingly.
MOF include files (specified with the '#pragma include' directive) are
searched first in the directory of the including MOF file, and then in
the directories specified with the --include option.
Any CIM objects (instances, classes and qualifiers) specified in the MOF
files are created in the server, or modified if they already exist in the
server.
The global --verbose option will show the CIM objects that are created or
modified.
"""
context.execute_cmd(lambda: cmd_server_add_mof(context, options))
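# Illustrative invocation (server URL, file and namespace names are
# placeholders):
#
#   pywbemcli --server https://myserver server add-mof my_classes.mof \
#       --namespace root/test --include ./mof
#
# With --dry-run the MOF is compiled against a local cache and the server is
# only read, never modified.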
@server_group.command('remove-mof', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@click.argument('moffiles', metavar='MOFFILE', type=click.Path(),
nargs=-1, required=True)
@add_options(namespace_option)
@add_options(mof_include_option)
@add_options(mof_dry_run_option)
@add_options(help_option)
@click.pass_obj
def server_remove_mof(context, **options):
"""
Compile MOF and remove CIM objects from the server.
The MOF files are specified with the MOFFILE argument, which may be
specified multiple times. The minus sign ('-') specifies the standard
input.
Initially, the target namespace is the namespace specified with the
--namespace option or if not specified the default namespace of the
connection. If the MOF contains '#pragma namespace' directives, the target
namespace will be changed accordingly.
MOF include files (specified with the '#pragma include' directive) are
searched first in the directory of the including MOF file, and then in
the directories specified with the --include option.
Any CIM objects (instances, classes and qualifiers) specified in the MOF
files are deleted from the server.
The global --verbose option will show the CIM objects that are removed.
"""
context.execute_cmd(lambda: cmd_server_remove_mof(context, options))
@server_group.command('schema', cls=PywbemtoolsCommand,
options_metavar=CMD_OPTS_TXT)
@add_options(namespace_option)
@click.option('-d', '--detail', is_flag=True, default=False,
help=u'Display details about each schema in the namespace rather '
u'than accumulated for the namespace.')
@add_options(help_option)
@click.pass_obj
def server_schema(context, **options):
"""
Get information about the server schemas.
Gets information about the schemas and CIM schemas that define the classes
in each namespace. The information provided includes:
* The released DMTF CIM schema version that was the source for the
qualifier declarations and classes for the namespace.
* Experimental vs. final elements in the schema
* Schema name (defined by the prefix on each class before the first '_')
* Class count
"""
context.execute_cmd(lambda: cmd_server_schema(context, options))
###############################################################
# Server cmds
###############################################################
def cmd_server_brand(context):
"""
Display product and version info of the current WBEM server
"""
wbem_server = context.pywbem_server.wbem_server
output_format = validate_output_format(context.output_format, 'TEXT')
try:
brand = wbem_server.brand
context.spinner_stop()
display_text(brand, output_format)
except Error as er:
raise pywbem_error_exception(er)
def cmd_server_info(context):
"""
Display general overview of info from current WBEM server
"""
wbem_server = context.pywbem_server.wbem_server
output_format = validate_output_format(context.output_format, 'TABLE')
try:
# Execute the namespaces to force contact with server before
# turning off the spinner.
namespaces = sorted(wbem_server.namespaces)
context.spinner_stop()
rows = []
headers = ['Brand', 'Version', 'Interop Namespace', 'Namespaces']
sep = '\n' if namespaces and len(namespaces) > 3 else ', '
namespaces = sep.join(namespaces)
rows.append([wbem_server.brand, wbem_server.version,
wbem_server.interop_ns,
namespaces])
click.echo(format_table(rows, headers,
title='Server General Information',
table_format=output_format))
except Error as er:
raise pywbem_error_exception(er)
def cmd_server_add_mof(context, options):
"""
Compile MOF and add/update CIM objects in the server.
"""
conn = context.pywbem_server.conn
try:
context.spinner_stop()
# Define the connection to be used by the MOF compiler.
# MOFWBEMConnection writes resulting CIM objects to a local store
# but reads from the connection.
if options['dry_run']:
comp_handle = MOFWBEMConnection(conn=conn)
else:
comp_handle = conn
if options['dry_run']:
print('Executing in dry-run mode')
include_dirs = []
for idir in options['include']:
if not os.path.isabs(idir):
idir = os.path.abspath(idir)
include_dirs.append(idir)
for moffile in options['moffiles']:
if moffile != '-':
mofdir = os.path.dirname(moffile)
if not os.path.isabs(mofdir):
mofdir = os.path.abspath(mofdir)
for idir in include_dirs:
if mofdir.startswith(idir):
break
else:
include_dirs.append(mofdir)
mofcomp = MOFCompiler(handle=comp_handle, search_paths=include_dirs,
verbose=context.verbose)
for moffile in options['moffiles']:
if moffile == '-':
mofstr = sys.stdin.read() # bytes in py2 / text in py3
if context.verbose:
print('Compiling MOF from standard input')
# The defaulting to the connection default namespace is handled
# inside of the MOF compiler.
mofcomp.compile_string(mofstr, options['namespace'])
else:
if not os.path.isabs(moffile):
moffile = os.path.abspath(moffile)
if context.verbose:
print('Compiling MOF file {0}'.format(moffile))
# The defaulting to the connection default namespace is handled
# inside of the MOF compiler.
mofcomp.compile_file(moffile, options['namespace'])
# If MOFCompileError, exception already logged by compile_string().
except MOFCompileError:
raise click.ClickException("Compile failed.")
# Otherwise display the exception itself
except Error as exc:
raise pywbem_error_exception(exc)
def cmd_server_remove_mof(context, options):
"""
Compile MOF and remove CIM objects from the server.
"""
conn = context.pywbem_server.conn
try:
context.spinner_stop()
# Define the connection to be used by the MOF compiler.
# MOFWBEMConnection writes resulting CIM objects to a local store
# but reads from the connection.
comp_handle = MOFWBEMConnection(conn=conn)
if options['dry_run']:
print('Executing in dry-run mode')
include_dirs = []
for idir in options['include']:
if not os.path.isabs(idir):
idir = os.path.abspath(idir)
include_dirs.append(idir)
for moffile in options['moffiles']:
if moffile != '-':
mofdir = os.path.dirname(moffile)
if not os.path.isabs(mofdir):
mofdir = os.path.abspath(mofdir)
for idir in include_dirs:
if mofdir.startswith(idir):
break
else:
include_dirs.append(mofdir)
# verbose messages are displayed by rollback()
mofcomp = MOFCompiler(handle=comp_handle, search_paths=include_dirs,
verbose=False)
for moffile in options['moffiles']:
if moffile == '-':
mofstr = sys.stdin.read() # bytes in py2 / text in py3
if context.verbose:
print('Compiling MOF from standard input into cache')
# The defaulting to the connection default namespace is handled
# inside of the MOF compiler.
mofcomp.compile_string(mofstr, options['namespace'])
else:
if not os.path.isabs(moffile):
moffile = os.path.abspath(moffile)
if context.verbose:
print('Compiling MOF file {0} into cache'.format(moffile))
# The defaulting to the connection default namespace is handled
# inside of the MOF compiler.
mofcomp.compile_file(moffile, options['namespace'])
# rollback the compiled objects to remove them from the target.
if not options['dry_run']:
if context.verbose:
print('Deleting CIM objects found in MOF...')
comp_handle.rollback(verbose=context.verbose)
else:
if context.verbose:
print('No deletions will be shown in dry-run mode')
# If MOFCompileError, exception already logged by compile_string().
except MOFCompileError:
raise click.ClickException("Compile failed.")
except Error as exc:
raise pywbem_error_exception(exc)
def cmd_server_schema(context, options):
"""
The schema command provides information on the CIM model in each namespace,
including the CIM schemas defined, the released DMTF CIM schema version,
whether the namespace/schema includes classes with the experimental
qualifier, and the count of classes for the namespace and for each schema.
"""
# The schema names that can be considered DMTF schemas and are part of
# the dmtf_cim_schema
possible_dmtf_schemas = NocaseList(['CIM', 'PRS'])
def experimental_display(value):
"""Return string Experimental or empty sting"""
return 'Experimental' if value else ''
def schema_display(schema):
"""Replace dummy name for no-schema with real text"""
if schema == "~~~":
return "(no-schema)"
return schema
def version_str(version_tuple):
"""Convert 3 integer tuple to string (1.2.3) or empty strig"""
if all(i == version_tuple[0] for i in version_tuple):
return ""
return ".".join([str(i) for i in version_tuple])
conn = context.pywbem_server.conn
wbem_server = context.pywbem_server.wbem_server
output_format = validate_output_format(context.output_format, 'TABLE')
namespace_opt = options['namespace']
# Get namespaces. This bypasses the issue when there is no interop
# namespace
try:
namespaces = [namespace_opt] if namespace_opt else \
wbem_server.namespaces
except ModelError:
namespaces = [wbem_server.conn.default_namespace]
detail = options['detail']
rows = []
for ns in sorted(namespaces):
klasses = conn.EnumerateClasses(namespace=ns, DeepInheritance=True,
LocalOnly=True)
classes_count = len(klasses)
# namespace level variables for experimental status and max version
ns_experimental = False
ns_max_dmtf_version = [0, 0, 0]
# Dictionaries for schemas, schema_max_version and experimental status
# per schema found in the namespaces
schemas = NocaseDict() # Schema names are case independent
schema_max_ver = NocaseDict()
schema_experimental = NocaseDict()
no_schema = []
for klass in klasses:
schema_elements = klass.classname.split('_', 1)
schema = schema_elements[0] if len(schema_elements) > 1 \
else "~~~" # this is dummy for sort that is replaced later.
schemas[schema] = schemas.get(schema, 0) + 1
if len(schema_elements) < 2:
no_schema.append(klass.classname)
if schema not in schema_max_ver:
schema_max_ver[schema] = [0, 0, 0]
this_class_experimental = False
# Determine if experimental qualifier exists and set namespace
# level experimental flag.
if ns_experimental is False:
if is_experimental_class(klass):
ns_experimental = True
this_class_experimental = True
# If detail, set the schema level experimental flag
if detail:
if schema not in schema_experimental:
schema_experimental[schema] = False
if this_class_experimental:
schema_experimental[schema] = True
elif ns_experimental:
if schema_experimental[schema] is False:
if is_experimental_class(klass):
schema_experimental[schema] = True
# Get the version qualifier for this class
if 'Version' in klass.qualifiers:
version = klass.qualifiers['Version'].value
version = parse_version_value(version, klass.classname)
# update the namespace max version if this schema is a
# DMTF schema and not previously found
if schema in possible_dmtf_schemas:
if version > ns_max_dmtf_version:
ns_max_dmtf_version = version
# update the version in the schema_max_ver dictionary
if schema not in schema_max_ver or \
version > schema_max_ver[schema]:
schema_max_ver[schema] = version
# Build the table formatted output
prev_namespace = None
ns_version_str = version_str(ns_max_dmtf_version) \
if classes_count else ""
if detail:
headers = ['Namespace', 'schemas', 'classes\ncount',
'schema\nversion', 'experimental']
# Display with a line for each namespace and one for each
# schema in the namespace
# replace the dummy "~~~" with the output text
for schema in sorted(schemas.keys()):
schema_max_ver_str = version_str(schema_max_ver[schema])
# Set the namespace in first row for each new namespace found
if ns != prev_namespace:
prev_namespace = ns
ns_display = ns
else:
ns_display = ""
# Append the row for each schema in the namespace
rows.append([ns_display, # namespace. don't repeat
schema_display(schema), # CIM schema
schemas[schema], #
schema_max_ver_str, # schema version
experimental_display(schema_experimental[schema])])
else: # display non-detail report
# Display one line for each namespace with list of schemas in the
# namespace
headers = ['Namespace', 'schemas', 'classes\ncount',
'CIM schema\nversion', 'experimental']
schemas_str = ", ".join(sorted(list(six.iterkeys(schemas))))
schemas_str = schemas_str.replace('~~~', '(no-schema)')
folded_schemas = fold_strings(schemas_str, 45,
fold_list_items=False)
rows.append([ns,
folded_schemas,
classes_count,
ns_version_str,
experimental_display(ns_experimental)
])
# if output_format_is_table(context.output_format):
title = "Schema information{0} namespaces: {1};".format(
'; detail;' if detail else ";", namespace_opt or "all")
context.spinner_stop()
click.echo(format_table(rows,
headers,
title=title,
table_format=output_format))
|
py | 1a4a3281e5f325debfd1fb48ccb67f6a21cccade | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from soql.attributes import Integer, Relationship, String
from soql import Model
from soql import select
from soql import SelectClauseIsntValidSubquery
from soql import asc, desc, nulls_first, nulls_last
from tests.helpers import SoqlAssertions
class Grandparent(Model):
id = Integer('Id')
class Parent(Model):
id = Integer('Id')
name = String('Name')
age = Integer('Age')
mom = Relationship('Mom', related_model=Grandparent)
class Child(Model):
id = Integer('Id')
name = String('Name')
mom = Relationship('Mom', related_model=Parent)
dad = Relationship('Dad', related_model=Parent)
teacher = Relationship('Teacher', related_model='Teacher')
class Teacher(Model):
id = Integer('Id')
students = Relationship('Students', related_model=Child, many=True)
class SelectTest(unittest.TestCase, SoqlAssertions):
def test_select(self):
self.assertSoqlEqual(
select(Child),
"SELECT Child.Id, Child.Name "
"FROM Child"
)
def test_joins(self):
self.assertSoqlEqual(
select(Child).join(Child.mom),
"SELECT Child.Id, Child.Name, Child.Mom.Age, Child.Mom.Id, Child.Mom.Name "
"FROM Child"
)
self.assertSoqlEqual(
select(Teacher).join(Teacher.students),
"SELECT Teacher.Id, (SELECT Child.Id, Child.Name FROM Teacher.Students) "
"FROM Teacher"
)
self.assertSoqlEqual(
select(Teacher).join(Teacher.students).join(Teacher.students.mom),
"SELECT Teacher.Id, "
"(SELECT Child.Id, Child.Name, Child.Mom.Age, Child.Mom.Id, Child.Mom.Name FROM Teacher.Students) "
"FROM Teacher"
)
self.assertSoqlEqual(
select(Teacher).join(Teacher.students.mom),
"SELECT Teacher.Id, "
"(SELECT Child.Id, Child.Name, Child.Mom.Age, Child.Mom.Id, Child.Mom.Name FROM Teacher.Students) "
"FROM Teacher"
)
self.assertSoqlEqual(
select(Child).join(Child.mom.mom),
"SELECT Child.Id, Child.Name, Child.Mom.Age, "
"Child.Mom.Id, Child.Mom.Name, Child.Mom.Mom.Id "
"FROM Child"
)
self.assertSoqlEqual(
select(Teacher).join(Teacher.students.mom).join(
Teacher.students.dad),
"SELECT Teacher.Id, "
"(SELECT Child.Id, Child.Name, Child.Dad.Age, Child.Dad.Id, Child.Dad.Name, "
"Child.Mom.Age, Child.Mom.Id, Child.Mom.Name FROM Teacher.Students) "
"FROM Teacher"
)
self.assertSoqlEqual(
select(Child).join(Child.teacher.students.mom),
"SELECT Child.Id, Child.Name, Child.Teacher.Id, "
"(SELECT Child.Id, Child.Name, Child.Mom.Age, Child.Mom.Id, "
"Child.Mom.Name FROM Child.Teacher.Students) "
"FROM Child"
)
def test_filters(self):
self.assertSoqlEqual(
select(Child).where(Child.id == '123'),
"SELECT Child.Id, Child.Name "
"FROM Child "
"WHERE Child.Id = '123'"
)
self.assertSoqlEqual(
select(Child).where(Child.id == '123').where(Child.name == 'Jill'),
"SELECT Child.Id, Child.Name "
"FROM Child "
"WHERE Child.Id = '123' AND Child.Name = 'Jill'"
)
self.assertSoqlEqual(
select(Child).where(Child.name == u'CATMONKÈ-123490'),
u"SELECT Child.Id, Child.Name "
u"FROM Child "
u"WHERE Child.Name = 'CATMONKÈ-123490'"
)
def test_order_by(self):
self.assertSoqlEqual(
select(Parent).order_by(Parent.age),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age).order_by(Parent.id),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age, Parent.Id"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age, direction=desc),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age DESC"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age, direction=desc).order_by(Parent.id, direction=asc),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age DESC, Parent.Id ASC"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age, direction=asc, nulls_position=nulls_first),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age ASC NULLS FIRST"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age, direction=desc, nulls_position=nulls_last),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age DESC NULLS LAST"
)
def test_count(self):
self.assertSoqlEqual(
select(Child).count(),
"SELECT COUNT() "
"FROM Child"
)
def test_offset_and_limit(self):
self.assertSoqlEqual(
select(Child).limit(100),
"SELECT Child.Id, Child.Name "
"FROM Child "
"LIMIT 100"
)
self.assertSoqlEqual(
select(Child).offset(100),
"SELECT Child.Id, Child.Name "
"FROM Child "
"OFFSET 100"
)
self.assertSoqlEqual(
select(Parent).order_by(Parent.age).offset(100).limit(100),
"SELECT Parent.Age, Parent.Id, Parent.Name "
"FROM Parent "
"ORDER BY Parent.Age "
"LIMIT 100 "
"OFFSET 100"
)
def test_override_columns(self):
self.assertSoqlEqual(
select(Parent).columns(Parent.id),
"SELECT Parent.Id "
"FROM Parent"
)
self.assertSoqlEqual(
select(Parent).columns(Parent.id, Parent.name),
"SELECT Parent.Id, Parent.Name "
"FROM Parent"
)
def test_subquery(self):
self.assertSoqlEqual(
select(Parent).columns(Parent.id).subquery(),
"(SELECT Parent.Id FROM Parent)"
)
subquery = select(Parent).columns(Parent.name).subquery()
self.assertSoqlEqual(
select(Child).where(Child.name.in_(subquery)),
"SELECT Child.Id, Child.Name "
"FROM Child "
"WHERE Child.Name IN (SELECT Parent.Name FROM Parent)"
)
with self.assertRaises(SelectClauseIsntValidSubquery):
select(Parent).offset(100).subquery()
|
py | 1a4a32c4d107dbb0aaafdf77fb6a832f1c837a2a | import numpy as np
from torch.nn import functional as F
from ConSSL.utils import _PIL_AVAILABLE
from ConSSL.utils.warnings import warn_missing_pkg
if _PIL_AVAILABLE:
from PIL import Image
else: # pragma: no cover
warn_missing_pkg('PIL', pypi_name='Pillow')
class RandomTranslateWithReflect:
"""
Translate image randomly
Translate vertically and horizontally by n pixels where
n is integer drawn uniformly independently for each axis
from [-max_translation, max_translation].
Fill the uncovered blank area with reflect padding.
"""
def __init__(self, max_translation):
if not _PIL_AVAILABLE: # pragma: no cover
raise ModuleNotFoundError("You want to use `Pillow` which is not installed yet.")
self.max_translation = max_translation
def __call__(self, old_image):
xtranslation, ytranslation = np.random.randint(-self.max_translation, self.max_translation + 1, size=2)
xpad, ypad = abs(xtranslation), abs(ytranslation)
xsize, ysize = old_image.size
flipped_lr = old_image.transpose(Image.FLIP_LEFT_RIGHT)
flipped_tb = old_image.transpose(Image.FLIP_TOP_BOTTOM)
flipped_both = old_image.transpose(Image.ROTATE_180)
new_image = Image.new("RGB", (xsize + 2 * xpad, ysize + 2 * ypad))
new_image.paste(old_image, (xpad, ypad))
new_image.paste(flipped_lr, (xpad + xsize - 1, ypad))
new_image.paste(flipped_lr, (xpad - xsize + 1, ypad))
new_image.paste(flipped_tb, (xpad, ypad + ysize - 1))
new_image.paste(flipped_tb, (xpad, ypad - ysize + 1))
new_image.paste(flipped_both, (xpad - xsize + 1, ypad - ysize + 1))
new_image.paste(flipped_both, (xpad + xsize - 1, ypad - ysize + 1))
new_image.paste(flipped_both, (xpad - xsize + 1, ypad + ysize - 1))
new_image.paste(flipped_both, (xpad + xsize - 1, ypad + ysize - 1))
new_image = new_image.crop(
(xpad - xtranslation, ypad - ytranslation, xpad + xsize - xtranslation, ypad + ysize - ytranslation)
)
return new_image
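# Usage sketch (the image and translation amount are placeholders):
#
#   transform = RandomTranslateWithReflect(max_translation=4)
#   shifted = transform(pil_image)   # pil_image: a PIL.Image in RGB mode
#
# The paste calls above tile the canvas with mirrored copies so the random
# crop never exposes uninitialized pixels.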
class Patchify(object):
def __init__(self, patch_size, overlap_size):
self.patch_size = patch_size
self.overlap_size = self.patch_size - overlap_size
def __call__(self, x):
x = x.unsqueeze(0)
b, c, h, w = x.size()
# patch up the images
# (b, c, h, w) -> (b, c*patch_size, L)
x = F.unfold(x, kernel_size=self.patch_size, stride=self.overlap_size)
# (b, c*patch_size, L) -> (b, nb_patches, width, height)
x = x.transpose(2, 1).contiguous().view(b, -1, self.patch_size, self.patch_size)
# reshape to have (b x patches, c, h, w)
x = x.view(-1, c, self.patch_size, self.patch_size)
x = x.squeeze(0)
return x
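# Shape sketch (sizes assumed for illustration): for a 3x32x32 input with
# Patchify(patch_size=8, overlap_size=4), the unfold stride is 8 - 4 = 4,
# giving (32 - 8) // 4 + 1 = 7 patch positions per axis, so the output has
# shape (49, 3, 8, 8).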
|
py | 1a4a33a06af4fcc47bfd579990ab1f1d803fe59d | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rocthrust(CMakePackage):
"""Thrust is a parallel algorithm library. This library has been ported to
HIP/ROCm platform, which uses the rocPRIM library. The HIP ported
library works on HIP/ROCm platforms"""
homepage = "https://github.com/ROCmSoftwarePlatform/rocThrust"
url = "https://github.com/ROCmSoftwarePlatform/rocThrust/archive/rocm-3.10.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala']
version('3.10.0', sha256='31bea6cd19a0ffa15e4ab50ecde2402ea5aaa182149cfab98242357e41f1805b')
version('3.9.0', sha256='65f5e74d72c5aaee90459468d693b212af7d56e31098ee8237b18d1b4d620eb0')
version('3.8.0', sha256='39350aeb8bfbcd09e387717b2a05c7e3a19e0fa85ff4284b967bb8fae12f9013')
version('3.7.0', sha256='4cb923dde5eec150a566cb10d23ee5c7ce3aa892c4dea94886a89d95b90f3bdd')
version('3.5.0', sha256='0d1bac1129d17bb1259fd06f5c9cb4c1620d1790b5c295b866fb3442d18923cb')
variant('build_type', default='Release', values=("Release", "Debug"),
description='CMake build type')
depends_on('cmake@3:', type='build')
depends_on('numactl', when='@3.7.0:')
for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0']:
depends_on('hip@' + ver, type='build', when='@' + ver)
depends_on('rocm-device-libs@' + ver, type='build', when='@' + ver)
depends_on('comgr@' + ver, type='build', when='@' + ver)
depends_on('hsa-rocr-dev@' + ver, type='build', when='@' + ver)
depends_on('rocprim@' + ver, type='build', when='@' + ver)
def setup_build_environment(self, env):
env.set('CXX', self.spec['hip'].hipcc)
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_MODULE_PATH={0}/cmake'.format(spec['hip'].prefix)
]
return args
|
py | 1a4a341d631ea11a1d8d9d25e6a110463af38497 | import numpy as np
import pandas as pd
import os
def read_data():
# set the path of the raw data
raw_data_path = os.path.join(os.path.pardir,'data','raw')
train_file_path = os.path.join(raw_data_path, 'train.csv')
test_file_path = os.path.join(raw_data_path, 'test.csv')
# read the data with all default parameters
train_df = pd.read_csv(train_file_path, index_col='PassengerId')
test_df = pd.read_csv(test_file_path, index_col='PassengerId')
# We don't have the Survived field in Test, so let's fill it with a default so we can
# concat test and train together
test_df['Survived'] = -888
df = pd.concat((train_df, test_df), axis=0)
return df
def process_data(df):
# using the method chaining concept - this is different from the code we wrote in each cell
# we can chain methods, and the next function uses the output of the previous
return (df
# create title attribute - then add this
.assign(Title = lambda x: x.Name.map(get_title))
# working missing values - start with this
.pipe(fill_missing_values) #This lets us apply a function into the data frame
# create fare bin feature
.assign(Fare_Bin = lambda x: pd.qcut(x.Fare, 4, labels=['very_low','low','high','very_high']))
# create age state
.assign(AgeState = lambda x : np.where(x.Age >= 18, 'Adult','Child'))
.assign(FamilySize = lambda x : x.Parch + x.SibSp + 1)
.assign(IsMother = lambda x : np.where(((x.Sex == 'female') & (x.Parch > 0) & (x.Age > 18) & (x.Title != 'Miss')), 1, 0))
# create deck feature
.assign(Cabin = lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin))
.assign(Deck = lambda x : x.Cabin.map(get_deck))
# feature encoding
.assign(IsMale = lambda x : np.where(x.Sex == 'male', 1,0))
.pipe(pd.get_dummies, columns=['Deck', 'Pclass','Title', 'Fare_Bin', 'Embarked','AgeState'])
# add code to drop unnecessary columns
.drop(['Cabin','Name','Ticket','Parch','SibSp','Sex'], axis=1) #no need for inplace option here, since we are using chaining
# reorder columns
.pipe(reorder_columns)
)
def get_title(name):
title_group = {'mr' : 'Mr',
'mrs' : 'Mrs',
'miss' : 'Miss',
'master' : 'Master',
'don' : 'Sir',
'rev' : 'Sir',
'dr' : 'Officer',
'mme' : 'Mrs',
'ms' : 'Mrs',
'major' : 'Officer',
'lady' : 'Lady',
'sir' : 'Sir',
'mlle' : 'Miss',
'col' : 'Officer',
'capt' : 'Officer',
'the countess' : 'Lady',
'jonkheer' : 'Sir',
'dona' : 'Lady'
}
first_name_with_title = name.split(',')[1]
title = first_name_with_title.split('.')[0]
title = title.strip().lower()
return title_group[title]
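# For example, get_title("Braund, Mr. Owen Harris") isolates " Mr", strips and
# lowercases it to "mr", and maps it to "Mr"; rarer titles such as
# "the Countess" map to "Lady".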
def get_deck(cabin):
return np.where(pd.notnull(cabin),str(cabin)[0].upper(),'Z')
def fill_missing_values(df):
# embarked
df.Embarked.fillna('C', inplace=True)
# fare
median_fare = df[(df.Pclass == 3) & (df.Embarked == 'S')]['Fare'].median()
df.Fare.fillna(median_fare, inplace=True)
# age
title_age_median = df.groupby('Title').Age.transform('median')
df.Age.fillna(title_age_median , inplace=True)
return df
def reorder_columns(df):
columns = [column for column in df.columns if column != 'Survived']
columns = ['Survived'] + columns
df = df[columns]
return df
def write_data(df):
processed_data_path = os.path.join(os.path.pardir,'data','processed')
write_train_path = os.path.join(processed_data_path, 'train.csv')
write_test_path = os.path.join(processed_data_path, 'test.csv')
# train data
df[df.Survived != -888].to_csv(write_train_path)
# test data
columns = [column for column in df.columns if column != 'Survived']
df[df.Survived == -888][columns].to_csv(write_test_path)
if __name__ == '__main__':
df = read_data()
df = process_data(df)
write_data(df) |
py | 1a4a34ae504497234328c6f884d5686647402a41 | import functools
import operator
from collections import namedtuple
from json import dumps, loads
from galaxy_test.base.populators import skip_without_tool, summarize_instance_history_on_error
from .test_workflows import BaseWorkflowsApiTestCase
class WorkflowExtractionApiTestCase(BaseWorkflowsApiTestCase):
history_id: str
def setUp(self):
super().setUp()
self.history_id = self.dataset_populator.new_history()
@skip_without_tool("cat1")
@summarize_instance_history_on_error
def test_extract_from_history(self):
# Run the simple test workflow and extract it back out from history
cat1_job_id = self.__setup_and_run_cat1_workflow(history_id=self.history_id)
contents = self._history_contents()
input_hids = [c["hid"] for c in contents[0:2]]
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="extract_from_history_basic",
dataset_ids=input_hids,
job_ids=[cat1_job_id],
)
self.assertEqual(downloaded_workflow["name"], "test import from history")
self.__assert_looks_like_cat1_example_workflow(downloaded_workflow)
@summarize_instance_history_on_error
def test_extract_with_copied_inputs(self):
old_history_id = self.dataset_populator.new_history()
# Run the simple test workflow and extract it back out from history
self.__setup_and_run_cat1_workflow(history_id=old_history_id)
# Bug cannot mess up hids or these don't extract correctly. See Trello card here:
# https://trello.com/c/mKzLbM2P
# # create dummy dataset to complicate hid mapping
# self.dataset_populator.new_dataset( history_id, content="dummydataset" )
# offset = 1
offset = 0
old_contents = self._history_contents(old_history_id)
for old_dataset in old_contents:
self.__copy_content_to_history(self.history_id, old_dataset)
new_contents = self._history_contents()
input_hids = [c["hid"] for c in new_contents[(offset + 0):(offset + 2)]]
cat1_job_id = self.__job_id(self.history_id, new_contents[(offset + 2)]["id"])
def reimport_jobs_ids(new_history_id):
return [j["id"] for j in self.dataset_populator.history_jobs(new_history_id) if j["tool_id"] == "cat1"]
downloaded_workflow = self._extract_and_download_workflow(
dataset_ids=input_hids,
job_ids=[cat1_job_id],
)
self.__assert_looks_like_cat1_example_workflow(downloaded_workflow)
@summarize_instance_history_on_error
def test_extract_with_copied_inputs_reimported(self):
old_history_id = self.dataset_populator.new_history()
# Run the simple test workflow and extract it back out from history
self.__setup_and_run_cat1_workflow(history_id=old_history_id)
offset = 0
old_contents = self._history_contents(old_history_id)
for old_dataset in old_contents:
self.__copy_content_to_history(self.history_id, old_dataset)
new_contents = self._history_contents()
input_hids = [c["hid"] for c in new_contents[(offset + 0):(offset + 2)]]
def reimport_jobs_ids(new_history_id):
return [j["id"] for j in self.dataset_populator.history_jobs(new_history_id) if j["tool_id"] == "cat1"]
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_with_copied_inputs",
reimport_jobs_ids=reimport_jobs_ids,
dataset_ids=input_hids,
)
self.__assert_looks_like_cat1_example_workflow(downloaded_workflow)
@skip_without_tool("random_lines1")
@summarize_instance_history_on_error
def test_extract_mapping_workflow_from_history(self):
hdca, job_id1, job_id2 = self.__run_random_lines_mapped_over_pair(self.history_id)
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="extract_from_history_with_mapping",
dataset_collection_ids=[hdca["hid"]],
job_ids=[job_id1, job_id2],
)
self.__assert_looks_like_randomlines_mapping_workflow(downloaded_workflow)
def test_extract_copied_mapping_from_history(self):
old_history_id = self.dataset_populator.new_history()
hdca, job_id1, job_id2 = self.__run_random_lines_mapped_over_pair(old_history_id)
old_contents = self._history_contents(old_history_id)
for old_content in old_contents:
self.__copy_content_to_history(self.history_id, old_content)
# API test is somewhat contrived since there is no good way
# to retrieve job_id1, job_id2 like this for copied dataset
# collections I don't think.
downloaded_workflow = self._extract_and_download_workflow(
dataset_collection_ids=[hdca["hid"]],
job_ids=[job_id1, job_id2],
)
self.__assert_looks_like_randomlines_mapping_workflow(downloaded_workflow)
def test_extract_copied_mapping_from_history_reimported(self):
import unittest
raise unittest.SkipTest("Mapping connection for copied collections not yet implemented in history import/export")
old_history_id = self.dataset_populator.new_history()
hdca, job_id1, job_id2 = self.__run_random_lines_mapped_over_singleton(old_history_id)
old_contents = self._history_contents(old_history_id)
for old_content in old_contents:
self.__copy_content_to_history(self.history_id, old_content)
def reimport_jobs_ids(new_history_id):
rval = [j["id"] for j in self.dataset_populator.history_jobs(new_history_id) if j["tool_id"] == "random_lines1"]
assert len(rval) == 2
print(rval)
return rval
# API test is somewhat contrived since there is no good way
# to retrieve job_id1, job_id2 like this for copied dataset
# collections I don't think.
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_from_history_with_mapped_collection_reimport",
reimport_jobs_ids=reimport_jobs_ids,
reimport_wait_on_history_length=9, # see comments in _extract about eliminating this magic constant.
dataset_collection_ids=[hdca["hid"]],
)
self.__assert_looks_like_randomlines_mapping_workflow(downloaded_workflow)
@skip_without_tool("random_lines1")
@skip_without_tool("multi_data_param")
def test_extract_reduction_from_history(self):
hdca = self.dataset_collection_populator.create_pair_in_history(self.history_id, contents=["1 2 3\n4 5 6", "7 8 9\n10 11 10"]).json()
hdca_id = hdca["id"]
inputs1 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]},
"num_lines": 2
}
implicit_hdca1, job_id1 = self._run_tool_get_collection_and_job_id(self.history_id, "random_lines1", inputs1)
inputs2 = {
"f1": {"src": "hdca", "id": implicit_hdca1["id"]},
"f2": {"src": "hdca", "id": implicit_hdca1["id"]},
}
reduction_run_output = self.dataset_populator.run_tool(
tool_id="multi_data_param",
inputs=inputs2,
history_id=self.history_id,
)
job_id2 = reduction_run_output["jobs"][0]["id"]
self.dataset_populator.wait_for_job(job_id2, assert_ok=True)
self.dataset_populator.wait_for_history(self.history_id, assert_ok=True)
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="extract_from_history_with_reduction",
dataset_collection_ids=[hdca["hid"]],
job_ids=[job_id1, job_id2],
)
assert len(downloaded_workflow["steps"]) == 3
collect_step_idx = self._assert_first_step_is_paired_input(downloaded_workflow)
tool_steps = self._get_steps_of_type(downloaded_workflow, "tool", expected_len=2)
random_lines_map_step = tool_steps[0]
reduction_step = tool_steps[1]
assert "tool_id" in random_lines_map_step, random_lines_map_step
assert random_lines_map_step["tool_id"] == "random_lines1", random_lines_map_step
assert "input_connections" in random_lines_map_step, random_lines_map_step
random_lines_input_connections = random_lines_map_step["input_connections"]
assert "input" in random_lines_input_connections, random_lines_map_step
random_lines_input = random_lines_input_connections["input"]
assert random_lines_input["id"] == collect_step_idx
reduction_step_input = reduction_step["input_connections"]["f1"]
assert reduction_step_input["id"] == random_lines_map_step["id"]
@skip_without_tool("collection_paired_test")
def test_extract_workflows_with_dataset_collections(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
- label: text_input1
type: input_collection
- tool_id: collection_paired_test
state:
f1:
$link: text_input1
test_data:
text_input1:
collection_type: paired
""")
job_id = self._job_id_for_tool(jobs_summary.jobs, "collection_paired_test")
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="extract_from_history_with_basic_collections",
dataset_collection_ids=["1"],
job_ids=[job_id],
)
self.__check_workflow(
downloaded_workflow,
step_count=2,
verify_connected=True,
data_input_count=0,
data_collection_input_count=1,
tool_ids=["collection_paired_test"]
)
collection_step = self._get_steps_of_type(downloaded_workflow, "data_collection_input", expected_len=1)[0]
collection_step_state = loads(collection_step["tool_state"])
self.assertEqual(collection_step_state["collection_type"], "paired")
@skip_without_tool("cat_collection")
def test_subcollection_mapping(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
- label: text_input1
type: input_collection
- label: noop
tool_id: cat1
state:
input1:
$link: text_input1
- tool_id: cat_collection
state:
input1:
$link: noop/out_file1
test_data:
text_input1:
collection_type: "list:paired"
""")
job1_id = self._job_id_for_tool(jobs_summary.jobs, "cat1")
job2_id = self._job_id_for_tool(jobs_summary.jobs, "cat_collection")
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_workflows_with_subcollection_mapping",
dataset_collection_ids=["1"],
job_ids=[job1_id, job2_id],
)
self.__check_workflow(
downloaded_workflow,
step_count=3,
verify_connected=True,
data_input_count=0,
data_collection_input_count=1,
tool_ids=["cat_collection", "cat1"],
)
collection_step = self._get_steps_of_type(downloaded_workflow, "data_collection_input", expected_len=1)[0]
collection_step_state = loads(collection_step["tool_state"])
self.assertEqual(collection_step_state["collection_type"], "list:paired")
@skip_without_tool("cat_list")
@skip_without_tool("collection_creates_dynamic_nested")
def test_subcollection_reduction(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
creates_nested_list:
tool_id: collection_creates_dynamic_nested
reduce_nested_list:
tool_id: cat_list
in:
input1: creates_nested_list/list_output
""")
job1_id = self._job_id_for_tool(jobs_summary.jobs, "cat_list")
job2_id = self._job_id_for_tool(jobs_summary.jobs, "collection_creates_dynamic_nested")
self._extract_and_download_workflow(
reimport_as="test_extract_workflows_with_subcollection_reduction",
dataset_collection_ids=["1"],
job_ids=[job1_id, job2_id],
)
# TODO: refactor workflow extraction to not rely on HID, so we can actually properly connect
# this workflow
@skip_without_tool("collection_split_on_column")
def test_extract_workflow_with_output_collections(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
- label: text_input1
type: input
- label: text_input2
type: input
- label: cat_inputs
tool_id: cat1
state:
input1:
$link: text_input1
queries:
- input2:
$link: text_input2
- label: split_up
tool_id: collection_split_on_column
state:
input1:
$link: cat_inputs/out_file1
- tool_id: cat_list
state:
input1:
$link: split_up/split_output
test_data:
text_input1: "samp1\t10.0\nsamp2\t20.0\n"
text_input2: "samp1\t30.0\nsamp2\t40.0\n"
""")
tool_ids = ["cat1", "collection_split_on_column", "cat_list"]
job_ids = [functools.partial(self._job_id_for_tool, jobs_summary.jobs)(_) for _ in tool_ids]
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_workflows_with_output_collections",
dataset_ids=["1", "2"],
job_ids=job_ids,
)
self.__check_workflow(
downloaded_workflow,
step_count=5,
verify_connected=True,
data_input_count=2,
data_collection_input_count=0,
tool_ids=tool_ids,
)
@skip_without_tool("collection_creates_pair")
@summarize_instance_history_on_error
def test_extract_with_mapped_output_collections(self):
jobs_summary = self._run_workflow("""
class: GalaxyWorkflow
steps:
- label: text_input1
type: input_collection
- label: cat_inputs
tool_id: cat1
state:
input1:
$link: text_input1
- label: pair_off
tool_id: collection_creates_pair
state:
input1:
$link: cat_inputs/out_file1
- label: cat_pairs
tool_id: cat_collection
state:
input1:
$link: pair_off/paired_output
- tool_id: cat_list
state:
input1:
$link: cat_pairs/out_file1
test_data:
text_input1:
collection_type: list
elements:
- identifier: samp1
content: "samp1\t10.0\nsamp2\t20.0\n"
- identifier: samp2
content: "samp1\t30.0\nsamp2\t40.0\n"
""")
tool_ids = ["cat1", "collection_creates_pair", "cat_collection", "cat_list"]
job_ids = [functools.partial(self._job_id_for_tool, jobs_summary.jobs)(_) for _ in tool_ids]
downloaded_workflow = self._extract_and_download_workflow(
reimport_as="test_extract_workflows_with_mapped_output_collections",
dataset_collection_ids=["1"],
job_ids=job_ids,
)
self.__check_workflow(
downloaded_workflow,
step_count=5,
verify_connected=True,
data_input_count=0,
data_collection_input_count=1,
tool_ids=tool_ids,
)
def _job_id_for_tool(self, jobs, tool_id):
return self._job_for_tool(jobs, tool_id)["id"]
def _job_for_tool(self, jobs, tool_id):
tool_jobs = [j for j in jobs if j["tool_id"] == tool_id]
if not tool_jobs:
raise ValueError(f"Failed to find job for tool {tool_id}")
# if len( tool_jobs ) > 1:
# assert False, "Found multiple jobs for tool %s" % tool_id
return tool_jobs[-1]
def __run_random_lines_mapped_over_pair(self, history_id):
hdca = self.dataset_collection_populator.create_pair_in_history(history_id, contents=["1 2 3\n4 5 6", "7 8 9\n10 11 10"]).json()
hdca_id = hdca["id"]
inputs1 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]},
"num_lines": 2
}
implicit_hdca1, job_id1 = self._run_tool_get_collection_and_job_id(history_id, "random_lines1", inputs1)
inputs2 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": implicit_hdca1["id"]}]},
"num_lines": 1
}
_, job_id2 = self._run_tool_get_collection_and_job_id(history_id, "random_lines1", inputs2)
return hdca, job_id1, job_id2
def __run_random_lines_mapped_over_singleton(self, history_id):
hdca = self.dataset_collection_populator.create_list_in_history(history_id, contents=["1 2 3\n4 5 6"]).json()
hdca_id = hdca["id"]
inputs1 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]},
"num_lines": 2
}
implicit_hdca1, job_id1 = self._run_tool_get_collection_and_job_id(history_id, "random_lines1", inputs1)
inputs2 = {
"input": {"batch": True, "values": [{"src": "hdca", "id": implicit_hdca1["id"]}]},
"num_lines": 1
}
_, job_id2 = self._run_tool_get_collection_and_job_id(history_id, "random_lines1", inputs2)
return hdca, job_id1, job_id2
def __assert_looks_like_randomlines_mapping_workflow(self, downloaded_workflow):
# Assert workflow is input connected to a tool step with one output
# connected to another tool step.
assert len(downloaded_workflow["steps"]) == 3
collect_step_idx = self._assert_first_step_is_paired_input(downloaded_workflow)
tool_steps = self._get_steps_of_type(downloaded_workflow, "tool", expected_len=2)
tool_step_idxs = []
tool_input_step_idxs = []
for tool_step in tool_steps:
self._assert_has_key(tool_step["input_connections"], "input")
input_step_idx = tool_step["input_connections"]["input"]["id"]
tool_step_idxs.append(tool_step["id"])
tool_input_step_idxs.append(input_step_idx)
assert collect_step_idx not in tool_step_idxs
assert tool_input_step_idxs[0] == collect_step_idx
assert tool_input_step_idxs[1] == tool_step_idxs[0]
def __assert_looks_like_cat1_example_workflow(self, downloaded_workflow):
assert len(downloaded_workflow["steps"]) == 3
input_steps = self._get_steps_of_type(downloaded_workflow, "data_input", expected_len=2)
tool_step = self._get_steps_of_type(downloaded_workflow, "tool", expected_len=1)[0]
input1 = tool_step["input_connections"]["input1"]
input2 = tool_step["input_connections"]["queries_0|input2"]
self.assertEqual(input_steps[0]["id"], input1["id"])
self.assertEqual(input_steps[1]["id"], input2["id"])
def _history_contents(self, history_id=None):
if history_id is None:
history_id = self.history_id
return self._get(f"histories/{history_id}/contents").json()
def __copy_content_to_history(self, history_id, content):
if content["history_content_type"] == "dataset":
payload = dict(
source="hda",
content=content["id"]
)
response = self._post(f"histories/{history_id}/contents/datasets", payload, json=True)
else:
payload = dict(
source="hdca",
content=content["id"]
)
response = self._post(f"histories/{history_id}/contents/dataset_collections", payload, json=True)
self._assert_status_code_is(response, 200)
return response.json()
def __setup_and_run_cat1_workflow(self, history_id):
workflow = self.workflow_populator.load_workflow(name="test_for_extract")
workflow_request, history_id, workflow_id = self._setup_workflow_run(workflow, history_id=history_id)
run_workflow_response = self._post(f"workflows/{workflow_id}/invocations", data=workflow_request)
self._assert_status_code_is(run_workflow_response, 200)
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
return self.__cat_job_id(history_id)
def _assert_first_step_is_paired_input(self, downloaded_workflow):
collection_steps = self._get_steps_of_type(downloaded_workflow, "data_collection_input", expected_len=1)
collection_step = collection_steps[0]
collection_step_state = loads(collection_step["tool_state"])
self.assertEqual(collection_step_state["collection_type"], "paired")
collect_step_idx = collection_step["id"]
return collect_step_idx
def _extract_and_download_workflow(self, **extract_payload):
reimport_as = extract_payload.get("reimport_as")
if reimport_as:
history_name = reimport_as
history_id = self.history_id
self.dataset_populator.wait_for_history(history_id)
self.dataset_populator.rename_history(history_id, history_name)
history_length = extract_payload.get("reimport_wait_on_history_length")
if history_length is None:
# sometimes this won't be the same (i.e. datasets copied from outside the history
# that need to be included in target history for collections), but we can provide
# a reasonable default for fully in-history imports.
history_length = self.dataset_populator.history_length(history_id)
new_history_id = self.dataset_populator.reimport_history(
history_id, history_name, wait_on_history_length=history_length, export_kwds={}, api_key=self.galaxy_interactor.api_key
)
# wait a little more for those jobs, todo fix to wait for history imported false or
# for a specific number of jobs...
import time
time.sleep(1)
if "reimport_jobs_ids" in extract_payload:
new_history_job_ids = extract_payload["reimport_jobs_ids"](new_history_id)
extract_payload["job_ids"] = new_history_job_ids
else:
# Assume no copying or anything so just straight map job ids by index.
# Jobs are created after datasets, need to also wait on those...
history_jobs = [j for j in self.dataset_populator.history_jobs(history_id) if j["tool_id"] != "__EXPORT_HISTORY__"]
new_history_jobs = [j for j in self.dataset_populator.history_jobs(new_history_id) if j["tool_id"] != "__EXPORT_HISTORY__"]
history_job_ids = [j["id"] for j in history_jobs]
new_history_job_ids = [j["id"] for j in new_history_jobs]
assert len(history_job_ids) == len(new_history_job_ids)
if "job_ids" in extract_payload:
job_ids = extract_payload["job_ids"]
new_job_ids = []
for job_id in job_ids:
new_job_ids.append(new_history_job_ids[history_job_ids.index(job_id)])
extract_payload["job_ids"] = new_job_ids
self.history_id = new_history_id
if "from_history_id" not in extract_payload:
extract_payload["from_history_id"] = self.history_id
if "workflow_name" not in extract_payload:
extract_payload["workflow_name"] = "test import from history"
for key in "job_ids", "dataset_ids", "dataset_collection_ids":
if key in extract_payload:
value = extract_payload[key]
if isinstance(value, list):
extract_payload[key] = dumps(value)
create_workflow_response = self._post("workflows", data=extract_payload)
self._assert_status_code_is(create_workflow_response, 200)
new_workflow_id = create_workflow_response.json()["id"]
download_response = self._get(f"workflows/{new_workflow_id}/download")
self._assert_status_code_is(download_response, 200)
downloaded_workflow = download_response.json()
return downloaded_workflow
def _get_steps_of_type(self, downloaded_workflow, type, expected_len=None):
steps = [s for s in downloaded_workflow["steps"].values() if s["type"] == type]
if expected_len is not None:
n = len(steps)
assert n == expected_len, "Expected %d steps of type %s, found %d" % (expected_len, type, n)
return sorted(steps, key=operator.itemgetter("id"))
def __job_id(self, history_id, dataset_id):
url = f"histories/{history_id}/contents/{dataset_id}/provenance"
prov_response = self._get(url, data=dict(follow=False))
self._assert_status_code_is(prov_response, 200)
return prov_response.json()["job_id"]
def __cat_job_id(self, history_id):
data = dict(history_id=history_id, tool_id="cat1")
jobs_response = self._get("jobs", data=data)
self._assert_status_code_is(jobs_response, 200)
cat1_job_id = jobs_response.json()[0]["id"]
return cat1_job_id
def _run_tool_get_collection_and_job_id(self, history_id, tool_id, inputs):
run_output1 = self.dataset_populator.run_tool(
tool_id=tool_id,
inputs=inputs,
history_id=history_id,
)
implicit_hdca = run_output1["implicit_collections"][0]
job_id = run_output1["jobs"][0]["id"]
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
return implicit_hdca, job_id
def __check_workflow(
self,
workflow,
step_count=None,
verify_connected=False,
data_input_count=None,
data_collection_input_count=None,
tool_ids=None,
):
steps = workflow['steps']
if step_count is not None:
assert len(steps) == step_count
if verify_connected:
self.__assert_connected(workflow, steps)
if tool_ids is not None:
tool_steps = self._get_steps_of_type(workflow, "tool")
found_steps = set(map(operator.itemgetter("tool_id"), tool_steps))
expected_steps = set(tool_ids)
assert found_steps == expected_steps
if data_input_count is not None:
self._get_steps_of_type(workflow, "data_input", expected_len=data_input_count)
if data_collection_input_count is not None:
self._get_steps_of_type(workflow, "data_collection_input", expected_len=data_collection_input_count)
def __assert_connected(self, workflow, steps):
disconnected_inputs = []
for value in steps.values():
if value['type'] == "tool":
input_connections = value["input_connections"]
if not input_connections:
disconnected_inputs.append(value)
if disconnected_inputs:
template = "%d steps disconnected in extracted workflow - disconnected steps are %s - workflow is %s"
message = template % (len(disconnected_inputs), disconnected_inputs, workflow)
raise AssertionError(message)
RunJobsSummary = namedtuple('RunJobsSummary', ['history_id', 'workflow_id', 'inputs', 'jobs'])
|
py | 1a4a36c0f309d11038b5447a57ad626c6b6673a0 |
# coding: utf-8
# # Chart presentation (8) - Changing hovertext (1)
# In the last lessons we learnt how to use Pandas' <code>df.apply()</code> in conjunction with a user-defined or a <code>lambda</code> function to create a column in our DataFrame to store the value for the hovertext.
#
# In this lesson we'll apply what we've learnt to the stacked quantity C02 emissions area plot, and in the next we'll update the stacked proportional C02 emissions area plot.
#
# We will get the data and rewrite the code which creates the chart rather than reloading the charts as we need to manipulate the DataFrames from which they were created in order to make the hovertext field.
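# As a minimal sketch of that pattern (assuming a DataFrame df with a numeric column 'Emissions'):
#
#     df['Hover'] = df['Emissions'].apply(lambda x: "Total C02 Emissions: {:,}Kt".format(int(round(x, 0))))
#
# Each value is rounded, formatted with a thousands separator and a 'Kt' suffix, and stored in a
# new column that can later be passed to a trace's 'text' attribute.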
# ## Module Imports
# In[1]:
#plotly.offline doesn't push your charts to the cloud
import plotly.offline as pyo
#allows us to create the Data and Figure objects
from plotly.graph_objs import *
#plotly.plotly pushes your charts to the cloud
import plotly.plotly as py
#pandas is a data analysis library
import pandas as pd
from pandas import DataFrame
# In[2]:
#lets us see the charts in an iPython Notebook
pyo.offline.init_notebook_mode() # run at the start of every ipython
# ### Stacked quantity area plot
#
# Let's get the emissions data again:
# In[3]:
emissions = pd.read_csv("http://richard-muir.com/data/public/csv/TotalCo2EmissionsByCountry.csv", index_col=0)
emissions.head()
# ### Writing a function
#
# Seeing as we have to rewrite the code for this chart, let's try to do it as programmatically as we can. In lesson 13 of the Lineplot section we used a very long-winded way of making this chart, however in the subsequent lessons we found that we could reduce the amount of code by using the <code>df.cumsum()</code> method. We then further generalised the code by writing a function to create a stacked proportional area plot; we'll use the ideas from that function as a base to write one for a stacked quantity area plot.
#
# If you'd like a challenge, go ahead and write a function which makes a stacked quantity area plot (you can base this code on the stacked proportional area function); alternatively, you can code along with me!
#
# This function will have six arguments: the same five as for creating the stacked proportional area plot, plus one more which will define some of the text that goes in the hovertext field. As before, I'll write the explanation here and only include it in the finished function to save on space. We'll also test the function as we go.
# In[4]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
"""
A function which manipulates the data into the correct format to produce a stacked quantity area plot with Plotly.
Takes six arguments:
df - a pandas DataFrame
time - the time element of the data, must be a column in the DataFrame
cols - the name of the columns in the DataFrame which you want to include in the area plot
hover - the text common to every hoverlabel
title - the title of the chart
yaxisTitle - the yaxis title of the chart (the xaxis title comes from the time variable)
"""
# We need to reduce the input DataFrame down to only the columns which we need. You can also reuse this bit of code from the stacked proportional area function:
# In[5]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
return stackedAreaDF
test = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
test.head()
# We don't need to create a 'Total' column because we're not calculating proportions, but we do need to calculate the cumulative sum of only the country columns:
# In[6]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
cumulative = stackedAreaDF[cols].cumsum(axis = 1)
return cumulative
test = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
test.head()
# In order to create the hovertext column, we need the original values for the emissions. I'm going to merge the two DataFrames by their index. Because they both have the same number of rows, this is not a problem - each row in one DataFrame will map correctly to its counterpart in the other.
#
# I also need to create a suffix for the column names for each DataFrame - because both have the same names, we need to know how to refer to the correct column:
#
# In[7]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
cumulative = stackedAreaDF[cols].cumsum(axis = 1)
cumulativeAndOrig = cumulative.merge(stackedAreaDF,
left_index = True,
right_index = True,
suffixes = ('_c','_o'))
return cumulativeAndOrig
test = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
test.head()
# Now we can use the Pandas' <code>df.apply(lambda x : x)</code> construction that we learnt in the previous lesson to create a text column for each country. This will also use the <code>hover</code> variable that we pass to the function:
# In[8]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
cumulative = stackedAreaDF[cols].cumsum(axis = 1)
cumulAndOrig = cumulative.merge(stackedAreaDF,
left_index = True,
right_index = True,
suffixes = ('_c','_o'))
for col in cols:
cumulAndOrig[col + '_t'] = "<b>" + str(col)[:-6] + "</b><br>" + str(hover) + cumulAndOrig[col + "_o"].apply(lambda x:
"{:,}Kt".format(int(round(x, 0))))
return cumulAndOrig
test = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
test.head(1)
# Now we can create our traces inside the same loop which creates the text, then create our Data, Layout and Figure objects before plotting the chart! I'm also going to return the Figure object so we can send it to the Plotly cloud:
# In[9]:
def createStackedQuantArea(df, time, cols, hover, title, yaxisTitle):
"""
A function which manipulates the data into the correct format to produce a stacked quantity area plot with Plotly.
Takes six arguments:
df - a pandas DataFrame
time - the time element of the data, must be a column in the DataFrame
cols - the name of the columns in the DataFrame which you want to include in the area plot
hover - the text common to every hoverlabel
title - the title of the chart
yaxisTitle - the yaxis title of the chart (the xaxis title comes from the time variable)
"""
traces = []
stackedAreaDF = df.loc[:, ([time] + cols)]
stackedAreaDF.fillna(0, inplace=True)
cumulative = stackedAreaDF[cols].cumsum(axis = 1)
cumulAndOrig = cumulative.merge(stackedAreaDF,
left_index = True,
right_index = True,
suffixes = ('_c','_o'))
for col in cols:
cumulAndOrig[col + '_t'] = "<b>" + str(col)[:-6] + "</b><br>" + str(hover) + cumulAndOrig[col + "_o"].apply(lambda x:
"{:,}Kt".format(int(round(x, 0))))
traces.append({'type' : 'scatter',
'x' : cumulAndOrig[time],
'y' : cumulAndOrig[col + "_c"],
'text' : cumulAndOrig[col + "_t"],
'hoverinfo' : 'text+x',
'name' : col[:-6],
'mode' : 'lines',
'fill' : 'tonexty'})
data = Data(traces)
layout = {'title' : title,
'xaxis' : {'title' : time},
'yaxis' : {'title' : yaxisTitle,
'ticksuffix' : ' Kt'},
'hovermode' : 'closest'}
fig = Figure(data = data, layout = layout)
pyo.iplot(fig)
return fig
# return fig
C02Quant = createStackedQuantArea(emissions, 'Year', ['United Arab Emirates | ARE','United Kingdom | GBR',
'United States | USA','China | CHN', 'India | IND'], 'Total C02 Emissions: ',
"Quantity of Co2 Emissions, 1960-2011", 'Quantity of Co2 Emissions')
py.image.save_as(C02Quant, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(03) Chart Presentation 1\Notebooks\images\Chart presentation (8) - Changing hovertext (1)\pyo.iplot-0.png")
# Let's push this chart to the Plotly cloud:
# In[10]:
py.plot(C02Quant, "C02 Emissions for UAE, USA, UK, India & China 1960 - 2011", fileopt = 'overwrite')
py.image.save_as(C02Quant, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(03) Chart Presentation 1\Notebooks\images\Chart presentation (8) - Changing hovertext (1)\py.plot-0.png")
#
# ### What have we learnt this lesson?
# In this lesson we updated some code that we'd previously written in order to set the hovertext and tickformat on the stacked quantity area plot which we previously made.
#
# In the next lesson we'll apply this to the stacked proportional area plot.
# If you have any questions, please ask in the comments section or email <a href="mailto:[email protected]">[email protected]</a>
|
py | 1a4a37590de5edb8d23026a69328ac9be8b40fcd | #!/usr/bin/python3
import json
import falcon
from lib.const import Version, Message
from lib.utility import SystemUtility, DocumentUtility, CustomJSONEncoder
from lib.resource import BaseJsonApiResource
from lib.database import Session, Server
class ServerInfoApiResource(BaseJsonApiResource):
def on_get(self, req, resp, hostname):
resp.status = falcon.HTTP_200
body = SystemUtility.get_response_base_with_body(Version.VERSION_1)
session = Session()
try:
info = session.query(Server).filter(Server.hostname == hostname).first()
self.logger.debug(info)
body['data']['ip'] = info.ip
body['data']['hostname'] = info.hostname
body['data']['key'] = ['category', 'value', 'note']
body['data']['data'] = [
{'category': 'Hostname', 'value': info.hostname, 'note': ''},
{'category': 'IP', 'value': info.ip, 'note': ''},
{'category': 'Role', 'value': info.rolename, 'note': ''},
{'category': 'Region', 'value': info.region, 'note': ''},
{'category': 'Zone', 'value': info.zone, 'note': ''}
]
except Exception as e:
self.logger.error(e)
session.rollback()
resp.status = falcon.HTTP_500
SystemUtility.set_response_metadata(
Version.VERSION_1, body, Message.RESPONSE_NG, Message.RESPONSE_DATABASE_CONNECTION_ERROR)
finally:
session.close()
resp.body = json.dumps(body, cls=CustomJSONEncoder)
|
py | 1a4a37b766da43e0c37fb783f6620beef96883d8 | %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def func(x):
a = -2
b = 10
return np.exp(a*x) * np.cos(b*x)
def func_integral(x):
a = -2
b = 10
c = 5
d = 52
return np.exp(a*x) * (c* np.sin(b*x) - np.cos(b*x)) * (1/d)
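# Quick check that func_integral is an antiderivative of func:
#   d/dx [ e^(-2x) * (5*sin(10x) - cos(10x)) / 52 ]
#     = e^(-2x) * ( -2*(5*sin(10x) - cos(10x)) + 50*cos(10x) + 10*sin(10x) ) / 52
#     = e^(-2x) * 52*cos(10x) / 52
#     = e^(-2x) * cos(10x)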
def trapezoid_core(f,x,h):
return 0.5 * h * (f(x + h) + f(x))
def trapezoid_method(f,a,b,N):
x = np.linspace(a,b,N)
h = x[1] - x[0]
fint = 0.0
for i in range(0,len(x) - 1, 1):
fint += trapezoid_core(f,x[i],h)
print("Iterations = " + str(i))
return(fint)
def simpson_core(f,x,h):
return (h/3)* (f(x) + 4*f(x+h) + f(x+2*h))
def simpsons_method(f,a,b,N):
x = np.linspace(a,b,N)
h = x[1] - x[0]
Fint = 0.0
for i in range (0, len(x) - 2, 2):
Fint += simpson_core(f,x[i],h)
if (N%2 == 0):
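# The pairwise loop above assumes an even number of intervals (odd N); when N is even one
# interval is left over, so apply the Simpson kernel once more over the final interval
# [x[-2], x[-1]] with half the step, which uses that interval's midpoint as the middle node.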
Fint += simpson_core(f,x[-2], 0.5 * h)
print("Iterations = " + str(i))
return Fint
def romberg_core(f,a,b,i):
h = b - a
dh = h/2.**i
K = h/2.**(i+1)
M = 0.0
for j in range(2**i):
M += f(a + 0.5*dh + j*dh)
return(K*M)
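# romberg_core(f, a, b, i) samples f at the midpoints of 2**i equal subintervals of [a, b] and
# scales the sum by half the subinterval width, i.e. it returns half of the midpoint-rule
# estimate at refinement level i. The driver below averages that with the previous estimate
# (I[i] = 0.5*I[i-1] + core) and stops once the relative change between iterations drops below tol.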
def romberg_integration(f,a,b,tol):
i = 0
imax = 1000
delta = 100.0*np.fabs(tol)
I = np.zeros(imax,dtype=float)
I[0] = 0.5*(b-a)*(f(a) + f(b))
i +=1
while(delta>tol):
I[i] = 0.5*I[i-1] + romberg_core(f,a,b,i)
delta = np.fabs( (I[i] - I[i-1]) / I[i])
print(i,I[i],I[i-1],delta)
if (delta>tol):
i += 1
if (i > imax):
print('Max iterations reached.')
raise StopIteration('Stopping iterations after ', i)
print("Iterations = " + str(i))
return(I[i])
print("function = e^(-2x) * cos(10x)")
answer = func_integral(np.pi) - func_integral(0)
print("Integral = " + str(answer))
print(" ")
print('Trapezoid method')
print(trapezoid_method(func,0,np.pi,10))
print(" ")
print("Simpson's method")
print(simpsons_method(func,0,np.pi,10))
print(" ")
print("Romberg's method")
tolerance = 1e-6
RI = romberg_integration(func,0,np.pi,tolerance)
print(RI, (RI - answer)/answer, tolerance)
py | 1a4a388a248ad854ae63a6bc0b88a1a38a5457c9 | from .abstract import Vector, Point
from . import colors
import copy
import math
class Motion:
# Class methods
# Operations
@classmethod
def null(cls, n=3, d=2):
"""Return the neutral motion."""
# The dimension d still needs to be implemented for the vectors.
return cls([Vector.null(d=d) for i in range(n)])
neutral = zero = null
@classmethod
def sum(cls, motions):
"""Return the sum of the motions together."""
result = cls.null()
for motion in motions:
result += motion
return result
@classmethod
def average(cls, motions):
"""Return the average of the motions."""
return cls.sum(motions) / len(motions)
# Random
@classmethod
def random(cls, n=3, d=2, borns=[-1, 1], **kwargs):
"""Create a random motion using optional minimum and maximum."""
vectors = [Vector.random(d=d, borns=borns, **kwargs) for i in range(n)]
return cls(*vectors)
# Object functions
# Initializing
def __init__(self, *vectors):
"""Create a motion using vectors."""
if len(vectors) > 0:
if isinstance(vectors[0], list):
vectors = vectors[0]
self.vectors = list(vectors)
if len(self.vectors) >= 1:
self.position.color = colors.GREEN
if len(self.vectors) >= 2:
self.velocity.color = colors.BLUE
if len(self.vectors) >= 3:
self.acceleration.color = colors.RED
# Set
def set(self, other, n=None):
"""Set the components of the motion to the components of another motion
without affecting its colors."""
if n is None: n = len(self.vectors)
for i in range(n):
self.vectors[i].set(other.vectors[i])
def showEach(self, context):
"""Show the motion on the screen from the origin of the plane."""
for vector in self.vectors:
vector.show(context)
# Showing
def show(self, context):
"""Show the vectors from the position."""
for vector in self.vectors[1:]:
vector.show(context, self.position)
# Updating the motion
def update(self, dt=1):
"""Update the motion according to physics."""
l = len(self.vectors)
for i in range(1, l):
self.vectors[-i - 1] += self.vectors[-i] * dt
# Representation
def __str__(self):
"""Return the str representation of the motion."""
return "mt(" + ",".join(map(str, self.vectors)) + ")"
# Iterations
def __iter__(self):
"""Iterate the vectors."""
self.iterator = 0
return self
def __next__(self):
"""Return the next vector of the iteration."""
if self.iterator < len(self.vectors):
self.iterator += 1
return self.vectors[self.iterator - 1]
else:
raise StopIteration
# Time behaviour
def next(self, t=1):
"""Return the next motion using its actual one using optional time t."""
acceleration = Vector([a for a in self.acceleration])
velocity = Vector([v + a * t for (v, a) in zip(self.velocity, self.acceleration)])
position = Vector([p + v * t for (p, v) in zip(self.position, self.velocity)])
return Motion(position, velocity, acceleration)
def previous(self, t=1):
"""Return the previous motion using its actual one using optional time t."""
acceleration = Vector([a for a in self.acceleration])
velocity = Vector([v - a * t for (v, a) in zip(self.velocity, self.acceleration)])
position = Vector([p - v * t for (p, v) in zip(self.position, self.velocity)])
return Motion(position, velocity, acceleration)
# Length
def __len__(self):
"""Return the number of vectors."""
return len(self.vectors)
# Items
def __getitem__(self, index):
"""Return the vector of index 'index.'"""
return self.vectors[index]
def __setitem__(self, index, vector):
"""Set the vector of index 'index.'"""
self.vectors[index] = vector
# Vectors
# Position
def getPosition(self):
"""Return the position of the motion."""
return self.vectors[0]
def setPosition(self, position):
"""Set the position of the motion using position."""
self.vectors[0] = position
def delPosition(self):
"""Set the position to zero."""
self.vectors[0] = Vector([0 for i in range(len(self.vectors[0].position))])
# Velocity
def getVelocity(self):
"""Return the velocity of the motion."""
return self.vectors[1]
def setVelocity(self, velocity):
"""Set the velocity of the motion using velocity."""
self.vectors[1] = velocity
def delVelocity(self):
"""Set the velocity to zero."""
self.vectors[1] = Vector([0 for i in range(len(self.vectors[1].position))])
# Acceleration
def getAcceleration(self):
"""Return the acceleration of the motion."""
return self.vectors[2]
def setAcceleration(self, acceleration):
"""Set the acceleration of the motion."""
self.vectors[2] = acceleration
def delAcceleration(self):
"""Set the acceleration to zero."""
self.vectors[2] = Vector([0 for i in range(len(self.vectors[2].position))])
# Operations
def __neg__(self):
"""Return the motions made of the negative vectors."""
return Motion(*[-v for v in self.vectors])
__radd__ = __add__ = lambda self, other: Motion(*[v1 + v2 for (v1, v2) in zip(self.vectors, other.vectors)]) # Addition
__rsub__ = __sub__ = lambda self, other: Motion(*[v1 - v2 for (v1, v2) in zip(self.vectors, other.vectors)]) # Substraction
__rmul__ = __mul__ = lambda self, other: Motion(*[v * other for v in self.vectors]) # Multiplication
__rtruediv__ = __truediv__ = lambda self, other: Motion(*[v / other for v in self.vectors]) # Division
__rfloordiv__ = __floordiv__ = lambda self, other: Motion(*[v // other for v in self.vectors]) # Floor Division
def __iadd__(self, other):
"""Add the other motion to the motion."""
self.set(self + other)
return self
def __isub__(self, other):
"""Substract the other motion to the motion."""
self.set(self - other)
return self
def __imul__(self, other):
"""Multiply a motion by a scalar."""
self.set(self * other)
return self
def __itruediv__(self, other):
"""Divide a motion by a scalar."""
self.set(self / other)
return self
def __ifloordiv__(self, other):
"""Divide motion by a scalar according to euclidian division."""
self.set(self // other)
return self
# Properties
position = property(getPosition, setPosition, delPosition, "Allow the user to manipulate the position.")
velocity = property(getVelocity, setVelocity, delVelocity, "Allow the user to manipulate the velocity.")
acceleration = property(getAcceleration, setAcceleration, delAcceleration,
"Allow the user to manipulate the acceleration.")
# Other derivatives in order...
# jerk=property(getJerk,setJerk,delJerk,"Representation of the jerk.")
# snap=jounce=property(getSnap,setSnap,delSnap,"Representation of the snap.")
# crackle=property(getCrackle,setCrackle,delCrackle,"Representation of the crackle.")
# pop=property(getPop,setPop,delPop,"Representation of the pop.")
class Moment(Motion):
"""A moment is a motion that correspond to the angular momentum of an object.
The only difference here are the default parameters such as the dimensions,
the number of vectors and the colors."""
# Instance methods
# Initializing
def __init__(self, *vectors, n=2, d=1):
"""Create a motion using vectors."""
if len(vectors) > 0:
if isinstance(vectors[0], list):
vectors = vectors[0]
self.vectors = list(vectors)
if len(self.vectors) >= 1:
self.position.color = colors.PURPLE
if len(self.vectors) >= 2:
self.velocity.color = colors.ORANGE
if len(self.vectors) >= 3:
self.acceleration.color = colors.YELLOW
# Representation
def __str__(self):
"""Return the str representation of the moment."""
return "mm(" + ",".join(map(str, self.vectors)) + ")"
# Showing
def show(self, context, point=Point(0, 0), angle=0):
"""Show the moment."""
if len(self) >= 1:
mp = self.position
v = Vector.createFromPolar(mp.norm, angle)
v.color = mp.color
v.show(context, point)
if len(self) >= 2:
angle += math.pi / 2
mv = self.velocity
v = Vector.createFromPolar(mv.norm, angle)
v.color = mv.color
v.show(context, point)
if len(self) >= 3:
angle += math.pi / 2
ma = self.acceleration
a = Vector.createFromPolar(ma.norm, angle)
a.color = ma.color
a.show(context, point)
if __name__ == "__main__":
from .context import Context
context = Context(name="Motion Demonstration")
motion1 = Motion.random()
motion2 = Motion.random()
motion = motion1 + motion2
motion = Motion.sum([Motion.random() for i in range(9)] + [motion]) # Summing 10 motions together
moment = Moment.random()
print(motion, moment)
while context.open:
context.check()
context.control()
context.clear()
context.show()
motion.show(context)
moment.show(context)
context.flip()
|
py | 1a4a38d54edb75057c2c734d4ec4990e3adc1f75 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import itertools
import operator
import zlib
import jmespath
import re
from c7n.actions import BaseAction, ModifyVpcSecurityGroupsAction
from c7n.exceptions import PolicyValidationError, ClientError
from c7n.filters import (
DefaultVpcBase, Filter, ValueFilter)
import c7n.filters.vpc as net_filters
from c7n.filters.iamaccess import CrossAccountAccessFilter
from c7n.filters.related import RelatedResourceFilter, RelatedResourceByIdFilter
from c7n.filters.revisions import Diff
from c7n import query, resolver
from c7n.manager import resources
from c7n.resources.securityhub import OtherResourcePostFinding, PostFinding
from c7n.utils import (
chunks, local_session, type_schema, get_retry, parse_cidr)
from c7n.resources.aws import shape_validate
from c7n.resources.shield import IsShieldProtected, SetShieldProtection
@resources.register('vpc')
class Vpc(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'vpc'
enum_spec = ('describe_vpcs', 'Vpcs', None)
name = id = 'VpcId'
filter_name = 'VpcIds'
filter_type = 'list'
cfn_type = config_type = 'AWS::EC2::VPC'
id_prefix = "vpc-"
@Vpc.filter_registry.register('flow-logs')
class FlowLogFilter(Filter):
"""Are flow logs enabled on the resource.
ie to find all vpcs with flows logs disabled we can do this
:example:
.. code-block:: yaml
policies:
- name: flow-logs-enabled
resource: vpc
filters:
- flow-logs
or to find all vpcs with flow logs but that don't match a
particular configuration.
:example:
.. code-block:: yaml
policies:
- name: flow-mis-configured
resource: vpc
filters:
- not:
- type: flow-logs
enabled: true
set-op: or
op: equal
# equality operator applies to following keys
traffic-type: all
status: active
log-group: vpc-logs
"""
schema = type_schema(
'flow-logs',
**{'enabled': {'type': 'boolean', 'default': False},
'op': {'enum': ['equal', 'not-equal'], 'default': 'equal'},
'set-op': {'enum': ['or', 'and'], 'default': 'or'},
'status': {'enum': ['active']},
'deliver-status': {'enum': ['success', 'failure']},
'destination': {'type': 'string'},
'destination-type': {'enum': ['s3', 'cloud-watch-logs']},
'traffic-type': {'enum': ['accept', 'reject', 'all']},
'log-format': {'type': 'string'},
'log-group': {'type': 'string'}})
permissions = ('ec2:DescribeFlowLogs',)
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ec2')
# TODO given subnet/nic level logs, we should paginate, but we'll
# need to add/update botocore pagination support.
logs = client.describe_flow_logs().get('FlowLogs', ())
m = self.manager.get_model()
resource_map = {}
for fl in logs:
resource_map.setdefault(fl['ResourceId'], []).append(fl)
enabled = self.data.get('enabled', False)
log_group = self.data.get('log-group')
log_format = self.data.get('log-format')
traffic_type = self.data.get('traffic-type')
destination_type = self.data.get('destination-type')
destination = self.data.get('destination')
status = self.data.get('status')
delivery_status = self.data.get('deliver-status')
op = self.data.get('op', 'equal') == 'equal' and operator.eq or operator.ne
set_op = self.data.get('set-op', 'or')
results = []
# looping over vpc resources
for r in resources:
if r[m.id] not in resource_map:
# we didn't find a flow log for this vpc
if enabled:
# vpc flow logs not enabled so exclude this vpc from results
continue
results.append(r)
continue
flogs = resource_map[r[m.id]]
r['c7n:flow-logs'] = flogs
# config comparisons are pointless if we only want vpcs with no flow logs
if enabled:
fl_matches = []
for fl in flogs:
dest_type_match = (destination_type is None) or op(
fl['LogDestinationType'], destination_type)
dest_match = (destination is None) or op(
fl['LogDestination'], destination)
status_match = (status is None) or op(fl['FlowLogStatus'], status.upper())
delivery_status_match = (delivery_status is None) or op(
fl['DeliverLogsStatus'], delivery_status.upper())
traffic_type_match = (
traffic_type is None) or op(
fl['TrafficType'],
traffic_type.upper())
log_group_match = (log_group is None) or op(fl.get('LogGroupName'), log_group)
log_format_match = (log_format is None) or op(fl.get('LogFormat'), log_format)
# combine all conditions to check if flow log matches the spec
fl_match = (status_match and traffic_type_match and dest_match and
log_format_match and log_group_match and
dest_type_match and delivery_status_match)
fl_matches.append(fl_match)
if set_op == 'or':
if any(fl_matches):
results.append(r)
elif set_op == 'and':
if all(fl_matches):
results.append(r)
return results
@Vpc.filter_registry.register('security-group')
class VpcSecurityGroupFilter(RelatedResourceFilter):
"""Filter VPCs based on Security Group attributes
:example:
.. code-block:: yaml
policies:
- name: vpc-by-sg
resource: vpc
filters:
- type: security-group
key: tag:Color
value: Gray
"""
schema = type_schema(
'security-group', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
RelatedResource = "c7n.resources.vpc.SecurityGroup"
RelatedIdsExpression = '[SecurityGroups][].GroupId'
AnnotationKey = "matched-vpcs"
def get_related_ids(self, resources):
vpc_ids = [vpc['VpcId'] for vpc in resources]
vpc_group_ids = {
g['GroupId'] for g in
self.manager.get_resource_manager('security-group').resources()
if g.get('VpcId', '') in vpc_ids
}
return vpc_group_ids
@Vpc.filter_registry.register('subnet')
class VpcSubnetFilter(RelatedResourceFilter):
"""Filter VPCs based on Subnet attributes
:example:
.. code-block:: yaml
policies:
- name: vpc-by-subnet
resource: vpc
filters:
- type: subnet
key: tag:Color
value: Gray
"""
schema = type_schema(
'subnet', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
RelatedResource = "c7n.resources.vpc.Subnet"
RelatedIdsExpression = '[Subnets][].SubnetId'
AnnotationKey = "MatchedVpcsSubnets"
def get_related_ids(self, resources):
vpc_ids = [vpc['VpcId'] for vpc in resources]
vpc_subnet_ids = {
g['SubnetId'] for g in
self.manager.get_resource_manager('subnet').resources()
if g.get('VpcId', '') in vpc_ids
}
return vpc_subnet_ids
@Vpc.filter_registry.register('nat-gateway')
class VpcNatGatewayFilter(RelatedResourceFilter):
"""Filter VPCs based on NAT Gateway attributes
:example:
.. code-block:: yaml
policies:
- name: vpc-by-nat
resource: vpc
filters:
- type: nat-gateway
key: tag:Color
value: Gray
"""
schema = type_schema(
'nat-gateway', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
RelatedResource = "c7n.resources.vpc.NATGateway"
RelatedIdsExpression = '[NatGateways][].NatGatewayId'
AnnotationKey = "MatchedVpcsNatGateways"
def get_related_ids(self, resources):
vpc_ids = [vpc['VpcId'] for vpc in resources]
vpc_natgw_ids = {
g['NatGatewayId'] for g in
self.manager.get_resource_manager('nat-gateway').resources()
if g.get('VpcId', '') in vpc_ids
}
return vpc_natgw_ids
@Vpc.filter_registry.register('internet-gateway')
class VpcInternetGatewayFilter(RelatedResourceFilter):
"""Filter VPCs based on Internet Gateway attributes
:example:
.. code-block:: yaml
policies:
- name: vpc-by-igw
resource: vpc
filters:
- type: internet-gateway
key: tag:Color
value: Gray
"""
schema = type_schema(
'internet-gateway', rinherit=ValueFilter.schema,
**{'match-resource': {'type': 'boolean'},
'operator': {'enum': ['and', 'or']}})
RelatedResource = "c7n.resources.vpc.InternetGateway"
RelatedIdsExpression = '[InternetGateways][].InternetGatewayId'
AnnotationKey = "MatchedVpcsIgws"
def get_related_ids(self, resources):
vpc_ids = [vpc['VpcId'] for vpc in resources]
vpc_igw_ids = set()
for igw in self.manager.get_resource_manager('internet-gateway').resources():
for attachment in igw['Attachments']:
if attachment.get('VpcId', '') in vpc_ids:
vpc_igw_ids.add(igw['InternetGatewayId'])
return vpc_igw_ids
@Vpc.filter_registry.register('vpc-attributes')
class AttributesFilter(Filter):
"""Filters VPCs based on their DNS attributes
:example:
.. code-block:: yaml
policies:
- name: dns-hostname-enabled
resource: vpc
filters:
- type: vpc-attributes
dnshostnames: True
"""
schema = type_schema(
'vpc-attributes',
dnshostnames={'type': 'boolean'},
dnssupport={'type': 'boolean'})
permissions = ('ec2:DescribeVpcAttribute',)
def process(self, resources, event=None):
results = []
client = local_session(self.manager.session_factory).client('ec2')
dns_hostname = self.data.get('dnshostnames', None)
dns_support = self.data.get('dnssupport', None)
for r in resources:
if dns_hostname is not None:
hostname = client.describe_vpc_attribute(
VpcId=r['VpcId'],
Attribute='enableDnsHostnames'
)['EnableDnsHostnames']['Value']
if dns_support is not None:
support = client.describe_vpc_attribute(
VpcId=r['VpcId'],
Attribute='enableDnsSupport'
)['EnableDnsSupport']['Value']
if dns_hostname is not None and dns_support is not None:
if dns_hostname == hostname and dns_support == support:
results.append(r)
elif dns_hostname is not None and dns_support is None:
if dns_hostname == hostname:
results.append(r)
elif dns_support is not None and dns_hostname is None:
if dns_support == support:
results.append(r)
return results
@Vpc.filter_registry.register('dhcp-options')
class DhcpOptionsFilter(Filter):
"""Filter VPCs based on their dhcp options
:example:
.. code-block:: yaml
policies:
- name: vpcs-in-domain
resource: vpc
filters:
- type: dhcp-options
domain-name: ec2.internal
if an option value is specified as a list, then all elements must be present.
if an option value is specified as a string, then that string must be present.
vpcs not matching a given option value can be found via specifying
a `present: false` parameter.
"""
option_keys = ('domain-name', 'domain-name-servers', 'ntp-servers')
schema = type_schema('dhcp-options', **{
k: {'oneOf': [
{'type': 'array', 'items': {'type': 'string'}},
{'type': 'string'}]}
for k in option_keys})
schema['properties']['present'] = {'type': 'boolean'}
permissions = ('ec2:DescribeDhcpOptions',)
def validate(self):
if not any([self.data.get(k) for k in self.option_keys]):
raise PolicyValidationError("one of %s required" % (self.option_keys,))
return self
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ec2')
option_ids = [r['DhcpOptionsId'] for r in resources]
options_map = {}
results = []
for options in client.describe_dhcp_options(
Filters=[{
'Name': 'dhcp-options-id',
'Values': option_ids}]).get('DhcpOptions', ()):
options_map[options['DhcpOptionsId']] = {
o['Key']: [v['Value'] for v in o['Values']]
for o in options['DhcpConfigurations']}
for vpc in resources:
if self.process_vpc(vpc, options_map[vpc['DhcpOptionsId']]):
results.append(vpc)
return results
def process_vpc(self, vpc, dhcp):
vpc['c7n:DhcpConfiguration'] = dhcp
found = True
for k in self.option_keys:
if k not in self.data:
continue
is_list = isinstance(self.data[k], list)
if k not in dhcp:
found = False
elif not is_list and self.data[k] not in dhcp[k]:
found = False
elif is_list and sorted(self.data[k]) != sorted(dhcp[k]):
found = False
if not self.data.get('present', True):
found = not found
return found
@Vpc.action_registry.register('post-finding')
class VpcPostFinding(PostFinding):
resource_type = "AwsEc2Vpc"
def format_resource(self, r):
envelope, payload = self.format_envelope(r)
# more inane sechub formatting deltas
detail = {
'DhcpOptionsId': r.get('DhcpOptionsId'),
'State': r['State']}
for assoc in r.get('CidrBlockAssociationSet', ()):
detail.setdefault('CidrBlockAssociationSet', []).append(dict(
AssociationId=assoc['AssociationId'],
CidrBlock=assoc['CidrBlock'],
CidrBlockState=assoc['CidrBlockState']['State']))
for assoc in r.get('Ipv6CidrBlockAssociationSet', ()):
detail.setdefault('Ipv6CidrBlockAssociationSet', []).append(dict(
AssociationId=assoc['AssociationId'],
Ipv6CidrBlock=assoc['Ipv6CidrBlock'],
CidrBlockState=assoc['Ipv6CidrBlockState']['State']))
payload.update(self.filter_empty(detail))
return envelope
class DescribeSubnets(query.DescribeSource):
def get_resources(self, resource_ids):
while resource_ids:
try:
return super().get_resources(resource_ids)
except ClientError as e:
if e.response['Error']['Code'] != 'InvalidSubnetID.NotFound':
raise
sid = extract_subnet_id(e)
if sid:
resource_ids.remove(sid)
else:
return []
RE_ERROR_SUBNET_ID = re.compile("'(?P<subnet_id>subnet-.*?)'")
def extract_subnet_id(state_error):
"Extract an subnet id from an error"
subnet_id = None
match = RE_ERROR_SUBNET_ID.search(str(state_error))
if match:
subnet_id = match.groupdict().get('subnet_id')
return subnet_id
@resources.register('subnet')
class Subnet(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'subnet'
enum_spec = ('describe_subnets', 'Subnets', None)
name = id = 'SubnetId'
filter_name = 'SubnetIds'
filter_type = 'list'
cfn_type = config_type = 'AWS::EC2::Subnet'
id_prefix = "subnet-"
source_mapping = {
'describe': DescribeSubnets,
'config': query.ConfigSource}
Subnet.filter_registry.register('flow-logs', FlowLogFilter)
@Subnet.filter_registry.register('vpc')
class SubnetVpcFilter(net_filters.VpcFilter):
RelatedIdsExpression = "VpcId"
class ConfigSG(query.ConfigSource):
def load_resource(self, item):
r = super(ConfigSG, self).load_resource(item)
for rset in ('IpPermissions', 'IpPermissionsEgress'):
for p in r.get(rset, ()):
if p.get('FromPort', '') is None:
p.pop('FromPort')
if p.get('ToPort', '') is None:
p.pop('ToPort')
if 'Ipv6Ranges' not in p:
p[u'Ipv6Ranges'] = []
for i in p.get('UserIdGroupPairs', ()):
for k, v in list(i.items()):
if v is None:
i.pop(k)
# legacy config form, still version 1.2
for attribute, element_key in (('IpRanges', u'CidrIp'),):
if attribute not in p:
continue
p[attribute] = [{element_key: v} for v in p[attribute]]
if 'Ipv4Ranges' in p:
p['IpRanges'] = p.pop('Ipv4Ranges')
return r
@resources.register('security-group')
class SecurityGroup(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'security-group'
enum_spec = ('describe_security_groups', 'SecurityGroups', None)
id = 'GroupId'
name = 'GroupName'
filter_name = "GroupIds"
filter_type = 'list'
cfn_type = config_type = "AWS::EC2::SecurityGroup"
id_prefix = "sg-"
source_mapping = {
'config': ConfigSG,
'describe': query.DescribeSource
}
@SecurityGroup.filter_registry.register('diff')
class SecurityGroupDiffFilter(Diff):
def diff(self, source, target):
differ = SecurityGroupDiff()
return differ.diff(source, target)
class SecurityGroupDiff:
"""Diff two versions of a security group
Immutable: GroupId, GroupName, Description, VpcId, OwnerId
Mutable: Tags, Rules
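    The computed delta is a nested mapping; a rough sketch of its shape,
    with keys present only when the corresponding change set is non-empty:
    .. code-block:: yaml
        tags:
          added: {}      # tag key -> new value
          removed: {}    # tag key -> old value
          updated: {}    # tag key -> new value
        ingress:
          added: []      # IpPermissions entries
          removed: []
        egress:
          added: []      # IpPermissionsEgress entries
          removed: []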
"""
def diff(self, source, target):
delta = {}
tag_delta = self.get_tag_delta(source, target)
if tag_delta:
delta['tags'] = tag_delta
ingress_delta = self.get_rule_delta('IpPermissions', source, target)
if ingress_delta:
delta['ingress'] = ingress_delta
egress_delta = self.get_rule_delta(
'IpPermissionsEgress', source, target)
if egress_delta:
delta['egress'] = egress_delta
if delta:
return delta
def get_tag_delta(self, source, target):
source_tags = {t['Key']: t['Value'] for t in source.get('Tags', ())}
target_tags = {t['Key']: t['Value'] for t in target.get('Tags', ())}
target_keys = set(target_tags.keys())
source_keys = set(source_tags.keys())
removed = source_keys.difference(target_keys)
added = target_keys.difference(source_keys)
changed = set()
for k in target_keys.intersection(source_keys):
if source_tags[k] != target_tags[k]:
changed.add(k)
return {k: v for k, v in {
'added': {k: target_tags[k] for k in added},
'removed': {k: source_tags[k] for k in removed},
'updated': {k: target_tags[k] for k in changed}}.items() if v}
def get_rule_delta(self, key, source, target):
source_rules = {
self.compute_rule_hash(r): r for r in source.get(key, ())}
target_rules = {
self.compute_rule_hash(r): r for r in target.get(key, ())}
source_keys = set(source_rules.keys())
target_keys = set(target_rules.keys())
removed = source_keys.difference(target_keys)
added = target_keys.difference(source_keys)
return {k: v for k, v in
{'removed': [source_rules[rid] for rid in sorted(removed)],
'added': [target_rules[rid] for rid in sorted(added)]}.items() if v}
RULE_ATTRS = (
('PrefixListIds', 'PrefixListId'),
('UserIdGroupPairs', 'GroupId'),
('IpRanges', 'CidrIp'),
('Ipv6Ranges', 'CidrIpv6')
)
def compute_rule_hash(self, rule):
buf = "%d-%d-%s-" % (
rule.get('FromPort', 0) or 0,
rule.get('ToPort', 0) or 0,
rule.get('IpProtocol', '-1') or '-1'
)
for a, ke in self.RULE_ATTRS:
if a not in rule:
continue
ev = [e[ke] for e in rule[a]]
ev.sort()
for e in ev:
buf += "%s-" % e
# mask to generate the same numeric value across all Python versions
return zlib.crc32(buf.encode('ascii')) & 0xffffffff
@SecurityGroup.action_registry.register('patch')
class SecurityGroupApplyPatch(BaseAction):
"""Modify a resource via application of a reverse delta.
"""
schema = type_schema('patch')
permissions = ('ec2:AuthorizeSecurityGroupIngress',
'ec2:AuthorizeSecurityGroupEgress',
'ec2:RevokeSecurityGroupIngress',
'ec2:RevokeSecurityGroupEgress',
'ec2:CreateTags',
'ec2:DeleteTags')
def validate(self):
diff_filters = [n for n in self.manager.iter_filters() if isinstance(
n, SecurityGroupDiffFilter)]
if not len(diff_filters):
raise PolicyValidationError(
"resource patching requires diff filter")
return self
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
differ = SecurityGroupDiff()
patcher = SecurityGroupPatch()
for r in resources:
# reverse the patch by computing fresh, the forward
# patch is for notifications
d = differ.diff(r, r['c7n:previous-revision']['resource'])
patcher.apply_delta(client, r, d)
class SecurityGroupPatch:
RULE_TYPE_MAP = {
'egress': ('IpPermissionsEgress',
'revoke_security_group_egress',
'authorize_security_group_egress'),
'ingress': ('IpPermissions',
'revoke_security_group_ingress',
'authorize_security_group_ingress')}
retry = staticmethod(get_retry((
'RequestLimitExceeded', 'Client.RequestLimitExceeded')))
def apply_delta(self, client, target, change_set):
if 'tags' in change_set:
self.process_tags(client, target, change_set['tags'])
if 'ingress' in change_set:
self.process_rules(
client, 'ingress', target, change_set['ingress'])
if 'egress' in change_set:
self.process_rules(
client, 'egress', target, change_set['egress'])
def process_tags(self, client, group, tag_delta):
if 'removed' in tag_delta:
self.retry(client.delete_tags,
Resources=[group['GroupId']],
Tags=[{'Key': k}
for k in tag_delta['removed']])
tags = []
if 'added' in tag_delta:
tags.extend(
[{'Key': k, 'Value': v}
for k, v in tag_delta['added'].items()])
if 'updated' in tag_delta:
tags.extend(
[{'Key': k, 'Value': v}
for k, v in tag_delta['updated'].items()])
if tags:
self.retry(
client.create_tags, Resources=[group['GroupId']], Tags=tags)
def process_rules(self, client, rule_type, group, delta):
key, revoke_op, auth_op = self.RULE_TYPE_MAP[rule_type]
revoke, authorize = getattr(
client, revoke_op), getattr(client, auth_op)
# Process removes
if 'removed' in delta:
self.retry(revoke, GroupId=group['GroupId'],
IpPermissions=[r for r in delta['removed']])
# Process adds
if 'added' in delta:
self.retry(authorize, GroupId=group['GroupId'],
IpPermissions=[r for r in delta['added']])
class SGUsage(Filter):
def get_permissions(self):
return list(itertools.chain(
*[self.manager.get_resource_manager(m).get_permissions()
for m in
['lambda', 'eni', 'launch-config', 'security-group', 'event-rule-target']]))
def filter_peered_refs(self, resources):
if not resources:
return resources
# Check that groups are not referenced across accounts
client = local_session(self.manager.session_factory).client('ec2')
peered_ids = set()
for resource_set in chunks(resources, 200):
for sg_ref in client.describe_security_group_references(
GroupId=[r['GroupId'] for r in resource_set]
)['SecurityGroupReferenceSet']:
peered_ids.add(sg_ref['GroupId'])
self.log.debug(
"%d of %d groups w/ peered refs", len(peered_ids), len(resources))
return [r for r in resources if r['GroupId'] not in peered_ids]
def get_scanners(self):
return (
("nics", self.get_eni_sgs),
("sg-perm-refs", self.get_sg_refs),
('lambdas', self.get_lambda_sgs),
("launch-configs", self.get_launch_config_sgs),
("ecs-cwe", self.get_ecs_cwe_sgs),
("codebuild", self.get_codebuild_sgs),
)
def scan_groups(self):
used = set()
for kind, scanner in self.get_scanners():
sg_ids = scanner()
new_refs = sg_ids.difference(used)
used = used.union(sg_ids)
self.log.debug(
"%s using %d sgs, new refs %s total %s",
kind, len(sg_ids), len(new_refs), len(used))
return used
def get_launch_config_sgs(self):
# Note assuming we also have launch config garbage collection
# enabled.
sg_ids = set()
for cfg in self.manager.get_resource_manager('launch-config').resources():
for g in cfg['SecurityGroups']:
sg_ids.add(g)
for g in cfg['ClassicLinkVPCSecurityGroups']:
sg_ids.add(g)
return sg_ids
def get_lambda_sgs(self):
sg_ids = set()
for func in self.manager.get_resource_manager('lambda').resources(augment=False):
if 'VpcConfig' not in func:
continue
for g in func['VpcConfig']['SecurityGroupIds']:
sg_ids.add(g)
return sg_ids
def get_eni_sgs(self):
sg_ids = set()
for nic in self.manager.get_resource_manager('eni').resources():
for g in nic['Groups']:
sg_ids.add(g['GroupId'])
return sg_ids
def get_codebuild_sgs(self):
sg_ids = set()
for cb in self.manager.get_resource_manager('codebuild').resources():
sg_ids |= set(cb.get('vpcConfig', {}).get('securityGroupIds', []))
return sg_ids
def get_sg_refs(self):
sg_ids = set()
for sg in self.manager.get_resource_manager('security-group').resources():
for perm_type in ('IpPermissions', 'IpPermissionsEgress'):
for p in sg.get(perm_type, []):
for g in p.get('UserIdGroupPairs', ()):
sg_ids.add(g['GroupId'])
return sg_ids
def get_ecs_cwe_sgs(self):
sg_ids = set()
expr = jmespath.compile(
'EcsParameters.NetworkConfiguration.awsvpcConfiguration.SecurityGroups[]')
for rule in self.manager.get_resource_manager(
'event-rule-target').resources(augment=False):
ids = expr.search(rule)
if ids:
sg_ids.update(ids)
return sg_ids
@SecurityGroup.filter_registry.register('unused')
class UnusedSecurityGroup(SGUsage):
"""Filter to just vpc security groups that are not used.
We scan all extant enis in the vpc to get a baseline set of groups
in use. Then augment with those referenced by launch configs, and
lambdas as they may not have extant resources in the vpc at a
given moment. We also find any security group with references from
other security group either within the vpc or across peered
connections. Also checks cloud watch event targeting ecs.
Checks - enis, lambda, launch-configs, sg rule refs, and ecs cwe
targets.
Note this filter does not support classic security groups atm.
:example:
.. code-block:: yaml
policies:
- name: security-groups-unused
resource: security-group
filters:
- unused
"""
schema = type_schema('unused')
def process(self, resources, event=None):
used = self.scan_groups()
unused = [
r for r in resources
if r['GroupId'] not in used and 'VpcId' in r]
return unused and self.filter_peered_refs(unused) or []
@SecurityGroup.filter_registry.register('used')
class UsedSecurityGroup(SGUsage):
"""Filter to security groups that are used.
This operates as a complement to the unused filter for multi-step
workflows.
:example:
.. code-block:: yaml
policies:
- name: security-groups-in-use
resource: security-group
filters:
- used
"""
schema = type_schema('used')
def process(self, resources, event=None):
used = self.scan_groups()
unused = [
r for r in resources
if r['GroupId'] not in used and 'VpcId' in r]
unused = {g['GroupId'] for g in self.filter_peered_refs(unused)}
return [r for r in resources if r['GroupId'] not in unused]
@SecurityGroup.filter_registry.register('stale')
class Stale(Filter):
"""Filter to find security groups that contain stale references
to other groups that are either no longer present or traverse
a broken vpc peering connection. Note this applies to VPC
Security groups only and will implicitly filter security groups.
AWS Docs:
https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-security-groups.html
:example:
.. code-block:: yaml
policies:
- name: stale-security-groups
resource: security-group
filters:
- stale
"""
schema = type_schema('stale')
permissions = ('ec2:DescribeStaleSecurityGroups',)
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ec2')
vpc_ids = {r['VpcId'] for r in resources if 'VpcId' in r}
group_map = {r['GroupId']: r for r in resources}
results = []
self.log.debug("Querying %d vpc for stale refs", len(vpc_ids))
stale_count = 0
for vpc_id in vpc_ids:
stale_groups = client.describe_stale_security_groups(
VpcId=vpc_id).get('StaleSecurityGroupSet', ())
stale_count += len(stale_groups)
for s in stale_groups:
if s['GroupId'] in group_map:
r = group_map[s['GroupId']]
if 'StaleIpPermissions' in s:
r['MatchedIpPermissions'] = s['StaleIpPermissions']
if 'StaleIpPermissionsEgress' in s:
r['MatchedIpPermissionsEgress'] = s[
'StaleIpPermissionsEgress']
results.append(r)
self.log.debug("Found %d stale security groups", stale_count)
return results
@SecurityGroup.filter_registry.register('default-vpc')
class SGDefaultVpc(DefaultVpcBase):
"""Filter that returns any security group that exists within the default vpc
:example:
.. code-block:: yaml
policies:
- name: security-group-default-vpc
resource: security-group
filters:
- default-vpc
"""
schema = type_schema('default-vpc')
def __call__(self, resource, event=None):
if 'VpcId' not in resource:
return False
return self.match(resource['VpcId'])
class SGPermission(Filter):
"""Filter for verifying security group ingress and egress permissions
All attributes of a security group permission are available as
value filters.
If multiple attributes are specified the permission must satisfy
all of them. Note that within an attribute match against a list value
of a permission we default to or.
If a group has any permissions that match all conditions, then it
matches the filter.
Permissions that match on the group are annotated onto the group and
can subsequently be used by the remove-permission action.
We have specialized handling for matching `Ports` in ingress/egress
permission From/To range. The following example matches on ingress
rules which allow for a range that includes all of the given ports.
.. code-block:: yaml
- type: ingress
Ports: [22, 443, 80]
As well for verifying that a rule only allows for a specific set of ports
as in the following example. The delta between this and the previous
example is that if the permission allows for any ports not specified here,
then the rule will match. ie. OnlyPorts is a negative assertion match,
it matches when a permission includes ports outside of the specified set.
.. code-block:: yaml
- type: ingress
OnlyPorts: [22]
For simplifying ipranges handling which is specified as a list on a rule
we provide a `Cidr` key which can be used as a value type filter evaluated
against each of the rules. If any iprange cidr match then the permission
matches.
.. code-block:: yaml
- type: ingress
IpProtocol: -1
FromPort: 445
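    A rough illustration of the `Cidr` key itself; the network below is a
    placeholder value:
    .. code-block:: yaml
      - type: ingress
        Cidr:
          value: "10.0.0.0/8"
          op: in
          value_type: cidr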
We also have specialized handling for matching self-references in
ingress/egress permissions. The following example matches on ingress
    rules which allow traffic from the security group itself.
.. code-block:: yaml
- type: ingress
SelfReference: True
    As well for assertions that an ingress/egress permission only matches
a given set of ports, *note* OnlyPorts is an inverse match.
.. code-block:: yaml
- type: egress
OnlyPorts: [22, 443, 80]
- type: egress
Cidr:
value_type: cidr
op: in
value: x.y.z
`Cidr` can match ipv4 rules and `CidrV6` can match ipv6 rules. In
this example we are blocking global inbound connections to SSH or
RDP.
.. code-block:: yaml
- or:
- type: ingress
Ports: [22, 3389]
Cidr:
value: "0.0.0.0/0"
- type: ingress
Ports: [22, 3389]
CidrV6:
value: "::/0"
`SGReferences` can be used to filter out SG references in rules.
In this example we want to block ingress rules that reference a SG
that is tagged with `Access: Public`.
.. code-block:: yaml
- type: ingress
SGReferences:
key: "tag:Access"
value: "Public"
op: equal
We can also filter SG references based on the VPC that they are
within. In this example we want to ensure that our outbound rules
that reference SGs are only referencing security groups within a
specified VPC.
.. code-block:: yaml
- type: egress
SGReferences:
key: 'VpcId'
value: 'vpc-11a1a1aa'
op: equal
Likewise, we can also filter SG references by their description.
For example, we can prevent egress rules from referencing any
SGs that have a description of "default - DO NOT USE".
.. code-block:: yaml
- type: egress
SGReferences:
key: 'Description'
value: 'default - DO NOT USE'
op: equal
"""
perm_attrs = {
'IpProtocol', 'FromPort', 'ToPort', 'UserIdGroupPairs',
'IpRanges', 'PrefixListIds'}
filter_attrs = {
'Cidr', 'CidrV6', 'Ports', 'OnlyPorts',
'SelfReference', 'Description', 'SGReferences'}
attrs = perm_attrs.union(filter_attrs)
attrs.add('match-operator')
def validate(self):
delta = set(self.data.keys()).difference(self.attrs)
delta.remove('type')
if delta:
raise PolicyValidationError("Unknown keys %s on %s" % (
", ".join(delta), self.manager.data))
return self
def process(self, resources, event=None):
self.vfilters = []
fattrs = list(sorted(self.perm_attrs.intersection(self.data.keys())))
self.ports = 'Ports' in self.data and self.data['Ports'] or ()
self.only_ports = (
'OnlyPorts' in self.data and self.data['OnlyPorts'] or ())
for f in fattrs:
fv = self.data.get(f)
if isinstance(fv, dict):
fv['key'] = f
else:
fv = {f: fv}
vf = ValueFilter(fv, self.manager)
vf.annotate = False
self.vfilters.append(vf)
return super(SGPermission, self).process(resources, event)
def process_ports(self, perm):
found = None
if 'FromPort' in perm and 'ToPort' in perm:
for port in self.ports:
if port >= perm['FromPort'] and port <= perm['ToPort']:
found = True
break
found = False
only_found = False
for port in self.only_ports:
if port == perm['FromPort'] and port == perm['ToPort']:
only_found = True
if self.only_ports and not only_found:
found = found is None or found and True or False
if self.only_ports and only_found:
found = False
return found
def _process_cidr(self, cidr_key, cidr_type, range_type, perm):
found = None
ip_perms = perm.get(range_type, [])
if not ip_perms:
return False
match_range = self.data[cidr_key]
if isinstance(match_range, dict):
match_range['key'] = cidr_type
else:
match_range = {cidr_type: match_range}
vf = ValueFilter(match_range, self.manager)
vf.annotate = False
for ip_range in ip_perms:
found = vf(ip_range)
if found:
break
else:
found = False
return found
def process_cidrs(self, perm):
found_v6 = found_v4 = None
if 'CidrV6' in self.data:
found_v6 = self._process_cidr('CidrV6', 'CidrIpv6', 'Ipv6Ranges', perm)
if 'Cidr' in self.data:
found_v4 = self._process_cidr('Cidr', 'CidrIp', 'IpRanges', perm)
match_op = self.data.get('match-operator', 'and') == 'and' and all or any
cidr_match = [k for k in (found_v6, found_v4) if k is not None]
if not cidr_match:
return None
return match_op(cidr_match)
def process_description(self, perm):
if 'Description' not in self.data:
return None
d = dict(self.data['Description'])
d['key'] = 'Description'
vf = ValueFilter(d, self.manager)
vf.annotate = False
for k in ('Ipv6Ranges', 'IpRanges', 'UserIdGroupPairs', 'PrefixListIds'):
if k not in perm or not perm[k]:
continue
return vf(perm[k][0])
return False
def process_self_reference(self, perm, sg_id):
found = None
ref_match = self.data.get('SelfReference')
if ref_match is not None:
found = False
if 'UserIdGroupPairs' in perm and 'SelfReference' in self.data:
self_reference = sg_id in [p['GroupId']
for p in perm['UserIdGroupPairs']]
if ref_match is False and not self_reference:
found = True
if ref_match is True and self_reference:
found = True
return found
def process_sg_references(self, perm, owner_id):
sg_refs = self.data.get('SGReferences')
if not sg_refs:
return None
sg_perm = perm.get('UserIdGroupPairs', [])
if not sg_perm:
return False
sg_group_ids = [p['GroupId'] for p in sg_perm if p.get('UserId', '') == owner_id]
sg_resources = self.manager.get_resources(sg_group_ids)
vf = ValueFilter(sg_refs, self.manager)
vf.annotate = False
for sg in sg_resources:
if vf(sg):
return True
return False
def expand_permissions(self, permissions):
"""Expand each list of cidr, prefix list, user id group pair
by port/protocol as an individual rule.
        The console UX automatically expands them out, as addition/removal is
        per this expansion; the describe calls automatically group them.
"""
for p in permissions:
np = dict(p)
values = {}
for k in (u'IpRanges',
u'Ipv6Ranges',
u'PrefixListIds',
u'UserIdGroupPairs'):
values[k] = np.pop(k, ())
np[k] = []
for k, v in values.items():
if not v:
continue
for e in v:
ep = dict(np)
ep[k] = [e]
yield ep
def __call__(self, resource):
matched = []
sg_id = resource['GroupId']
owner_id = resource['OwnerId']
match_op = self.data.get('match-operator', 'and') == 'and' and all or any
for perm in self.expand_permissions(resource[self.ip_permissions_key]):
perm_matches = {}
for idx, f in enumerate(self.vfilters):
perm_matches[idx] = bool(f(perm))
perm_matches['description'] = self.process_description(perm)
perm_matches['ports'] = self.process_ports(perm)
perm_matches['cidrs'] = self.process_cidrs(perm)
perm_matches['self-refs'] = self.process_self_reference(perm, sg_id)
perm_matches['sg-refs'] = self.process_sg_references(perm, owner_id)
perm_match_values = list(filter(
lambda x: x is not None, perm_matches.values()))
# account for one python behavior any([]) == False, all([]) == True
if match_op == all and not perm_match_values:
continue
match = match_op(perm_match_values)
if match:
matched.append(perm)
if matched:
resource['Matched%s' % self.ip_permissions_key] = matched
return True
SGPermissionSchema = {
'match-operator': {'type': 'string', 'enum': ['or', 'and']},
'Ports': {'type': 'array', 'items': {'type': 'integer'}},
'SelfReference': {'type': 'boolean'},
'OnlyPorts': {'type': 'array', 'items': {'type': 'integer'}},
'IpProtocol': {
'oneOf': [
{'enum': ["-1", -1, 'tcp', 'udp', 'icmp', 'icmpv6']},
{'$ref': '#/definitions/filters/value'}
]
},
'FromPort': {'oneOf': [
{'$ref': '#/definitions/filters/value'},
{'type': 'integer'}]},
'ToPort': {'oneOf': [
{'$ref': '#/definitions/filters/value'},
{'type': 'integer'}]},
'UserIdGroupPairs': {},
'IpRanges': {},
'PrefixListIds': {},
'Description': {},
'Cidr': {},
'CidrV6': {},
'SGReferences': {}
}
@SecurityGroup.filter_registry.register('ingress')
class IPPermission(SGPermission):
ip_permissions_key = "IpPermissions"
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {'type': {'enum': ['ingress']}},
'required': ['type']}
schema['properties'].update(SGPermissionSchema)
@SecurityGroup.filter_registry.register('egress')
class IPPermissionEgress(SGPermission):
ip_permissions_key = "IpPermissionsEgress"
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {'type': {'enum': ['egress']}},
'required': ['type']}
schema['properties'].update(SGPermissionSchema)
@SecurityGroup.action_registry.register('delete')
class Delete(BaseAction):
"""Action to delete security group(s)
It is recommended to apply a filter to the delete policy to avoid the
deletion of all security groups returned.
:example:
.. code-block:: yaml
policies:
- name: security-groups-unused-delete
resource: security-group
filters:
- type: unused
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('ec2:DeleteSecurityGroup',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
client.delete_security_group(GroupId=r['GroupId'])
@SecurityGroup.action_registry.register('remove-permissions')
class RemovePermissions(BaseAction):
"""Action to remove ingress/egress rule(s) from a security group
:example:
.. code-block:: yaml
policies:
- name: security-group-revoke-8080
resource: security-group
filters:
- type: ingress
IpProtocol: tcp
Ports: [8080]
actions:
- type: remove-permissions
ingress: matched
"""
schema = type_schema(
'remove-permissions',
ingress={'type': 'string', 'enum': ['matched', 'all']},
egress={'type': 'string', 'enum': ['matched', 'all']})
permissions = ('ec2:RevokeSecurityGroupIngress',
'ec2:RevokeSecurityGroupEgress')
def process(self, resources):
i_perms = self.data.get('ingress', 'matched')
e_perms = self.data.get('egress', 'matched')
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
for label, perms in [('ingress', i_perms), ('egress', e_perms)]:
if perms == 'matched':
key = 'MatchedIpPermissions%s' % (
label == 'egress' and 'Egress' or '')
groups = r.get(key, ())
elif perms == 'all':
key = 'IpPermissions%s' % (
label == 'egress' and 'Egress' or '')
groups = r.get(key, ())
elif isinstance(perms, list):
groups = perms
else:
continue
if not groups:
continue
method = getattr(client, 'revoke_security_group_%s' % label)
method(GroupId=r['GroupId'], IpPermissions=groups)
@SecurityGroup.action_registry.register('set-permissions')
class SetPermissions(BaseAction):
"""Action to add/remove ingress/egress rule(s) to a security group
:example:
.. code-block:: yaml
policies:
- name: ops-access-via
resource: aws.security-group
filters:
- type: ingress
IpProtocol: "-1"
Ports: [22, 3389]
Cidr: "0.0.0.0/0"
actions:
- type: set-permissions
# remove the permission matched by a previous ingress filter.
remove-ingress: matched
# remove permissions by specifying them fully, ie remove default outbound
# access.
remove-egress:
- IpProtocol: "-1"
Cidr: "0.0.0.0/0"
# add a list of permissions to the group.
add-ingress:
# full syntax/parameters to authorize can be used.
- IpPermissions:
- IpProtocol: TCP
FromPort: 22
ToPort: 22
IpRanges:
- Description: Ops SSH Access
CidrIp: "1.1.1.1/32"
- Description: Security SSH Access
CidrIp: "2.2.2.2/32"
# add a list of egress permissions to a security group
add-egress:
- IpProtocol: "TCP"
FromPort: 5044
ToPort: 5044
CidrIp: "192.168.1.2/32"
"""
schema = type_schema(
'set-permissions',
**{'add-ingress': {'type': 'array', 'items': {'type': 'object', 'minProperties': 1}},
'remove-ingress': {'oneOf': [
{'enum': ['all', 'matched']},
{'type': 'array', 'items': {'type': 'object', 'minProperties': 2}}]},
'add-egress': {'type': 'array', 'items': {'type': 'object', 'minProperties': 1}},
'remove-egress': {'oneOf': [
{'enum': ['all', 'matched']},
{'type': 'array', 'items': {'type': 'object', 'minProperties': 2}}]}}
)
permissions = (
'ec2:AuthorizeSecurityGroupEgress',
'ec2:AuthorizeSecurityGroupIngress',)
ingress_shape = "AuthorizeSecurityGroupIngressRequest"
egress_shape = "AuthorizeSecurityGroupEgressRequest"
def validate(self):
request_template = {'GroupId': 'sg-06bc5ce18a2e5d57a'}
for perm_type, shape in (
('egress', self.egress_shape), ('ingress', self.ingress_shape)):
            for perm in self.data.get('add-%s' % perm_type, ()):
params = dict(request_template)
params.update(perm)
shape_validate(params, shape, 'ec2')
def get_permissions(self):
perms = ()
if 'add-ingress' in self.data:
perms += ('ec2:AuthorizeSecurityGroupIngress',)
if 'add-egress' in self.data:
perms += ('ec2:AuthorizeSecurityGroupEgress',)
if 'remove-ingress' in self.data or 'remove-egress' in self.data:
perms += RemovePermissions.permissions
if not perms:
perms = self.permissions + RemovePermissions.permissions
return perms
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
for method, permissions in (
(client.authorize_security_group_egress, self.data.get('add-egress', ())),
(client.authorize_security_group_ingress, self.data.get('add-ingress', ()))):
for p in permissions:
p = dict(p)
p['GroupId'] = r['GroupId']
try:
method(**p)
except ClientError as e:
if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':
raise
remover = RemovePermissions(
{'ingress': self.data.get('remove-ingress', ()),
'egress': self.data.get('remove-egress', ())}, self.manager)
remover.process(resources)
@SecurityGroup.action_registry.register('post-finding')
class SecurityGroupPostFinding(OtherResourcePostFinding):
def format_resource(self, r):
fr = super(SecurityGroupPostFinding, self).format_resource(r)
fr['Type'] = 'AwsEc2SecurityGroup'
return fr
class DescribeENI(query.DescribeSource):
def augment(self, resources):
for r in resources:
r['Tags'] = r.pop('TagSet', [])
return resources
@resources.register('eni')
class NetworkInterface(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'eni'
enum_spec = ('describe_network_interfaces', 'NetworkInterfaces', None)
name = id = 'NetworkInterfaceId'
filter_name = 'NetworkInterfaceIds'
filter_type = 'list'
cfn_type = config_type = "AWS::EC2::NetworkInterface"
id_prefix = "eni-"
source_mapping = {
'describe': DescribeENI,
'config': query.ConfigSource
}
NetworkInterface.filter_registry.register('flow-logs', FlowLogFilter)
NetworkInterface.filter_registry.register(
'network-location', net_filters.NetworkLocation)
@NetworkInterface.filter_registry.register('subnet')
class InterfaceSubnetFilter(net_filters.SubnetFilter):
"""Network interface subnet filter
:example:
.. code-block:: yaml
policies:
- name: network-interface-in-subnet
resource: eni
filters:
- type: subnet
key: CidrBlock
value: 10.0.2.0/24
"""
RelatedIdsExpression = "SubnetId"
@NetworkInterface.filter_registry.register('security-group')
class InterfaceSecurityGroupFilter(net_filters.SecurityGroupFilter):
"""Network interface security group filter
:example:
.. code-block:: yaml
policies:
- name: network-interface-ssh
resource: eni
filters:
- type: security-group
match-resource: true
key: FromPort
value: 22
"""
RelatedIdsExpression = "Groups[].GroupId"
@NetworkInterface.filter_registry.register('vpc')
class InterfaceVpcFilter(net_filters.VpcFilter):
RelatedIdsExpression = "VpcId"
@NetworkInterface.action_registry.register('modify-security-groups')
class InterfaceModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
"""Remove security groups from an interface.
Can target either physical groups as a list of group ids or
symbolic groups like 'matched' or 'all'. 'matched' uses
the annotations of the 'group' interface filter.
Note an interface always gets at least one security group, so
we also allow specification of an isolation/quarantine group
that can be specified if there would otherwise be no groups.
:example:
.. code-block:: yaml
policies:
- name: network-interface-remove-group
resource: eni
filters:
- type: security-group
match-resource: true
key: FromPort
value: 22
actions:
- type: modify-security-groups
isolation-group: sg-01ab23c4
add: []
"""
permissions = ('ec2:ModifyNetworkInterfaceAttribute',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
groups = super(
InterfaceModifyVpcSecurityGroups, self).get_groups(resources)
for idx, r in enumerate(resources):
client.modify_network_interface_attribute(
NetworkInterfaceId=r['NetworkInterfaceId'],
Groups=groups[idx])
@NetworkInterface.action_registry.register('delete')
class DeleteNetworkInterface(BaseAction):
"""Delete a network interface.
:example:
.. code-block:: yaml
policies:
- name: mark-orphaned-enis
comment: Flag abandoned Lambda VPC ENIs for deletion
resource: eni
filters:
- Status: available
- type: value
op: glob
key: Description
value: "AWS Lambda VPC ENI*"
- "tag:custodian_status": absent
actions:
- type: mark-for-op
tag: custodian_status
msg: "Orphaned Lambda VPC ENI: {op}@{action_date}"
op: delete
days: 1
- name: delete-marked-enis
comment: Delete flagged ENIs that have not been cleaned up naturally
resource: eni
filters:
- type: marked-for-op
tag: custodian_status
op: delete
actions:
- type: delete
"""
permissions = ('ec2:DeleteNetworkInterface',)
schema = type_schema('delete')
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
try:
self.manager.retry(
client.delete_network_interface,
NetworkInterfaceId=r['NetworkInterfaceId'])
except ClientError as err:
if not err.response['Error']['Code'] == 'InvalidNetworkInterfaceID.NotFound':
raise
@resources.register('route-table')
class RouteTable(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'route-table'
enum_spec = ('describe_route_tables', 'RouteTables', None)
name = id = 'RouteTableId'
filter_name = 'RouteTableIds'
filter_type = 'list'
id_prefix = "rtb-"
cfn_type = config_type = "AWS::EC2::RouteTable"
@RouteTable.filter_registry.register('vpc')
class RouteTableVpcFilter(net_filters.VpcFilter):
RelatedIdsExpression = "VpcId"
@RouteTable.filter_registry.register('subnet')
class SubnetRoute(net_filters.SubnetFilter):
"""Filter a route table by its associated subnet attributes."""
RelatedIdsExpression = "Associations[].SubnetId"
    RelatedIdMapping = None
def get_related_ids(self, resources):
if self.RelatedIdMapping is None:
return super(SubnetRoute, self).get_related_ids(resources)
return list(itertools.chain(*[self.RelatedIdMapping[r['RouteTableId']] for r in resources]))
def get_related(self, resources):
rt_subnet_map = {}
main_tables = {}
manager = self.get_resource_manager()
for r in resources:
rt_subnet_map[r['RouteTableId']] = []
for a in r.get('Associations', ()):
if 'SubnetId' in a:
rt_subnet_map[r['RouteTableId']].append(a['SubnetId'])
elif a.get('Main'):
main_tables[r['VpcId']] = r['RouteTableId']
explicit_subnet_ids = set(itertools.chain(*rt_subnet_map.values()))
subnets = manager.resources()
for s in subnets:
if s['SubnetId'] in explicit_subnet_ids:
continue
if s['VpcId'] not in main_tables:
continue
rt_subnet_map.setdefault(main_tables[s['VpcId']], []).append(s['SubnetId'])
related_subnets = set(itertools.chain(*rt_subnet_map.values()))
self.RelatedIdMapping = rt_subnet_map
return {s['SubnetId']: s for s in subnets if s['SubnetId'] in related_subnets}
@RouteTable.filter_registry.register('route')
class Route(ValueFilter):
"""Filter a route table by its routes' attributes."""
schema = type_schema('route', rinherit=ValueFilter.schema)
schema_alias = False
def process(self, resources, event=None):
results = []
for r in resources:
matched = []
for route in r['Routes']:
if self.match(route):
matched.append(route)
if matched:
r.setdefault('c7n:matched-routes', []).extend(matched)
results.append(r)
return results
@resources.register('transit-gateway')
class TransitGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
enum_spec = ('describe_transit_gateways', 'TransitGateways', None)
name = id = 'TransitGatewayId'
arn = "TransitGatewayArn"
id_prefix = "tgw-"
filter_name = 'TransitGatewayIds'
filter_type = 'list'
cfn_type = 'AWS::EC2::TransitGateway'
class TransitGatewayAttachmentQuery(query.ChildResourceQuery):
def get_parent_parameters(self, params, parent_id, parent_key):
merged_params = dict(params)
merged_params.setdefault('Filters', []).append(
{'Name': parent_key, 'Values': [parent_id]})
return merged_params
@query.sources.register('transit-attachment')
class TransitAttachmentSource(query.ChildDescribeSource):
resource_query_factory = TransitGatewayAttachmentQuery
@resources.register('transit-attachment')
class TransitGatewayAttachment(query.ChildResourceManager):
child_source = 'transit-attachment'
class resource_type(query.TypeInfo):
service = 'ec2'
enum_spec = ('describe_transit_gateway_attachments', 'TransitGatewayAttachments', None)
parent_spec = ('transit-gateway', 'transit-gateway-id', None)
id_prefix = 'tgw-attach-'
name = id = 'TransitGatewayAttachmentId'
arn = False
cfn_type = 'AWS::EC2::TransitGatewayAttachment'
@resources.register('peering-connection')
class PeeringConnection(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'vpc-peering-connection'
enum_spec = ('describe_vpc_peering_connections',
'VpcPeeringConnections', None)
name = id = 'VpcPeeringConnectionId'
filter_name = 'VpcPeeringConnectionIds'
filter_type = 'list'
id_prefix = "pcx-"
cfn_type = config_type = "AWS::EC2::VPCPeeringConnection"
@PeeringConnection.filter_registry.register('cross-account')
class CrossAccountPeer(CrossAccountAccessFilter):
schema = type_schema(
'cross-account',
# white list accounts
whitelist_from=resolver.ValuesFrom.schema,
whitelist={'type': 'array', 'items': {'type': 'string'}})
permissions = ('ec2:DescribeVpcPeeringConnections',)
def process(self, resources, event=None):
results = []
accounts = self.get_accounts()
        # compile once and reuse across resources; a bare map() iterator
        # would be exhausted after the first resource on python3
        owners = [jmespath.compile(exp) for exp in (
            'AccepterVpcInfo.OwnerId', 'RequesterVpcInfo.OwnerId')]
for r in resources:
for o_expr in owners:
account_id = o_expr.search(r)
if account_id and account_id not in accounts:
r.setdefault(
'c7n:CrossAccountViolations', []).append(account_id)
results.append(r)
return results
@PeeringConnection.filter_registry.register('missing-route')
class MissingRoute(Filter):
"""Return peers which are missing a route in route tables.
If the peering connection is between two vpcs in the same account,
    the connection is returned unless a route to it is present in the
    route tables of each vpc.
If the peering connection is between accounts, then the local vpc's
route table is checked.
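    A minimal example policy using this filter:
    .. code-block:: yaml
            policies:
              - name: peering-connections-missing-routes
                resource: peering-connection
                filters:
                  - missing-route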
"""
schema = type_schema('missing-route')
permissions = ('ec2:DescribeRouteTables',)
def process(self, resources, event=None):
tables = self.manager.get_resource_manager(
'route-table').resources()
routed_vpcs = {}
mid = 'VpcPeeringConnectionId'
for t in tables:
for r in t.get('Routes', ()):
if mid in r:
routed_vpcs.setdefault(r[mid], []).append(t['VpcId'])
results = []
for r in resources:
if r[mid] not in routed_vpcs:
results.append(r)
continue
for k in ('AccepterVpcInfo', 'RequesterVpcInfo'):
if r[k]['OwnerId'] != self.manager.config.account_id:
continue
                if r[k].get('Region') and r[k]['Region'] != self.manager.config.region:
continue
if r[k]['VpcId'] not in routed_vpcs[r['VpcPeeringConnectionId']]:
results.append(r)
break
return results
@resources.register('network-acl')
class NetworkAcl(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'network-acl'
enum_spec = ('describe_network_acls', 'NetworkAcls', None)
name = id = 'NetworkAclId'
filter_name = 'NetworkAclIds'
filter_type = 'list'
cfn_type = config_type = "AWS::EC2::NetworkAcl"
id_prefix = "acl-"
@NetworkAcl.filter_registry.register('subnet')
class AclSubnetFilter(net_filters.SubnetFilter):
"""Filter network acls by the attributes of their attached subnets.
:example:
.. code-block:: yaml
policies:
- name: subnet-acl
resource: network-acl
filters:
- type: subnet
key: "tag:Location"
value: Public
"""
RelatedIdsExpression = "Associations[].SubnetId"
@NetworkAcl.filter_registry.register('s3-cidr')
class AclAwsS3Cidrs(Filter):
"""Filter network acls by those that allow access to s3 cidrs.
Defaults to filtering those nacls that do not allow s3 communication.
:example:
Find all nacls that do not allow communication with s3.
.. code-block:: yaml
policies:
- name: s3-not-allowed-nacl
resource: network-acl
filters:
- s3-cidr
"""
# TODO allow for port specification as range
schema = type_schema(
's3-cidr',
egress={'type': 'boolean', 'default': True},
ingress={'type': 'boolean', 'default': True},
present={'type': 'boolean', 'default': False})
permissions = ('ec2:DescribePrefixLists',)
def process(self, resources, event=None):
ec2 = local_session(self.manager.session_factory).client('ec2')
cidrs = jmespath.search(
"PrefixLists[].Cidrs[]", ec2.describe_prefix_lists())
cidrs = [parse_cidr(cidr) for cidr in cidrs]
results = []
check_egress = self.data.get('egress', True)
check_ingress = self.data.get('ingress', True)
present = self.data.get('present', False)
for r in resources:
matched = {cidr: None for cidr in cidrs}
for entry in r['Entries']:
if entry['Egress'] and not check_egress:
continue
if not entry['Egress'] and not check_ingress:
continue
entry_cidr = parse_cidr(entry['CidrBlock'])
for c in matched:
if c in entry_cidr and matched[c] is None:
matched[c] = (
entry['RuleAction'] == 'allow' and True or False)
if present and all(matched.values()):
results.append(r)
elif not present and not all(matched.values()):
results.append(r)
return results
class DescribeElasticIp(query.DescribeSource):
def augment(self, resources):
return [r for r in resources if self.manager.resource_type.id in r]
@resources.register('elastic-ip', aliases=('network-addr',))
class NetworkAddress(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'eip-allocation'
enum_spec = ('describe_addresses', 'Addresses', None)
name = 'PublicIp'
id = 'AllocationId'
id_prefix = 'eipalloc-'
filter_name = 'AllocationIds'
filter_type = 'list'
config_type = "AWS::EC2::EIP"
source_mapping = {
'describe': DescribeElasticIp,
'config': query.ConfigSource
}
NetworkAddress.filter_registry.register('shield-enabled', IsShieldProtected)
NetworkAddress.action_registry.register('set-shield', SetShieldProtection)
@NetworkAddress.action_registry.register('release')
class AddressRelease(BaseAction):
"""Action to release elastic IP address(es)
Use the force option to cause any attached elastic IPs to
also be released. Otherwise, only unattached elastic IPs
will be released.
:example:
.. code-block:: yaml
policies:
- name: release-network-addr
resource: network-addr
filters:
- AllocationId: ...
actions:
- type: release
force: True
"""
schema = type_schema('release', force={'type': 'boolean'})
permissions = ('ec2:ReleaseAddress', 'ec2:DisassociateAddress',)
def process_attached(self, client, associated_addrs):
for aa in list(associated_addrs):
try:
client.disassociate_address(AssociationId=aa['AssociationId'])
except ClientError as e:
                # If it's already been disassociated ignore, else raise.
                if not (e.response['Error']['Code'] == 'InvalidAssociationID.NotFound' and
                        aa['AssociationId'] in e.response['Error']['Message']):
raise e
associated_addrs.remove(aa)
return associated_addrs
def process(self, network_addrs):
client = local_session(self.manager.session_factory).client('ec2')
force = self.data.get('force')
assoc_addrs = [addr for addr in network_addrs if 'AssociationId' in addr]
unassoc_addrs = [addr for addr in network_addrs if 'AssociationId' not in addr]
if len(assoc_addrs) and not force:
self.log.warning(
"Filtered %d attached eips of %d eips. Use 'force: true' to release them.",
len(assoc_addrs), len(network_addrs))
elif len(assoc_addrs) and force:
unassoc_addrs = itertools.chain(
unassoc_addrs, self.process_attached(client, assoc_addrs))
for r in unassoc_addrs:
try:
client.release_address(AllocationId=r['AllocationId'])
except ClientError as e:
                # If it's already been released, ignore, else raise.
if e.response['Error']['Code'] != 'InvalidAllocationID.NotFound':
raise
@resources.register('customer-gateway')
class CustomerGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'customer-gateway'
enum_spec = ('describe_customer_gateways', 'CustomerGateways', None)
id = 'CustomerGatewayId'
filter_name = 'CustomerGatewayIds'
filter_type = 'list'
name = 'CustomerGatewayId'
id_prefix = "cgw-"
cfn_type = config_type = 'AWS::EC2::CustomerGateway'
@resources.register('internet-gateway')
class InternetGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'internet-gateway'
enum_spec = ('describe_internet_gateways', 'InternetGateways', None)
name = id = 'InternetGatewayId'
filter_name = 'InternetGatewayIds'
filter_type = 'list'
cfn_type = config_type = "AWS::EC2::InternetGateway"
id_prefix = "igw-"
@InternetGateway.action_registry.register('delete')
class DeleteInternetGateway(BaseAction):
"""Action to delete Internet Gateway
:example:
.. code-block:: yaml
policies:
- name: delete-internet-gateway
resource: internet-gateway
actions:
- type: delete
"""
schema = type_schema('delete')
permissions = ('ec2:DeleteInternetGateway',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
try:
client.delete_internet_gateway(InternetGatewayId=r['InternetGatewayId'])
except ClientError as err:
if not err.response['Error']['Code'] == 'InvalidInternetGatewayId.NotFound':
raise
@resources.register('nat-gateway')
class NATGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'nat-gateway'
enum_spec = ('describe_nat_gateways', 'NatGateways', None)
name = id = 'NatGatewayId'
filter_name = 'NatGatewayIds'
filter_type = 'list'
date = 'CreateTime'
dimension = 'NatGatewayId'
metrics_namespace = 'AWS/NATGateway'
id_prefix = "nat-"
cfn_type = config_type = 'AWS::EC2::NatGateway'
@NATGateway.action_registry.register('delete')
class DeleteNATGateway(BaseAction):
schema = type_schema('delete')
permissions = ('ec2:DeleteNatGateway',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
for r in resources:
client.delete_nat_gateway(NatGatewayId=r['NatGatewayId'])
@resources.register('vpn-connection')
class VPNConnection(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'vpc-connection'
enum_spec = ('describe_vpn_connections', 'VpnConnections', None)
name = id = 'VpnConnectionId'
filter_name = 'VpnConnectionIds'
filter_type = 'list'
cfn_type = config_type = 'AWS::EC2::VPNConnection'
id_prefix = "vpn-"
@resources.register('vpn-gateway')
class VPNGateway(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'vpc-gateway'
enum_spec = ('describe_vpn_gateways', 'VpnGateways', None)
name = id = 'VpnGatewayId'
filter_name = 'VpnGatewayIds'
filter_type = 'list'
cfn_type = config_type = 'AWS::EC2::VPNGateway'
id_prefix = "vgw-"
@resources.register('vpc-endpoint')
class VpcEndpoint(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'vpc-endpoint'
enum_spec = ('describe_vpc_endpoints', 'VpcEndpoints', None)
name = id = 'VpcEndpointId'
date = 'CreationTimestamp'
filter_name = 'VpcEndpointIds'
filter_type = 'list'
id_prefix = "vpce-"
universal_taggable = object()
cfn_type = config_type = "AWS::EC2::VPCEndpoint"
@VpcEndpoint.filter_registry.register('cross-account')
class EndpointCrossAccountFilter(CrossAccountAccessFilter):
policy_attribute = 'PolicyDocument'
annotation_key = 'c7n:CrossAccountViolations'
permissions = ('ec2:DescribeVpcEndpoints',)
@VpcEndpoint.filter_registry.register('security-group')
class EndpointSecurityGroupFilter(net_filters.SecurityGroupFilter):
RelatedIdsExpression = "Groups[].GroupId"
@VpcEndpoint.filter_registry.register('subnet')
class EndpointSubnetFilter(net_filters.SubnetFilter):
RelatedIdsExpression = "SubnetIds[]"
@VpcEndpoint.filter_registry.register('vpc')
class EndpointVpcFilter(net_filters.VpcFilter):
RelatedIdsExpression = "VpcId"
@Vpc.filter_registry.register("vpc-endpoint")
class VPCEndpointFilter(RelatedResourceByIdFilter):
"""Filters vpcs based on their vpc-endpoints
:example:
.. code-block:: yaml
policies:
- name: s3-vpc-endpoint-enabled
resource: vpc
filters:
- type: vpc-endpoint
key: ServiceName
value: com.amazonaws.us-east-1.s3
"""
RelatedResource = "c7n.resources.vpc.VpcEndpoint"
RelatedIdsExpression = "VpcId"
AnnotationKey = "matched-vpc-endpoint"
schema = type_schema(
'vpc-endpoint',
rinherit=ValueFilter.schema)
@Subnet.filter_registry.register("vpc-endpoint")
class SubnetEndpointFilter(RelatedResourceByIdFilter):
"""Filters subnets based on their vpc-endpoints
:example:
.. code-block:: yaml
policies:
- name: athena-endpoint-enabled
resource: subnet
filters:
- type: vpc-endpoint
key: ServiceName
value: com.amazonaws.us-east-1.athena
"""
RelatedResource = "c7n.resources.vpc.VpcEndpoint"
RelatedIdsExpression = "SubnetId"
RelatedResourceByIdExpression = "SubnetIds"
AnnotationKey = "matched-vpc-endpoint"
schema = type_schema(
'vpc-endpoint',
rinherit=ValueFilter.schema)
@resources.register('key-pair')
class KeyPair(query.QueryResourceManager):
class resource_type(query.TypeInfo):
service = 'ec2'
arn_type = 'key-pair'
enum_spec = ('describe_key_pairs', 'KeyPairs', None)
name = 'KeyName'
id = 'KeyPairId'
id_prefix = 'key-'
filter_name = 'KeyNames'
filter_type = 'list'
@KeyPair.filter_registry.register('unused')
class UnusedKeyPairs(Filter):
"""Filter for used or unused keys.
The default is unused but can be changed by using the state property.
:example:
.. code-block:: yaml
policies:
- name: unused-key-pairs
resource: aws.key-pair
filters:
- unused
- name: used-key-pairs
resource: aws.key-pair
filters:
- type: unused
state: false
"""
annotation_key = 'c7n:unused_keys'
permissions = ('ec2:DescribeKeyPairs',)
schema = type_schema('unused',
state={'type': 'boolean'})
def process(self, resources, event=None):
instances = self.manager.get_resource_manager('ec2').resources()
used = set(jmespath.search('[].KeyName', instances))
if self.data.get('state', True):
return [r for r in resources if r['KeyName'] not in used]
else:
return [r for r in resources if r['KeyName'] in used]
@KeyPair.action_registry.register('delete')
class DeleteUnusedKeyPairs(BaseAction):
"""Delete all ec2 keys that are not in use
    This action should always be used with the unused filter,
    and validation will prevent it from running without that filter.
:example:
.. code-block:: yaml
policies:
- name: delete-unused-key-pairs
resource: aws.key-pair
filters:
- unused
actions:
- delete
"""
permissions = ('ec2:DeleteKeyPair',)
schema = type_schema('delete')
def validate(self):
if not [f for f in self.manager.iter_filters() if isinstance(f, UnusedKeyPairs)]:
raise PolicyValidationError(
"delete should be used in conjunction with the unused filter on %s" % (
self.manager.data,))
if [True for f in self.manager.iter_filters() if f.data.get('state') is False]:
raise PolicyValidationError(
"You policy has filtered used keys you should use this with unused keys %s" % (
self.manager.data,))
return self
def process(self, unused):
client = local_session(self.manager.session_factory).client('ec2')
for key in unused:
client.delete_key_pair(KeyPairId=key['KeyPairId'])
@Vpc.action_registry.register('set-flow-log')
@Subnet.action_registry.register('set-flow-log')
@NetworkInterface.action_registry.register('set-flow-log')
class CreateFlowLogs(BaseAction):
"""Create flow logs for a network resource
:example:
.. code-block:: yaml
policies:
- name: vpc-enable-flow-logs
resource: vpc
filters:
- type: flow-logs
enabled: false
actions:
- type: set-flow-log
DeliverLogsPermissionArn: arn:iam:role
LogGroupName: /custodian/vpc/flowlogs/
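    A second, hedged sketch delivering to S3 instead (the bucket ARN is a
    placeholder); per the validation below, ``LogDestination`` is required
    and the cloud-watch-logs parameters must be omitted:
    .. code-block:: yaml
            policies:
              - name: vpc-enable-flow-logs-s3
                resource: vpc
                filters:
                  - type: flow-logs
                    enabled: false
                actions:
                  - type: set-flow-log
                    LogDestinationType: s3
                    LogDestination: arn:aws:s3:::my-flow-log-bucket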
"""
permissions = ('ec2:CreateFlowLogs', 'logs:CreateLogGroup',)
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['set-flow-log']},
'state': {'type': 'boolean'},
'DeliverLogsPermissionArn': {'type': 'string'},
'LogGroupName': {'type': 'string'},
'LogDestination': {'type': 'string'},
'LogFormat': {'type': 'string'},
'MaxAggregationInterval': {'type': 'integer'},
'LogDestinationType': {'enum': ['s3', 'cloud-watch-logs']},
'TrafficType': {
'type': 'string',
'enum': ['ACCEPT', 'REJECT', 'ALL']
}
}
}
RESOURCE_ALIAS = {
'vpc': 'VPC',
'subnet': 'Subnet',
'eni': 'NetworkInterface'
}
SchemaValidation = {
's3': {
'required': ['LogDestination'],
'absent': ['LogGroupName', 'DeliverLogsPermissionArn']
},
'cloud-watch-logs': {
'required': ['DeliverLogsPermissionArn'],
'one-of': ['LogGroupName', 'LogDestination'],
}
}
def validate(self):
self.state = self.data.get('state', True)
if not self.state:
return
destination_type = self.data.get(
'LogDestinationType', 'cloud-watch-logs')
dvalidation = self.SchemaValidation[destination_type]
for r in dvalidation.get('required', ()):
if not self.data.get(r):
raise PolicyValidationError(
'Required %s missing for destination-type:%s' % (
r, destination_type))
for r in dvalidation.get('absent', ()):
if r in self.data:
raise PolicyValidationError(
'%s is prohibited for destination-type:%s' % (
r, destination_type))
if ('one-of' in dvalidation and
sum([1 for k in dvalidation['one-of'] if k in self.data]) != 1):
raise PolicyValidationError(
"Destination:%s Exactly one of %s required" % (
destination_type, ", ".join(dvalidation['one-of'])))
return self
def delete_flow_logs(self, client, rids):
flow_logs = client.describe_flow_logs(
Filters=[{'Name': 'resource-id', 'Values': rids}])['FlowLogs']
try:
results = client.delete_flow_logs(
FlowLogIds=[f['FlowLogId'] for f in flow_logs])
for r in results['Unsuccessful']:
self.log.exception(
                    'Exception: delete flow-log for %s: %s',
r['ResourceId'], r['Error']['Message'])
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidParameterValue':
self.log.exception(
'delete flow-log: %s', e.response['Error']['Message'])
else:
raise
def process(self, resources):
client = local_session(self.manager.session_factory).client('ec2')
params = dict(self.data)
params.pop('type')
if self.data.get('state'):
params.pop('state')
model = self.manager.get_model()
params['ResourceIds'] = [r[model.id] for r in resources]
if not self.state:
self.delete_flow_logs(client, params['ResourceIds'])
return
params['ResourceType'] = self.RESOURCE_ALIAS[model.arn_type]
params['TrafficType'] = self.data.get('TrafficType', 'ALL').upper()
params['MaxAggregationInterval'] = self.data.get('MaxAggregationInterval', 600)
if self.data.get('LogDestinationType', 'cloud-watch-logs') == 'cloud-watch-logs':
self.process_log_group(self.data.get('LogGroupName'))
try:
results = client.create_flow_logs(**params)
for r in results['Unsuccessful']:
self.log.exception(
'Exception: create flow-log for %s: %s',
r['ResourceId'], r['Error']['Message'])
except ClientError as e:
if e.response['Error']['Code'] == 'FlowLogAlreadyExists':
self.log.exception(
'Exception: create flow-log: %s',
e.response['Error']['Message'])
else:
raise
def process_log_group(self, logroup):
client = local_session(self.manager.session_factory).client('logs')
try:
client.create_log_group(logGroupName=logroup)
except client.exceptions.ResourceAlreadyExistsException:
pass
|
py | 1a4a390295126f4164024a635c876c35f8edd328 | from django import forms
from .models import *
from django.contrib.auth.models import User
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('phone_number', 'profile_picture', 'city', 'country')
widgets = {
'phone_number': forms.NumberInput(
attrs={
'class': 'form-control',
'placeholder': 'Enter your mobile number ...'
}
),
'profile_picture': forms.FileInput(
attrs={
'class': 'form-control',
}
),
'city': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Enter your city ...'
}
),
'country': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Enter your country ...'
}
),
}
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ['username', 'email', 'password', 'first_name', 'last_name']
widgets = {
'username': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Enter your username ...'
}
),
'email': forms.EmailInput(
attrs={
'class': 'form-control',
}
),
'password': forms.TextInput(
attrs={
'class': 'form-control',
'readonly': True
}
),
'first_name': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Enter your First Name ...'
}
),
'last_name': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Enter your Last Name ...'
}
),
}
class PostAdForm(forms.ModelForm):
class Meta:
model = PostAd
fields = ['address', 'rooms', 'bathrooms', 'house_images']
widgets = {
'address': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Enter Address here ...'
}
),
'rooms': forms.NumberInput(
attrs={
'class': 'form-control',
'placeholder': 'Enter Rooms here ...'
}
),
'bathrooms': forms.NumberInput(
attrs={
'class': 'form-control',
'placeholder': 'Enter Bathrooms here ...'
}
),
'house_images': forms.ClearableFileInput(
attrs={
'class': 'form-control'
}
),
} |
py | 1a4a3940419da5f8495f3749a70268ff580d8f11 | import re
import requests
import xbmc
from ..scraper import Scraper
from ..common import clean_title
class dlfile(Scraper):
domains = ['http://dl.dlfile.pro/2/']
name = "dlfile"
sources = []
def __init__(self):
self.base_link = 'http://dl.dlfile.pro/2/'
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
start_url= self.base_link
html = requests.get(start_url,timeout=5).content
match = re.compile('<a href="(.+?)">(.+?)</a>').findall(html)
for url,name in match:
if name[0]==' ':
name = name[1:]
name = name.replace('/','')
url = self.base_link+url
if title.lower().replace(' ','')==name.lower().replace(' ',''):
season_pull = "0%s"%season if len(season)<2 else season
episode_pull = "0%s"%episode if len(episode)<2 else episode
eppy_chec = 'S%sE%s' %(season_pull,episode_pull)
html2 = requests.get(url,timeout=5).content
match2 = re.compile('<a href="(.+?)"').findall(html2)
for url2 in match2:
if eppy_chec in url2:
url2 = url+'/'+url2
print 'mecheck'+url2
if '1080p' in url2:
qual = '1080p'
elif '720p' in url2:
qual = '720p'
elif '480p' in url2:
qual = '480p'
else:
qual = 'SD'
self.sources.append({'source': 'Direct', 'quality': qual, 'scraper': self.name, 'url': url2,'direct': True})
return self.sources
except Exception as e:
print repr(e)
pass
return []
def scrape_movie(self, title, year, imdb, debrid = False):
try:
start_url= self.base_link
html = requests.get(start_url,timeout=5).content
match = re.compile('<a href="(.+?)">(.+?)</a>').findall(html)
for url,name in match:
if '.20' in name:
name = name.split('20')[0]
elif '.19' in name:
                    name = name.split('19')[0]
else:pass
if clean_title(title).lower()==clean_title(name).lower():
if year in url:
url = self.base_link+url
if '1080p' in url:
qual = '1080p'
elif '720p' in url:
qual = '720p'
elif '480p' in url:
qual = '480p'
else:
qual = 'SD'
self.sources.append({'source': 'Direct', 'quality': qual, 'scraper': self.name, 'url': url,'direct': True})
return self.sources
except Exception as e:
            print(repr(e))
pass
return []
|
py | 1a4a395659e65a8a02c141418fbbee87b212c896 | import os
from baselines import logger
from baselines.common.vec_env import VecEnvWrapper
from gym.wrappers.monitoring import video_recorder
class VecVideoRecorder(VecEnvWrapper):
"""
    Wrap a VecEnv to record rendered images as an mp4 video.
"""
def __init__(self, venv, directory, record_video_trigger, video_length=200):
"""
# Arguments
venv: VecEnv to wrap
directory: Where to save videos
record_video_trigger:
Function that defines when to start recording.
The function takes the current number of step,
and returns whether we should start recording or not.
video_length: Length of recorded video
"""
VecEnvWrapper.__init__(self, venv)
self.record_video_trigger = record_video_trigger
self.video_recorder = None
self.directory = os.path.abspath(directory)
if not os.path.exists(self.directory): os.mkdir(self.directory)
self.file_prefix = "vecenv"
self.file_infix = '{}'.format(os.getpid())
self.step_id = 0
self.video_length = video_length
self.recording = False
self.recorded_frames = 0
def reset(self):
obs = self.venv.reset()
self.start_video_recorder()
return obs
def start_video_recorder(self):
self.close_video_recorder()
base_path = os.path.join(self.directory, '{}.video.{}.video{:06}'.format(self.file_prefix, self.file_infix, self.step_id))
self.video_recorder = video_recorder.VideoRecorder(
env=self.venv,
base_path=base_path,
metadata={'step_id': self.step_id}
)
self.video_recorder.capture_frame()
self.recorded_frames = 1
self.recording = True
def _video_enabled(self):
return self.record_video_trigger(self.step_id)
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
self.step_id += 1
if self.recording:
self.video_recorder.capture_frame()
self.recorded_frames += 1
if self.recorded_frames > self.video_length:
logger.info("Saving video to ", self.video_recorder.path)
self.close_video_recorder()
elif self._video_enabled():
self.start_video_recorder()
return obs, rews, dones, infos
def close_video_recorder(self):
if self.recording:
self.video_recorder.close()
self.recording = False
self.recorded_frames = 0
def close(self):
VecEnvWrapper.close(self)
self.close_video_recorder()
def __del__(self):
self.close()
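# A minimal usage sketch for VecVideoRecorder, assuming the caller already has an env
# factory `make_env` (hypothetical here); the wrapper records a clip whenever the
# trigger returns True and stops after `video_length` frames:
#
#   from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
#   venv = DummyVecEnv([make_env])
#   venv = VecVideoRecorder(venv, directory="/tmp/videos",
#                           record_video_trigger=lambda step: step % 10000 == 0,
#                           video_length=200)
#   obs = venv.reset()  # reset() starts the first recorder; step_wait() handles the rest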
class VecVideoRecorderNamed(VecEnvWrapper):
"""
    Wrap a VecEnv to record rendered images as an mp4 video.
"""
def __init__(self, venv, directory, video_name, record_video_trigger, video_length=200):
"""
# Arguments
venv: VecEnv to wrap
        directory: Where to save videos
        video_name: Base file name (without extension) used for the saved video
record_video_trigger:
Function that defines when to start recording.
The function takes the current number of step,
and returns whether we should start recording or not.
video_length: Length of recorded video
"""
#print("initializing vecvideorecordernamed")
#print(directory)
#print(video_name)
VecEnvWrapper.__init__(self, venv)
self.record_video_trigger = record_video_trigger
self.video_recorder = None
self.directory = os.path.abspath(directory)
if not os.path.exists(self.directory): os.mkdir(self.directory)
self.video_name = video_name
self.file_prefix = "vecenv"
self.file_infix = '{}'.format(os.getpid())
self.step_id = 0
self.video_length = video_length
self.recording = False
self.recorded_frames = 0
def reset(self):
obs = self.venv.reset()
self.start_video_recorder()
return obs
def start_video_recorder(self):
self.close_video_recorder()
base_path = os.path.join(self.directory, self.video_name)
self.video_recorder = video_recorder.VideoRecorder(
env=self.venv,
base_path=base_path,
metadata={'step_id': self.step_id}
)
self.video_recorder.capture_frame()
self.recorded_frames = 1
self.recording = True
def _video_enabled(self):
return self.record_video_trigger(self.step_id)
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
self.step_id += 1
if self.recording:
self.video_recorder.capture_frame()
self.recorded_frames += 1
if self.recorded_frames > self.video_length:
logger.info("Saving video to ", self.video_recorder.path)
self.close_video_recorder()
elif self._video_enabled():
self.start_video_recorder()
return obs, rews, dones, infos
def close_video_recorder(self):
#print("!!closing video recorder!!")
if self.recording:
self.video_recorder.close()
self.recording = False
self.recorded_frames = 0
def close(self):
VecEnvWrapper.close(self)
self.close_video_recorder()
def __del__(self):
self.close()
|
py | 1a4a39fb6dc168e07d808784e089a3b4b0200158 | from collections import namedtuple
Instance = namedtuple('Instance', ('transform', 'shape', 'generator', 'materials'))
Material = namedtuple('Material', ('shadingEngine', 'surfaceShader', 'displacementShader', 'volumeShader'))
def createSphereWithAllShaders():
"""
Creates a poly sphere (pSphere1) with the following material.
place2dTexture1 ----> noise1 ----> bump1 ----> lambert1 ----\
displacementShader1 ----- initialShadingGroup
volumeFog1 ----/
You can see it with:
import maya.app.renderSetup.common.test.testScenes as scenes; scenes.createSphereWithAllShaders()
"""
pass
def createSurfaceShader(color):
"""
Creates a Surface Shader with the specified color.
Returns a tuple (shaderName, shadingEngineName)
"""
pass
def createAllShaders():
"""
place2dTexture# ----> noise# ----> bump# ----> blinn# ----\
displacementShader# ----- shadingGroup#
volumeFog# ----/
"""
pass
def assignShadingEngine(shape, shadingEngine, components='None'):
"""
Assign shading engine to shape.
"components" is an optional list of mesh face indices or nurbs surface patch indices
that can be used to specify per-face shading engine assignment.
A mesh index is given by an integer >= 0.
A surface patch is given by a tuple (span, section) where span and section are integers >= 0.
"""
pass
def createSceneWithMaterials():
"""
Create a test scene composed of
- 2 polySphere (instances of the same shape)
- 2 nurbsSphere (instances of the same shape)
First instance has whole shape material assignment.
Second instance has per-face shape material assignment.
- 1 polyCube without any per-face
- 1 directionalLight
Returns a set of Instance (named tuples) containing these 6 objects.
To see it in maya, just run python script:
import maya.app.renderSetup.common.test.testScenes as scenes; scenes.createSceneWithMaterials()
"""
pass
DefaultMaterial = ()
|
py | 1a4a3a870d1c658510630083fb005538c85b1195 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a bitcoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
import binascii
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from contextlib import contextmanager
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from itertools import chain
from threading import RLock, Thread
import uuid
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until
from test_framework.streams import StreamType
BIP0031_VERSION = 60000
MY_VERSION = 70015 # INVALID_CB_NO_BAN_VERSION
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
# from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MY_RELAY = 1
MAX_INV_SZ = 50000
MAX_PROTOCOL_RECV_PAYLOAD_LENGTH = 2 * 1024 * 1024
LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH = 1 * 1024 * 1024
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_XTHIN = (1 << 4)
NODE_BITCOIN_CASH = (1 << 5)
# How much data will be read from the network at once
READ_BUFFER_SIZE = 8192
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Lock used to synchronize access to data required by loop running in NetworkThread.
# It must be locked, for example, when adding new NodeConn object, otherwise loop in
# NetworkThread may try to access partially constructed object.
network_thread_loop_lock = RLock()
# Network thread acquires network_thread_loop_lock at start of each iteration and releases
# it at the end. Since the next iteration is run immediately after that, lock is acquired
# almost all of the time making it difficult for other threads to also acquire this lock.
# To work around this problem, NetworkThread first acquires network_thread_loop_intent_lock
# and immediately releases it before acquiring network_thread_loop_lock.
# Other threads (e.g. the ones calling NodeConn constructor) acquire both locks before
# proceeding. The end result is that other threads wait at most one iteration of loop in
# NetworkThread.
network_thread_loop_intent_lock = RLock()
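# Sketch of the locking pattern described above: threads that must not race with the
# network loop take both locks, intent lock first, e.g.
#
#   with network_thread_loop_intent_lock, network_thread_loop_lock:
#       ...  # construct or mutate objects shared with the NetworkThread
#
# (NodeConn.__init__ below follows exactly this pattern.)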
# ports used by chain type
NETWORK_PORTS = {
"mainnet" : 8333,
"testnet3" : 18333,
"stn" : 9333,
"regtest" : 18444
}
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def generator_based_serializator(fn):
def decorated(object_collection, *args, **kwargs):
first_elem = ser_compact_size(len(object_collection))
obj_generator = fn(object_collection, *args, **kwargs)
return b"".join(chain((first_elem,), obj_generator))
return decorated
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
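# Worked examples of the CompactSize encoding implemented above; the byte values
# follow directly from the branches of ser_compact_size:
#   ser_compact_size(100)   == b'\x64'                  (< 253: single byte)
#   ser_compact_size(515)   == b'\xfd\x03\x02'          (0xfd marker + uint16 LE)
#   ser_compact_size(70000) == b'\xfe\x70\x11\x01\x00'  (0xfe marker + uint32 LE)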
def ser_varint(v):
r = b""
length = 0
while True:
r += struct.pack("<B", (v & 0x7F) | (0x80 if length > 0 else 0x00))
if(v <= 0x7F):
return r[::-1] # Need as little-endian
v = (v >> 7) - 1
length += 1
def deser_varint(f):
ntot = 0
while True:
n = struct.unpack("<B", f.read(1))[0]
        ntot = (ntot << 7) | (n & 0x7F)
        if((n & 0x80) == 0):
            return ntot
        ntot += 1
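# Worked example of the variable-length integer format above: each byte carries seven
# bits, the high bit marks a continuation, most significant group first, with the
# off-by-one adjustment applied between groups:
#   ser_varint(0x7f) == b'\x7f'      deser_varint(BytesIO(b'\x7f'))     == 0x7f
#   ser_varint(128)  == b'\x80\x00'  deser_varint(BytesIO(b'\x80\x00')) == 128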
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
@generator_based_serializator
def ser_string(s):
return (s,) # return tuple with single member
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector.
@generator_based_serializator
def ser_vector(l, ser_function_name=""):
# using generator because of need for lazy evaluation
return (getattr(i, ser_function_name, i.serialize )() for i in l)
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
@generator_based_serializator
def ser_uint256_vector(l):
return (ser_uint256(i) for i in l)
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
@generator_based_serializator
def ser_string_vector(l):
return (ser_string(sv) for sv in l)
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
@generator_based_serializator
def ser_int_vector(l):
return (struct.pack("<i", i) for i in l)
def deser_varint_vector(f):
nit = deser_varint(f)
r = []
for i in range(nit):
t = deser_varint(f)
r.append(t)
return r
def ser_varint_vector(l):
r = ser_varint(len(l))
for v in l:
r += ser_varint(v)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Serialise a UUID association ID as a stream of bytes for sending over the network
def serialise_uuid_associd(assocId):
assocIdBytes = bytes()
if(assocId):
assocIdPlusType = b"".join((
struct.pack("<B", 0),
assocId.bytes
))
assocIdBytes = ser_string(assocIdPlusType)
return assocIdBytes
# Deserialise an association ID from the network into a UUID
def deserialise_uuid_associd(raw):
return uuid.UUID(bytes=raw[1:])
# Create a new random association ID
def create_association_id():
return uuid.uuid4()
# Objects that map to bitcoind objects, which can be serialized/deserialized
# Before the VERSION message has been exchanged the peer's nVersion is unknown, so the VERSION message itself uses an old CAddress format (without nTime)
# This class handles that old format
class CAddressInVersion(object):
def __init__(self, ip="0.0.0.0", port=0):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2 # ip is 16 bytes on wire to handle v6
self.ip = ip
self.port = port
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b"".join((
struct.pack("<Q", self.nServices),
self.pchReserved,
socket.inet_aton(self.ip),
struct.pack(">H", self.port),))
return r
def __repr__(self):
return "CAddressInVersion(nServices=%i ip=%s port=%i)" % (self.nServices, self.ip, self.port)
# Handle new-style CAddress objects (with nTime)
class CAddress():
def __init__(self, ip="0.0.0.0", port=0):
self.nServices = 1
self.nTime = int(time.time())
self.pchReserved = b"\x00" * 10 + b"\xff" * 2 # ip is 16 bytes on wire to handle v6
self.ip = ip
self.port = port
def deserialize(self, f):
self.nTime = struct.unpack("<L", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<L", self.nTime)
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i time=%d)" % (self.nServices, self.ip, self.port, self.nTime)
class CInv():
ERROR = 0
TX = 1
BLOCK = 2
COMPACT_BLOCK = 4
typemap = {
ERROR: "Error",
TX: "TX",
BLOCK: "Block",
COMPACT_BLOCK: "CompactBlock"
}
def __init__(self, t=ERROR, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b"".join((
struct.pack("<i", self.type),
ser_uint256(self.hash),))
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
def estimateMaxInvElements(max_payload_length=MAX_PROTOCOL_RECV_PAYLOAD_LENGTH):
return int((max_payload_length - 8) / (4 + 32))
class CProtoconf():
def __init__(self, number_of_fields=2, max_recv_payload_length=0, stream_policies=b"Default"):
self.number_of_fields = number_of_fields
self.max_recv_payload_length = max_recv_payload_length
self.stream_policies = stream_policies
def deserialize(self, f):
self.number_of_fields = deser_compact_size(f)
self.max_recv_payload_length = struct.unpack("<i", f.read(4))[0]
if self.number_of_fields > 1:
self.stream_policies = deser_string(f)
def serialize(self):
r = b""
r += ser_compact_size(self.number_of_fields)
r += struct.pack("<i", self.max_recv_payload_length)
if self.number_of_fields > 1:
r += ser_string(self.stream_policies)
return r
def __repr__(self):
return "CProtoconf(number_of_fields=%064x max_recv_payload_length=%064x stream_policies=%s)" \
% (self.number_of_fields, self.max_recv_payload_length, self.stream_policies)
class CBlockLocator():
def __init__(self, have=[]):
self.nVersion = MY_VERSION
self.vHave = have
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
ser_uint256_vector(self.vHave),))
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b"".join((
ser_uint256(self.hash),
struct.pack("<I", self.n),))
return r
def __hash__(self):
return self.hash + self.n
def __eq__(self, other):
return self.n == other.n and self.hash == other.hash
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b"".join((
self.prevout.serialize(),
ser_string(self.scriptSig),
struct.pack("<I", self.nSequence),))
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b"".join((
struct.pack("<q", self.nValue),
ser_string(self.scriptPubKey),))
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
ser_vector(self.vin),
ser_vector(self.vout),
struct.pack("<I", self.nLockTime),))
return r
# Recalculate the txid
def rehash(self):
self.sha256 = None
self.calc_sha256()
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = encode(
hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
self.rehash()
return "CTransaction(hash=%s nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.hash, self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None, json_notification=None):
if json_notification is None:
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
else:
self.nVersion = json_notification["version"]
self.hashPrevBlock = uint256_from_str(hex_str_to_bytes(json_notification["hashPrevBlock"])[::-1])
self.hashMerkleRoot = uint256_from_str(hex_str_to_bytes(json_notification["hashMerkleRoot"])[::-1])
self.nTime = json_notification["time"]
self.nBits = json_notification["bits"]
self.nNonce = json_notification["nonce"]
self.rehash()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
ser_uint256(self.hashPrevBlock),
ser_uint256(self.hashMerkleRoot),
struct.pack("<I", self.nTime),
struct.pack("<I", self.nBits),
struct.pack("<I", self.nNonce),))
return r
def calc_sha256(self):
if self.sha256 is None:
r = b"".join((
struct.pack("<i", self.nVersion),
ser_uint256(self.hashPrevBlock),
ser_uint256(self.hashMerkleRoot),
struct.pack("<I", self.nTime),
struct.pack("<I", self.nBits),
struct.pack("<I", self.nNonce),))
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
self.rehash()
return "CBlockHeader(hash=%s nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.hash, self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = b"".join((
super(CBlock, self).serialize(),
ser_vector(self.vtx),))
return r
# Calculate the merkle root given a vector of transaction hashes
def get_merkle_root(self, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i + 1, len(hashes) - 1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
self.rehash()
return "CBlock(hash=%s nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.hash, self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert():
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
struct.pack("<q", self.nRelayUntil),
struct.pack("<q", self.nExpiration),
struct.pack("<i", self.nID),
struct.pack("<i", self.nCancel),
ser_int_vector(self.setCancel),
struct.pack("<i", self.nMinVer),
struct.pack("<i", self.nMaxVer),
ser_string_vector(self.setSubVer),
struct.pack("<i", self.nPriority),
ser_string(self.strComment),
ser_string(self.strStatusBar),
ser_string(self.strReserved),))
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert():
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b"".join((
ser_string(self.vchMsg),
ser_string(self.vchSig),))
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction():
def __init__(self, index=0, tx=None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self):
r = b"".join((
ser_compact_size(self.index),
self.tx.serialize(),))
return r
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(
struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
def serialize(self):
r = b"".join((
self.header.serialize(),
struct.pack("<Q", self.nonce),
ser_compact_size(self.shortids_length),
b"".join( struct.pack("<Q", x)[0:6] for x in self.shortids), # We only want the first 6 bytes
ser_vector(self.prefilled_txn),))
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids=None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(
PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(
PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [key0, key1]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list=[0]):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [PrefilledTransaction(i, block.vtx[i])
for i in prefill_list]
self.shortids = []
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
# callback message for dsnt-enabled transactions
class CallbackMessage():
# 127.0.0.1 as network-order bytes
LOCAL_HOST_IP = 0x7F000001
MAX_INT64 = 0xFFFFFFFFFFFFFFFF
IPv6_version = 129
IPv4_version = 1
def __init__(self, version=1, ip_addresses=[LOCAL_HOST_IP], inputs=[0]):
self.version = version
self.ip_addresses = ip_addresses
self.ip_address_count = len(ip_addresses)
self.inputs = inputs
def ser_addrs(self, addrs):
rs = b""
for addr in addrs:
if (self.version == self.IPv6_version):
rs += struct.pack('>QQ', (addr >> 64) & self.MAX_INT64, addr & self.MAX_INT64)
else:
rs += struct.pack("!I", addr)
return rs
def deser_addrs(self, f):
addrs = []
for i in range(self.ip_address_count):
if (self.version == self.IPv6_version):
a, b = struct.unpack('>QQ', f.read(16))
unpacked = (a << 64) | b
addrs.append(unpacked)
else:
addrs.append(struct.unpack("!I", f.read(4))[0])
return addrs
def deserialize(self, f):
self.version = struct.unpack("<B", f.read(1))[0]
self.ip_address_count = deser_compact_size(f)
self.ip_addresses = self.deser_addrs(f)
self.inputs = deser_varint_vector(f)
def serialize(self):
r = b""
r += struct.pack("<B", self.version)
r += ser_compact_size(self.ip_address_count)
r += self.ser_addrs(self.ip_addresses)
r += ser_varint_vector(self.inputs)
return r
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes=None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b"".join((
ser_uint256(self.blockhash),
ser_compact_size(len(self.indexes)),
b"".join(ser_compact_size(x) for x in self.indexes)))
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x - last_index - 1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x + last_index + 1)
last_index = absolute_indexes[-1]
return absolute_indexes
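    # Worked example of the differential encoding handled by from_absolute()/to_absolute():
    # absolute indexes [1, 3, 4] are stored as the gaps [1, 1, 0], and to_absolute()
    # reverses the transformation.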
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions=None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self):
r = b"".join((
ser_uint256(self.blockhash),
ser_vector(self.transactions),))
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddressInVersion()
self.addrFrom = CAddressInVersion()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
self.assocID = create_association_id()
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddressInVersion()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddressInVersion()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
try:
uuidBytes = deser_string(f)
self.assocID = deserialise_uuid_associd(uuidBytes)
except:
self.assocID = None
except:
self.nRelay = 0
else:
self.nRelay = 0
self.assocID = None
def serialize(self):
r = b"".join((
struct.pack("<i", self.nVersion),
struct.pack("<Q", self.nServices),
struct.pack("<q", self.nTime),
self.addrTo.serialize(),
self.addrFrom.serialize(),
struct.pack("<Q", self.nNonce),
ser_string(self.strSubVer),
struct.pack("<i", self.nStartingHeight),
struct.pack("<b", self.nRelay),
serialise_uuid_associd(self.assocID),
))
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i assocID=%s)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay, str(self.assocID))
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_createstream():
command = b"createstrm"
def __init__(self, stream_type, stream_policy=b"", assocID=None):
self.assocID = assocID
self.stream_type = stream_type
self.stream_policy = stream_policy
def deserialize(self, f):
uuidBytes = deser_string(f)
self.assocID = deserialise_uuid_associd(uuidBytes)
self.stream_type = struct.unpack("<B", f.read(1))[0]
self.stream_policy = deser_string(f)
def serialize(self):
return b"".join((
serialise_uuid_associd(self.assocID),
struct.pack("<B", self.stream_type),
ser_string(self.stream_policy),
))
def __repr__(self):
return "msg_createstream(assocID=%s stream_type=%i stream_policy=%s)" % (str(self.assocID), self.stream_type,
str(self.stream_policy))
class msg_streamack():
command = b"streamack"
def __init__(self, assocID=None, stream_type=StreamType.UNKNOWN.value):
self.assocID = assocID
self.stream_type = stream_type
def deserialize(self, f):
uuidBytes = deser_string(f)
self.assocID = deserialise_uuid_associd(uuidBytes)
self.stream_type = struct.unpack("<B", f.read(1))[0]
def serialize(self):
return b"".join((
serialise_uuid_associd(self.assocID),
struct.pack("<B", self.stream_type),
))
def __repr__(self):
return "msg_streamack(assocID=%s stream_type=%i)" % (str(self.assocID), self.stream_type)
class msg_protoconf():
command = b"protoconf"
def __init__(self, protoconf=None):
if protoconf is None:
self.protoconf = CProtoconf(2,0,b"")
else:
self.protoconf = protoconf
def deserialize(self, f):
        self.protoconf.deserialize(f)
def serialize(self):
r = b""
r += self.protoconf.serialize()
return r
def __repr__(self):
return "msg_protoconf(protoconf=%s)" % (repr(self.protoconf))
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert():
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
return self.alert.serialize()
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b"".join((
self.locator.serialize(),
ser_uint256(self.hashstop),))
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31():
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
return struct.pack("<Q", self.nonce)
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
return struct.pack("<Q", self.nonce)
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self, locator_have=[]):
self.locator = CBlockLocator(locator_have)
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b"".join((
self.locator.serialize(),
ser_uint256(self.hashstop),))
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self, message=b"", code=0, reason=b"", data=0):
self.message = message
self.code = code
self.reason = reason
self.data = data
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
return struct.pack("<Q", self.feerate)
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self, announce=False):
self.announce = announce
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b"".join((
struct.pack("<?", self.announce),
struct.pack("<Q", self.version),))
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids=None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
return self.header_and_shortids.serialize()
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
return self.block_txn_request.serialize()
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
return self.block_transactions.serialize()
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_notfound():
command = b"notfound"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_notfound(inv=%s)" % (repr(self.inv))
# Data for the merkle proof node part of the double-spend detected P2P message
class MerkleProofNode():
def __init__(self, node=0):
self.nodeType = 0
self.node = node
def deserialize(self, f):
self.nodeType = struct.unpack("<B", f.read(1))[0]
# Currently only type 0 is supported (it means node is always uint256)
assert(self.nodeType == 0)
self.node = deser_uint256(f)
def serialize(self):
r = b"".join((
struct.pack("<B", self.nodeType),
ser_uint256(self.node),))
return r
def __repr__(self):
return "MerkleProofNode(type=%i node=%064x)" % (self.nodeType, self.node)
# Data for the merkle proof part of the double-spend detected P2P message
class DSMerkleProof():
def __init__(self, txIndex=0, tx=CTransaction(), merkleRoot=0, proof=None, json_notification=None):
if json_notification is None:
self.txIndex = txIndex
self.tx = tx
self.merkleRoot = merkleRoot
if proof is None:
self.proof = []
else:
self.proof = proof
else:
self.txIndex = json_notification["index"]
self.tx = FromHex(CTransaction(), json_notification["txOrId"])
# Only merkleRoot target type is currently supported
assert(json_notification["targetType"] == "merkleRoot")
self.merkleRoot = uint256_from_str(hex_str_to_bytes(json_notification["target"])[::-1])
self.proof = []
for node in json_notification["nodes"]:
self.proof.append(MerkleProofNode(uint256_from_str(hex_str_to_bytes(node)[::-1])))
def deserialize(self, f):
flags = struct.unpack("<B", f.read(1))[0]
# Should always be 5
assert(flags == 5)
self.txIndex = deser_compact_size(f)
# Length of transaction bytes is deserialized as required by the specification, but we don't actually need it to deserialize the transaction
deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
self.merkleRoot = deser_uint256(f)
self.proof = deser_vector(f, MerkleProofNode)
def serialize(self):
txSerialized = self.tx.serialize()
r = b"".join((
struct.pack("<B", 5),
ser_compact_size(self.txIndex),
ser_compact_size(len(txSerialized)),
txSerialized,
ser_uint256(self.merkleRoot),
ser_vector(self.proof),))
return r
def __repr__(self):
return "DSMerkleProof(txIndex=%i tx=%s merkleRoot=%064x proof=%s)" % (self.txIndex, repr(self.tx), self.merkleRoot, repr(self.proof))
# Data for the block details part of the double-spend detected P2P message
class BlockDetails():
def __init__(self, blockHeaders=None, merkleProof=DSMerkleProof(), json_notification=None):
if json_notification is None:
if blockHeaders is None:
self.blockHeaders = []
else:
self.blockHeaders = blockHeaders
self.merkleProof = merkleProof
else:
self.blockHeaders = []
for blockHeader in json_notification["headers"]:
self.blockHeaders.append(CBlockHeader(json_notification=blockHeader))
self.merkleProof = DSMerkleProof(json_notification=json_notification["merkleProof"])
def deserialize(self, f):
self.blockHeaders = deser_vector(f, CBlockHeader)
self.merkleProof = DSMerkleProof()
self.merkleProof.deserialize(f)
def serialize(self):
r = b"".join((
ser_vector(self.blockHeaders),
self.merkleProof.serialize(),))
return r
def __repr__(self):
return "BlockDetails(blockHeaders=%s merkleProof=%s)" % (repr(self.blockHeaders), repr(self.merkleProof))
# Double-spend detected P2P message
class msg_dsdetected():
command = b"dsdetected"
def __init__(self, version=1, blocksDetails=None, json_notification=None):
if (json_notification is None):
self.version = version
if blocksDetails is None:
self.blocksDetails = []
else:
self.blocksDetails = blocksDetails
else:
self.version = json_notification["version"]
self.blocksDetails = []
for json_blockDetails in json_notification["blocks"]:
self.blocksDetails.append(BlockDetails(json_notification=json_blockDetails))
def deserialize(self, f):
self.version = struct.unpack("<H", f.read(2))[0]
self.blocksDetails = deser_vector(f, BlockDetails)
def serialize(self):
r = b"".join((
struct.pack("<H", self.version),
ser_vector(self.blocksDetails),))
return r
def __repr__(self):
return "msg_dsdetected(version=%i blocksDetails=%s)" % (self.version, repr(self.blocksDetails))
class NodeConnCB():
"""Callback and helper functions for P2P connection to a bitcoind node.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour.
"""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.msg_timestamp = {}
self.last_message = {}
self.time_index = 0
self.msg_index = defaultdict(int)
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
Optionally waits for deliver_sleep_time before dispatching message.
"""
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
self.msg_timestamp[command] = time.time()
self.msg_index[command] = self.time_index
self.time_index +=1
getattr(self, 'on_' + command)(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
raise
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
self.connection = None
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
def on_reject(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_streamack(self, conn, message): pass
def on_protoconf(self, conn, message): pass
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
self.send_protoconf(conn)
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
def on_notfound(self, conn, message): pass
def send_protoconf(self, conn):
conn.send_message(msg_protoconf(CProtoconf(2, MAX_PROTOCOL_RECV_PAYLOAD_LENGTH, b"BlockPriority,Default")))
# Connection helper methods
def add_connection(self, conn):
self.connection = conn
def wait_for_disconnect(self, timeout=60):
def test_function(): return not self.connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def clear_messages(self):
with mininode_lock:
self.message_count.clear()
def wait_for_block(self, blockhash, timeout=60):
def test_function(): return self.last_message.get(
"block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
def test_function(): return self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
def test_function(): return self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60, check_interval=0.05):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError(
"wait_for_inv() will only verify the first inv object")
def test_function(): return self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock, check_interval=check_interval)
def wait_for_verack(self, timeout=60):
def test_function(): return self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_reject(self, timeout=60):
def test_function(): return self.message_count["reject"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_protoconf(self, timeout=60):
def test_function(): return self.message_count["protoconf"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_streamack(self, timeout=60):
def test_function(): return self.message_count["streamack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_message(self, message):
if self.connection:
self.connection.send_message(message)
else:
logger.error("Cannot send message. No connection to node!")
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
# use ping to guarantee that previously sent p2p messages were processed
self.send_message(msg_ping(nonce=self.ping_counter))
def test_function():
if not self.last_message.get("pong"):
return False
if self.last_message["pong"].nonce != self.ping_counter:
return False
# after we receive pong we need to check that there are no async
# block/transaction processes still running
activity = self.connection.rpc.getblockchainactivity()
return sum(activity.values()) == 0
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
@contextmanager
def temporary_override_callback(self, **callbacks):
old_callbacks = {cb_name: getattr(self, cb_name) for cb_name in callbacks.keys()}
for cb_name, cb in callbacks.items():
setattr(self, cb_name, cb)
yield
for cb_name, cb in old_callbacks.items():
setattr(self, cb_name, cb)
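# A minimal sketch of the subclassing pattern the NodeConnCB docstring describes:
# override the on_* callbacks of interest and attach the object to a NodeConn
# (`rpc` is assumed to be an existing RPC connection to the node under test):
#
#   class MyNode(NodeConnCB):
#       def on_inv(self, conn, message):
#           pass  # e.g. ignore inv announcements instead of requesting the data
#
#   node = MyNode()
#   conn = NodeConn('127.0.0.1', 18444, rpc, node)   # 18444 = regtest port
#   node.add_connection(conn)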
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"protoconf": msg_protoconf,
b"verack": msg_verack,
b"createstrm": msg_createstream,
b"streamack": msg_streamack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn,
b"notfound": msg_notfound
}
MAGIC_BYTES = {
"mainnet": b"\xe3\xe1\xf3\xe8",
"testnet3": b"\xf4\xe5\xf3\xf4",
"stn": b"\xfb\xce\xc4\xf9",
"regtest": b"\xda\xb5\xbf\xfa",
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True,
strSubVer=None, assocID=None, nullAssocID=False):
# Lock must be acquired when new object is added to prevent NetworkThread from trying
# to access partially constructed object or trying to call callbacks before the connection
# is established.
with network_thread_loop_intent_lock, network_thread_loop_lock:
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = bytearray()
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
self.maxInvElements = CInv.estimateMaxInvElements(LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH)
self.strSubVer = strSubVer
self.assocID = assocID
if(assocID):
send_version = False
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
if(strSubVer):
vt.strSubVer = strSubVer
if(nullAssocID):
vt.assocID = None
self.send_message(vt, True)
self.assocID = vt.assocID
logger.info('Connecting to Bitcoin Node: %s:%d' %
(self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" %
(self.dstaddr, self.dstport))
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
logger.debug("Closing connection to: %s:%d" %
(self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = bytearray()
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
with mininode_lock:
t = self.recv(READ_BUFFER_SIZE)
if len(t) > 0:
self.recvbuf += t
while True:
msg = self.got_data()
if msg is None:
break
self.got_message(msg)
def readable(self):
return True
def writable(self):
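# asyncore only polls us for write events while we are still connecting or
# have buffered data waiting to be sent.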
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
del self.sendbuf[:sent]
def got_data(self):
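# Parse one framed P2P message out of recvbuf: 4-byte network magic,
# 12-byte null-padded command, 4-byte payload length, then (for protocol
# versions >= 209) a 4-byte double-SHA256 checksum followed by the payload.
# Returns the deserialized message, or None if the buffer is still incomplete.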
try:
with mininode_lock:
if len(self.recvbuf) < 4:
return None
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return None
command = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0]
payloadlen = struct.unpack(
"<i", self.recvbuf[4 + 12:4 + 12 + 4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + payloadlen:
return None
msg = self.recvbuf[4 + 12 + 4:4 + 12 + 4 + payloadlen]
self.recvbuf = self.recvbuf[4 + 12 + 4 + payloadlen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return None
command = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0]
payloadlen = struct.unpack(
"<i", self.recvbuf[4 + 12:4 + 12 + 4])[0]
checksum = self.recvbuf[4 + 12 + 4:4 + 12 + 4 + 4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + payloadlen:
return None
msg = self.recvbuf[4 + 12 + 4 + 4:4 + 12 + 4 + 4 + payloadlen]
h = sha256(sha256(msg))
if checksum != h[:4]:
raise ValueError(
"got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4 + 12 + 4 + 4 + payloadlen:]
if command not in self.messagemap:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (
self.dstaddr, self.dstport, command, repr(msg)))
raise ValueError("Unknown command: '%s'" % (command))
f = BytesIO(msg)
m = self.messagemap[command]()
m.deserialize(f)
return m
except Exception as e:
logger.exception('got_data: %s', repr(e))
raise
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
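# Frame the payload: network magic, 12-byte null-padded command, payload
# length, a 4-byte double-SHA256 checksum (protocol >= 209), then the data.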
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr,
self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
def disconnect_node(self):
self.disconnect = True
NetworkThread_should_stop = False
def StopNetworkThread():
global NetworkThread_should_stop
NetworkThread_should_stop = True
class NetworkThread(Thread):
poll_timeout = 0.1
def run(self):
while mininode_socket_map and not NetworkThread_should_stop:
with network_thread_loop_intent_lock:
# Acquire and immediately release lock.
# This allows other threads to more easily acquire network_thread_loop_lock by
# acquiring (and holding) network_thread_loop_intent_lock first since NetworkThread
# will block on trying to acquire network_thread_loop_intent_lock in the line above.
# If this was not done, other threads would need to wait for a long time (>10s) for
# network_thread_loop_lock since it is released only briefly between two loop iterations.
pass
with network_thread_loop_lock:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[obj.handle_close() for obj in disconnected]
try:
asyncore.loop(NetworkThread.poll_timeout, use_poll=True, map=mininode_socket_map, count=1)
except Exception as e:
# All exceptions are caught to prevent them from taking down the network thread.
# Since the error cannot be easily reported, it is just logged assuming that if
# the error is relevant, the test will detect it in some other way.
logger.warning("mininode NetworkThread: asyncore.loop() failed! " + str(e))
logger.debug("Network thread closing")
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
py | 1a4a3af71c072d23af1985568965ad4155127502 | from typing import Optional, Sequence, List
from gvm.typing import unpack_type_argument, merge_sequence_type, make_optional_type, make_sequence_type, is_subclass
def test_unpack_type_arguments():
assert unpack_type_argument(Optional[int]) is int
assert unpack_type_argument(Sequence[int]) is int
assert unpack_type_argument(List[int]) is List[int]
assert unpack_type_argument(int) is int
def test_merge_sequence_type():
assert merge_sequence_type(int, int) == Sequence[int]
assert merge_sequence_type(Sequence[int], int) == Sequence[int]
assert merge_sequence_type(int, Sequence[int]) == Sequence[int]
assert merge_sequence_type(Sequence[int], Sequence[int]) == Sequence[int]
assert merge_sequence_type(Optional[int], Sequence[int]) == Sequence[int]
assert merge_sequence_type(Sequence[int], Optional[int]) == Sequence[int]
assert merge_sequence_type(Optional[int], Optional[int]) == Sequence[int]
assert merge_sequence_type(int, Optional[int]) == Sequence[int]
assert merge_sequence_type(Optional[int], int) == Sequence[int]
def test_make_sequence_type():
assert make_sequence_type(int) == Sequence[int]
assert make_sequence_type(Optional[int]) == Sequence[int]
assert make_sequence_type(Sequence[int]) == Sequence[int]
def test_make_optional_type():
assert make_optional_type(int) == Optional[int]
assert make_optional_type(Optional[int]) == Optional[int]
assert make_optional_type(Sequence[int]) == Sequence[int]
def test_is_subclass():
class_a = type('A', (), {})
class_b = type('B', (class_a,), {})
class_c = type('C', (class_a,), {})
assert is_subclass(int, int)
assert is_subclass(bool, int)
assert is_subclass(Sequence[int], Sequence[int])
assert is_subclass(Sequence[bool], Sequence[int])
assert not is_subclass(Sequence[bool], Sequence[str])
assert is_subclass(Optional[bool], Optional[int])
assert not is_subclass(Optional[str], Optional[int])
assert is_subclass(class_c, class_a)
assert not is_subclass(class_a, class_c)
assert is_subclass(class_b, class_a)
assert not is_subclass(class_a, class_b)
assert not is_subclass(class_b, class_c)
assert not is_subclass(class_c, class_b)
|
py | 1a4a3b3a130f93e59e3b080f76793afaf29798dd | import os
import shutil
import click
import jinja2
import pdfkit
import yaml
__author__ = "Kevin Ciarniello"
__copyright__ = "Copyright 2017, Kevin Ciarniello"
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Kevin Ciarniello"
__email__ = "[email protected]"
# Template defaults
defaults = {
'labels': None,
}
def get_theme_directory():
"""
Gets the theme directory
:return: a string of the themes directory
"""
return os.path.abspath('theme')
def read_yaml(filename):
"""
Reads the yaml file in and converts it to a yaml dict
:param filename: the file to convert
:return: a dictionary from the yaml
"""
with open(filename, 'rt') as f:
return yaml.safe_load(f)
def render(filename, variables):
"""
Grabs the jinja2 file and renders it
:param filename: the jinja2 file to render
:param variables:
:return:
"""
with open(filename, 'rt') as f:
filename = jinja2.Template(f.read())
return filename.render(**variables)
def jinja2_files(source, files):
"""
Set up an ignore method for the copy; we want to ignore the .jinja2 files
:param source: the source directory
:param files: all the files from the source directory
:return: a list of files that don't include .jinja2
"""
return [filename for filename in files if filename.endswith('.jinja2')]
def build(data, config, output_dir):
"""
Build the HTML or the PDF to the output_dir
:param data:
:param config:
:param output_dir:
:return:
"""
variables = defaults.copy()
variables.update(data)
variables['config'] = config
# Clean the output directory
shutil.rmtree(output_dir, ignore_errors=True)
# Copy
shutil.copytree(get_theme_directory(), output_dir, ignore=jinja2_files)
# Get all the .jinja2 files
files = jinja2_files(None, os.listdir(get_theme_directory()))
for filename in files:
output_file = os.path.join(get_theme_directory(), filename)
html = render(output_file, variables)
# Create HTML type names
rendered_file = filename.replace('.jinja2', '.html')
# Remove any unusual characters
output_html = html.encode('ascii', 'ignore').decode('ascii')
# Write to the file
with open(os.path.join(output_dir, rendered_file), 'w+') as f:
f.write(output_html)
def generate_html(config, data):
"""
Generate the HTML
:param config:
:param data:
:return:
"""
output_dir = config.get('output_dir', 'build')
build(data, config, output_dir)
def generate_pdf(config, data):
"""
Generate a PDF from the HTML file
:param config:
:param data:
:return:
"""
output_dir = config.get('output_dir', 'build')
filename = config.get('name') + " " + str(config.get('year'))
output_file = os.path.join(output_dir, filename.strip().replace(" ", "-") + '-resume.pdf')
input_file = os.path.join(output_dir, 'index.html')
if not os.path.exists(input_file):
generate_html(config, data)
print(input_file)
if os.path.exists(input_file):
convert_html_to_pdf(input_file, output_file)
def convert_html_to_pdf(source_html, output_filename):
"""
Write the html to a PDF file
:param source_html: the source HTML file
:param output_filename: the output PDF file
:return: the error status
"""
# Generate PDF from a html file.
pdfkit.from_file(source_html, output_filename)
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('resume_file', nargs=1, required=1, type=click.Path())
@click.option('--generate', '-g', default='html',
help="Generate a type [default: html], html or pdf")
@click.option('--directory', '-d', default='build',
help="Output directory for the build files. [default: build]")
def main(resume_file, generate, directory):
"""
Entry function for the script to handle command arguments
and run appropriate build like 'html' and 'pdf'.
"""
# read resume data and config with some defaults
resume_data = read_yaml(resume_file)
config = resume_data.get('config', {})
if directory:
config['output_dir'] = directory
else:
config.setdefault('output_dir', directory)
# build based on the given format
commands = {'html': generate_html, 'pdf': generate_pdf}
return commands[generate](config, resume_data)
if __name__ == '__main__':
main()
|
py | 1a4a3b412f9857e1d01415c67d0b05a5f7d89ada | from xcconfig import xcconfig |
py | 1a4a3b54e711a86144834dae67187a961d55c6a7 | #
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
from decimal import Decimal
from typing import Union
import pytest
from enforce_typing import enforce_types
from ocean_lib.config import Config
from ocean_lib.models.bfactory import BFactory
from ocean_lib.models.bpool import BPool
from ocean_lib.models.btoken import BToken
from ocean_lib.models.test.conftest import alice_info
from ocean_lib.ocean.util import get_bfactory_address
from ocean_lib.web3_internal.currency import to_wei
from ocean_lib.web3_internal.wallet import Wallet
from web3.main import Web3
HUGEINT = 2 ** 255
def test_notokens_basic(
OCEAN_address, network, web3, config, alice_wallet, alice_address
):
"""Tests deployment of a pool without tokens."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
assert not pool.isPublicSwap()
assert not pool.isFinalized()
assert not pool.isBound(OCEAN_address)
assert pool.getNumTokens() == 0
assert pool.getCurrentTokens() == []
with pytest.raises(Exception):
pool.getFinalTokens() # pool's not finalized
assert pool.getSwapFee() == to_wei("1e-6")
assert pool.getController() == alice_address
assert str(pool)
with pytest.raises(Exception):
pool.finalize(from_wallet=alice_wallet) # can't finalize if no tokens
def test_setSwapFee_works(network, config, web3, alice_wallet):
"""Tests that a swap fee can be set on the pool by the controller of that pool."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
pool.setSwapFee(to_wei("0.011"), from_wallet=alice_wallet)
assert pool.getSwapFee() == to_wei("0.011")
def test_setSwapFee_fails(
network, config, web3, alice_wallet, alice_address, bob_wallet, bob_address
):
"""Tests that someone who isn't a controller can not set the swap fee."""
factory = BFactory(web3, get_bfactory_address(config.address_file, network))
pool_address = factory.newBPool(alice_wallet)
pool = BPool(web3, pool_address)
with pytest.raises(Exception):
pool.setSwapFee(
to_wei("0.011"), from_wallet=bob_wallet
) # not ok, bob isn't controller
pool.setController(bob_address, from_wallet=alice_wallet)
pool.setSwapFee(to_wei("0.011"), from_wallet=bob_wallet) # ok now
def test_setController(
network, config, web3, alice_wallet, alice_address, bob_wallet, bob_address
):
"""Tests that the controller of a pool can be changed."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
pool.setController(bob_address, from_wallet=alice_wallet)
assert pool.getController() == bob_address
pool.setController(alice_address, from_wallet=bob_wallet)
assert pool.getController() == alice_address
def test_setPublicSwap(network, config, web3, alice_wallet):
"""Tests that a pool can be set as public."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
pool.setPublicSwap(True, from_wallet=alice_wallet)
assert pool.isPublicSwap()
pool.setPublicSwap(False, from_wallet=alice_wallet)
assert not pool.isPublicSwap()
def test_2tokens_basic(network, config, web3, T1, T2, alice_wallet, alice_address):
"""Tests the deployment of a pool containing 2 tokens (basic happy flow)."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
assert T1.address != T2.address
assert T1.address != pool.address
assert T1.balanceOf(alice_address) >= to_wei(90)
_ = T2.balanceOf(alice_address) >= to_wei(10)
with pytest.raises(Exception): # can't bind until we approve
pool.bind(T1.address, to_wei(90), to_wei(9), from_wallet=alice_wallet)
# Bind two tokens to the pool
T1.approve(pool.address, to_wei(90), from_wallet=alice_wallet)
T2.approve(pool.address, to_wei(10), from_wallet=alice_wallet)
assert T1.allowance(alice_address, pool.address) == to_wei(90)
assert T2.allowance(alice_address, pool.address) == to_wei(10)
assert not pool.isBound(T1.address) and not pool.isBound(T2.address)
pool.bind(T1.address, to_wei(90), to_wei(9), from_wallet=alice_wallet)
pool.bind(T2.address, to_wei(10), to_wei(1), from_wallet=alice_wallet)
assert pool.isBound(T1.address) and pool.isBound(T2.address)
assert pool.getNumTokens() == 2
assert pool.getCurrentTokens() == [T1.address, T2.address]
assert pool.getDenormalizedWeight(T1.address) == to_wei(9)
assert pool.getDenormalizedWeight(T2.address) == to_wei(1)
assert pool.getTotalDenormalizedWeight() == to_wei(10)
assert pool.getNormalizedWeight(T1.address) == to_wei("0.9")
assert pool.getNormalizedWeight(T2.address) == to_wei("0.1")
assert pool.getBalance(T1.address) == to_wei(90)
assert pool.getBalance(T2.address) == to_wei(10)
assert str(pool)
def test_unbind(network, config, web3, T1, T2, alice_wallet):
"""Tests that a pool can be unbound."""
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 1, 1, 1, 1
)
pool.unbind(T1.address, from_wallet=alice_wallet)
assert pool.getNumTokens() == 1
assert pool.getCurrentTokens() == [T2.address]
assert pool.getBalance(T2.address) == to_wei(1)
def test_finalize(network, config, web3, T1, T2, alice_address, alice_wallet):
"""Tests that a pool containing tokens can be finalized."""
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
assert not pool.isPublicSwap()
assert not pool.isFinalized()
assert pool.totalSupply() == 0
assert pool.balanceOf(alice_address) == 0
assert pool.allowance(alice_address, pool.address) == 0
pool.finalize(from_wallet=alice_wallet)
assert str(pool) != ""
assert pool.isPublicSwap()
assert pool.isFinalized()
assert pool.totalSupply() == to_wei(100)
assert pool.balanceOf(alice_address) == to_wei(100)
assert pool.allowance(alice_address, pool.address) == 0
assert pool.getFinalTokens() == [T1.address, T2.address]
assert pool.getCurrentTokens() == [T1.address, T2.address]
def test_public_pool(network, config, bob_wallet, alice_ocean):
"""Tests successful transfers inside a public pool."""
alice = alice_info()
alice_address = alice.address
bob_address = bob_wallet.address
T1 = alice.T1
T2 = alice.T2
pool = _createPoolWith2Tokens(
network,
config,
alice_ocean.web3,
alice.T1,
alice.T2,
alice.wallet,
90,
10,
9,
1,
)
BPT = pool
# alice give Bob some tokens
alice.T1.transfer(bob_wallet.address, to_wei(100), from_wallet=alice.wallet)
alice.T2.transfer(bob_wallet.address, to_wei(100), from_wallet=alice.wallet)
# verify holdings
assert alice.T1.balanceOf(alice.address) == to_wei(1000 - 90 - 100) # 810
assert alice.T2.balanceOf(alice.address) == to_wei(1000 - 10 - 100) # 890
assert BPT.balanceOf(alice.address) == to_wei(0)
assert alice.T1.balanceOf(bob_address) == to_wei(100)
assert alice.T2.balanceOf(bob_address) == to_wei(100)
assert BPT.balanceOf(bob_address) == to_wei(0)
assert T1.balanceOf(pool.address) == to_wei(90)
assert T2.balanceOf(pool.address) == to_wei(10)
assert BPT.balanceOf(pool.address) == to_wei(0)
# finalize
pool = BPool(alice_ocean.web3, pool.address)
pool.finalize(from_wallet=alice.wallet)
# verify holdings
assert alice.T1.balanceOf(alice.address) == to_wei(1000 - 90 - 100)
assert alice.T2.balanceOf(alice.address) == to_wei(1000 - 10 - 100)
assert BPT.balanceOf(alice.address) == to_wei(100) # new!
assert T1.balanceOf(pool.address) == to_wei(90)
assert T2.balanceOf(pool.address) == to_wei(10)
assert BPT.balanceOf(pool.address) == to_wei(0)
# bob join pool. Wants 10 BPT
T1.approve(pool.address, to_wei(100), from_wallet=bob_wallet)
T2.approve(pool.address, to_wei(100), from_wallet=bob_wallet)
pool.joinPool(
poolAmountOut=to_wei(10), # 10 BPT
maxAmountsIn=[to_wei(100), to_wei(100)],
from_wallet=bob_wallet,
)
# verify holdings
assert T1.balanceOf(alice_address) == to_wei(1000 - 90 - 100) # 810
assert T2.balanceOf(alice_address) == to_wei(1000 - 10 - 100) # 890
assert BPT.balanceOf(alice_address) == to_wei(100)
assert T1.balanceOf(bob_address) == to_wei(100 - 9) # 91
assert T2.balanceOf(bob_address) == to_wei(100 - 1) # 99
assert BPT.balanceOf(bob_address) == to_wei(10)
assert T1.balanceOf(pool.address) == to_wei(90 + 9) # 99
assert T2.balanceOf(pool.address) == to_wei(10 + 1) # 11
assert BPT.balanceOf(pool.address) == to_wei(0)
# bob sells 2 BPT
# -this is where BLabs fee kicks in. But the fee is currently set to 0.
pool.exitPool(
poolAmountIn=to_wei(2),
minAmountsOut=[to_wei(0), to_wei(0)],
from_wallet=bob_wallet,
)
assert T1.balanceOf(bob_address) == 92800000000000000018 # 92.8
assert T2.balanceOf(bob_address) == 99200000000000000002 # 99.2
assert BPT.balanceOf(bob_address) == to_wei(8)
# bob buys 5 more BPT
pool.joinPool(
poolAmountOut=to_wei(5),
maxAmountsIn=[to_wei(90), to_wei(90)],
from_wallet=bob_wallet,
)
assert BPT.balanceOf(bob_address) == to_wei(13)
# bob fully exits
pool.exitPool(poolAmountIn=to_wei(13), minAmountsOut=[0, 0], from_wallet=bob_wallet)
assert BPT.balanceOf(bob_address) == to_wei(0)
block = alice_ocean.web3.eth.block_number
block_confirmations = alice_ocean.config.block_confirmations.value
join_log = pool.get_join_logs(block - (block_confirmations + 1), block)[0]
assert join_log["args"]["tokenIn"] == T1.address
def test_rebind_more_tokens(network, config, web3, T1, T2, alice_wallet):
"""Tests that we can rebind more tokens on a pool."""
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
# insufficient allowance
with pytest.raises(Exception):
pool.rebind(T1.address, to_wei(120), to_wei(9), from_wallet=alice_wallet)
# sufficient allowance
T1.approve(pool.address, to_wei(30), from_wallet=alice_wallet)
pool.rebind(T1.address, to_wei(120), to_wei(9), from_wallet=alice_wallet)
def test_gulp(network, config, web3, T1, alice_wallet):
"""Test pool gulp."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
# bind T1 to the pool, with a balance of 2.0
T1.approve(pool.address, to_wei(50), from_wallet=alice_wallet)
pool.bind(T1.address, to_wei(2), to_wei(50), from_wallet=alice_wallet)
# T1 is now pool's (a) ERC20 balance (b) _records[token].balance
assert T1.balanceOf(pool.address) == to_wei(2) # ERC20 balance
assert pool.getBalance(T1.address) == to_wei(2) # records[]
# but then some joker accidentally sends 5.0 tokens to the pool's address
# rather than binding / rebinding. So it's in ERC20 bal but not records[]
T1.transfer(pool.address, to_wei(5), from_wallet=alice_wallet)
assert T1.balanceOf(pool.address) == to_wei(2 + 5) # ERC20 bal
assert pool.getBalance(T1.address) == to_wei(2) # records[]
# so, 'gulp' gets the pool to absorb the tokens into its balances.
# i.e. to update _records[token].balance to be in sync with ERC20 balance
pool.gulp(T1.address, from_wallet=alice_wallet)
assert T1.balanceOf(pool.address) == to_wei(2 + 5) # ERC20
assert pool.getBalance(T1.address) == to_wei(2 + 5) # records[]
def test_spot_price(network, config, web3, T1, T2, alice_wallet):
"""Test calculation of prices on spot."""
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 1, 1, 1, 1
)
assert price_sans_fee == to_wei(1)
assert price == to_wei("1.000001000001000001")
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
assert price_sans_fee == to_wei(1)
assert price == to_wei("1.000001000001000001")
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 1, 2, 1, 1
)
assert price_sans_fee == to_wei("0.5")
assert price == to_wei("0.500000500000500001")
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 2, 1, 1, 1
)
assert price_sans_fee == to_wei(2)
assert price == to_wei("2.000002000002000002")
(price, price_sans_fee) = _spotPrices(
network, config, web3, T1, T2, alice_wallet, 9, 10, 9, 1
)
assert price_sans_fee == to_wei("0.1")
assert price == to_wei("0.100000100000100000")
def test_joinSwapExternAmountIn(
network, config, web3, T1, T2, alice_wallet, alice_address
):
"""Tests adding an external amount inside a pool.
When the pool is not public, assert that an Exception is thrown.
When the pool is public, assert that the swap is made and the correct balance remains.
"""
init_T1balance = T1.balanceOf(alice_address)
T2balance = T2.balanceOf(alice_address)
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
T1.approve(pool.address, to_wei(100), from_wallet=alice_wallet)
# pool's not public
with pytest.raises(Exception):
pool.swapExactAmountOut(
tokenIn_address=T1.address,
maxAmountIn=to_wei(100),
tokenOut_address=T2.address,
tokenAmountOut=to_wei(10),
maxPrice=HUGEINT,
from_wallet=alice_wallet,
)
# pool's public
pool.setPublicSwap(True, from_wallet=alice_wallet)
pool.swapExactAmountOut(
tokenIn_address=T1.address,
maxAmountIn=to_wei(100),
tokenOut_address=T2.address,
tokenAmountOut=to_wei(1),
maxPrice=HUGEINT,
from_wallet=alice_wallet,
)
new_balance = init_T1balance - to_wei("91.055")
assert (
new_balance - to_wei("0.005")
<= T1.balanceOf(alice_address)
<= new_balance + to_wei("0.005")
)
assert T2.balanceOf(alice_address) == T2balance - to_wei(9)
block = web3.eth.block_number
block_confirmations = config.block_confirmations.value
swap_log = pool.get_swap_logs(block - (block_confirmations + 1), block)[0]
assert swap_log["args"]["tokenIn"] == T1.address
def test_joinswapPoolAmountOut(
network, config, web3, T1, T2, alice_address, alice_wallet
):
"""Tests taking an amount out of the pool."""
T1balance = T1.balanceOf(alice_address)
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
BPT = pool
pool.finalize(from_wallet=alice_wallet)
pool_balance = BPT.balanceOf(alice_address)
T1.approve(pool.address, to_wei(90), from_wallet=alice_wallet)
assert T1.balanceOf(alice_address) == T1balance - to_wei(90)
T1balance = T1.balanceOf(alice_address)
pool.joinswapPoolAmountOut(
tokenIn_address=T1.address,
poolAmountOut=to_wei(10), # BPT wanted
maxAmountIn=to_wei(90), # max T1 to spend
from_wallet=alice_wallet,
)
assert T1.balanceOf(alice_address) >= T1balance - to_wei(90)
assert BPT.balanceOf(alice_address) == pool_balance + to_wei(10)
def test_exitswapPoolAmountIn(
network, config, web3, T1, T2, alice_address, alice_wallet
):
T1balance = T1.balanceOf(alice_address)
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
BPT = pool
pool.finalize(from_wallet=alice_wallet)
pool_balance = BPT.balanceOf(alice_address)
assert T1.balanceOf(alice_address) == T1balance - to_wei(90)
pool.exitswapPoolAmountIn(
tokenOut_address=T1.address,
poolAmountIn=to_wei(10), # BPT spent
minAmountOut=to_wei(1), # min T1 wanted
from_wallet=alice_wallet,
)
assert T1.balanceOf(alice_address) >= T1balance - to_wei(90) + to_wei(1)
assert BPT.balanceOf(alice_address) == pool_balance - to_wei(10)
def test_exitswapExternAmountOut(
network, config, web3, T1, T2, alice_address, alice_wallet, alice_ocean
):
T1balance = T1.balanceOf(alice_address)
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, alice_wallet, 90, 10, 9, 1
)
BPT = pool
pool.finalize(from_wallet=alice_wallet)
pool_balance = BPT.balanceOf(alice_address)
assert T1.balanceOf(alice_address) == T1balance - to_wei(90)
pool.exitswapExternAmountOut(
tokenOut_address=T1.address,
tokenAmountOut=to_wei(2), # T1 wanted
maxPoolAmountIn=to_wei(10), # max BPT spent
from_wallet=alice_wallet,
)
assert T1.balanceOf(alice_address) == T1balance - to_wei(90) + to_wei(2)
assert BPT.balanceOf(alice_address) >= pool_balance - to_wei(10)
block = alice_ocean.web3.eth.block_number
block_confirmations = config.block_confirmations.value
exit_log = pool.get_exit_logs(block - (block_confirmations + 1), block)[0]
assert exit_log["args"]["tokenOut"] == T1.address
def test_calcSpotPrice(network, config, web3, T1, T2, alice_address, alice_wallet):
"""Tests pricing with calcSpotPrice."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcSpotPrice(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
tokenBalanceOut=to_wei(11),
tokenWeightOut=to_wei(1),
swapFee=0,
)
assert x == to_wei("0.909090909090909091")
def test_calcOutGivenIn(network, config, web3, alice_wallet):
"""Tests pricing with calcOutGivenIn."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcOutGivenIn(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
tokenBalanceOut=to_wei("10.1"),
tokenWeightOut=to_wei(1),
tokenAmountIn=to_wei(1),
swapFee=0,
)
assert x == to_wei("0.918181818181818181")
def test_calcInGivenOut(network, config, web3, alice_wallet):
"""Tests pricing with calcInGivenOut."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcInGivenOut(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
tokenBalanceOut=to_wei("10.1"),
tokenWeightOut=to_wei(1),
tokenAmountOut=to_wei(1),
swapFee=0,
)
assert x == to_wei("1.098901098901098900")
def test_calcPoolOutGivenSingleIn(network, config, web3, alice_wallet):
"""Tests calculations with calcPoolOutGivenSingleIn."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcPoolOutGivenSingleIn(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
poolSupply=to_wei(120),
totalWeight=to_wei(2),
tokenAmountIn=to_wei("0.1"),
swapFee=0,
)
assert x == to_wei("0.598507453453125000")
def test_calcSingleInGivenPoolOut(network, config, web3, alice_wallet):
"""Tests pricing with calcSingleInGivenPoolOut."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcSingleInGivenPoolOut(
tokenBalanceIn=to_wei(10),
tokenWeightIn=to_wei(1),
poolSupply=to_wei(120),
totalWeight=to_wei(2),
poolAmountOut=to_wei(10),
swapFee=0,
)
assert x == to_wei("1.736111111111111100")
def test_calcSingleOutGivenPoolIn(network, config, web3, alice_wallet):
"""Tests pricing with calcSingleOutGivenPoolIn."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcSingleOutGivenPoolIn(
tokenBalanceOut=to_wei(10),
tokenWeightOut=to_wei(1),
poolSupply=to_wei(120),
totalWeight=to_wei(2),
poolAmountIn=to_wei(10),
swapFee=0,
)
assert x == to_wei("1.597222222222222220")
def test_calcPoolInGivenSingleOut(network, config, web3, alice_wallet):
"""Tests calculations with calcPoolInGivenSingleOut."""
pool = _deployBPool(web3, config.address_file, network, alice_wallet)
x = pool.calcPoolInGivenSingleOut(
tokenBalanceOut=to_wei(1000),
tokenWeightOut=to_wei(5),
poolSupply=to_wei(100),
totalWeight=to_wei(10),
tokenAmountOut=to_wei("0.1"),
swapFee=0,
)
assert x == to_wei("0.005000125006250000")
@enforce_types
def _createPoolWith2Tokens(
network: str,
config: Config,
web3: Web3,
T1: BToken,
T2: BToken,
wallet: Wallet,
bal1: Union[Decimal, str, int],
bal2: Union[Decimal, str, int],
w1: Union[Decimal, str, int],
w2: Union[Decimal, str, int],
):
"""Helper function to create a basic pool containing 2 tokens."""
pool = _deployBPool(web3, config.address_file, network, wallet)
T1.get_tx_receipt(web3, T1.approve(pool.address, to_wei(bal1), from_wallet=wallet))
T2.get_tx_receipt(web3, T2.approve(pool.address, to_wei(bal2), from_wallet=wallet))
if pool.isBound(T1.address):
pool.unbind(T1.address, wallet)
if pool.isBound(T2.address):
pool.unbind(T2.address, wallet)
pool.bind(T1.address, to_wei(bal1), to_wei(w1), from_wallet=wallet)
pool.bind(T2.address, to_wei(bal2), to_wei(w2), from_wallet=wallet)
return pool
@enforce_types
def _deployBPool(
web3: Web3, address_file: str, network: str, from_wallet: Wallet
) -> BPool:
"""Helper function to deploy a pool."""
factory_address = get_bfactory_address(address_file, network)
factory = BFactory(web3, factory_address)
pool_address = factory.newBPool(from_wallet=from_wallet)
pool = BPool(web3, pool_address)
return pool
@enforce_types
def _spotPrices(
network: str,
config: Config,
web3: Web3,
T1: BToken,
T2: BToken,
wallet: Wallet,
bal1: Union[Decimal, str, int],
bal2: Union[Decimal, str, int],
w1: Union[Decimal, str, int],
w2: Union[Decimal, str, int],
):
"""Helper function to allow for spot price calculations."""
pool = _createPoolWith2Tokens(
network, config, web3, T1, T2, wallet, bal1, bal2, w1, w2
)
a1, a2 = T1.address, T2.address
return (pool.getSpotPrice(a1, a2), pool.getSpotPriceSansFee(a1, a2))
|
py | 1a4a3c4438067ca2606b605817d9625ad44d42e3 | #!/usr/bin/env python3
import subprocess
import argparse
from pathlib import Path
import re
from statistics import stdev, mean, median_high
from math import floor, ceil
time_parser = re.compile(r'Solution found in (\d+.\d+) ms')
num_runs = 10
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--binary', type=str, help='Path to binary to benchmark', required=True)
parser.add_argument('-i', '--input', type=str, help='Path to inputs for the benchmark. Should contain one sudoku per line.', required=True)
args = parser.parse_args()
if not Path(args.binary).is_file():
print('Argument {} does not specify a valid path to a binary'.format(args.binary))
exit(1)
if not Path(args.input).is_file():
print('Argument {} does not specify a valid path to an input file'.format(args.binary))
exit(2)
def unfurl_line(line):
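# Re-wrap an 81-character puzzle string into 9 newline-separated rows of 9
# cells, which is the format the solver binary reads on stdin.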
assert len(line) == 9 * 9
return '\n'.join(line[i:i+9] for i in range(0, 81, 9))
def run_with_input(line):
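# Run the solver num_runs times on one puzzle, parse the reported solve time
# from stderr, and return the mean and standard deviation in milliseconds.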
results = []
for i in range(num_runs):
foo = subprocess.run([args.binary], check=True, input=unfurl_line(line),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
matched = time_parser.match(foo.stderr)
results.append(float(matched.groups()[0]))
return mean(results), stdev(results)
with open(args.input, 'r') as input_file, open('table.md', 'w') as table_file, open('results.csv', 'w') as csv_file:
table_file.write('| Problem | Time taken mean (ms) | Time taken stdev (ms) |\n')
table_file.write('|---------|----------------------|-----------------------|\n')
for idx, line in enumerate(input_file):
line = line.rstrip()
result = run_with_input(line)
table_file.write('| {} | {} | {} |\n'.format(idx, *result))
csv_file.write('{}, {}, {}\n'.format(idx, *result))
print('Problem: {}, mean: {}, stdev: {}'.format(idx, *result))
|
py | 1a4a3c8b265bc7e55ea6b449d040a1c9e052a422 | """
.. _ex-report:
================================
Make an MNE-Report with a Slider
================================
In this example, MEG evoked data are plotted in an html slider.
"""
# Authors: Teon Brooks <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
from mne.report import Report
from mne.datasets import sample
from mne import read_evokeds
from matplotlib import pyplot as plt
data_path = sample.data_path()
meg_path = data_path + '/MEG/sample'
subjects_dir = data_path + '/subjects'
evoked_fname = meg_path + '/sample_audvis-ave.fif'
###############################################################################
# Do standard folder parsing (this can take a couple of minutes):
report = Report(image_format='png', subjects_dir=subjects_dir,
info_fname=evoked_fname, subject='sample',
raw_psd=False) # use False for speed here
report.parse_folder(meg_path, on_error='ignore', mri_decim=10)
###############################################################################
# Add a custom section with an evoked slider:
# Load the evoked data
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(None, 0), verbose=False)
evoked.crop(0, .2)
times = evoked.times[::4]
# Create a list of figs for the slider
figs = list()
for t in times:
figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100,
show=False))
plt.close(figs[-1])
report.add_slider_to_section(figs, times, 'Evoked Response',
image_format='png') # can also use 'svg'
# to save report
report.save('my_report.html', overwrite=True)
|
py | 1a4a3d4af30b466a9fcbd3a29c6ccbbe4ac33587 | """Constant Kernel and WhiteNoise Kernel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.math.psd_kernels.internal import util
from tensorflow_probability.python.math.psd_kernels.positive_semidefinite_kernel import (
PositiveSemidefiniteKernel,
)
class _Constant(PositiveSemidefiniteKernel):
def __init__(self, coef=None, feature_ndims=1, validate_args=False, name="Constant"):
parameters = dict(locals())
with tf.name_scope(name):
dtype = util.maybe_get_common_dtype([coef])
self._coef = tensor_util.convert_nonref_to_tensor(coef, dtype=dtype, name="coef")
super(_Constant, self).__init__(
feature_ndims,
dtype=dtype,
name=name,
validate_args=validate_args,
parameters=parameters,
)
def _apply(self, x1, x2, example_ndims=0):
shape = tf.broadcast_dynamic_shape(
x1.shape[: -(self.feature_ndims)], x2.shape[: -(self.feature_ndims)],
)
expected = tf.ones(shape, dtype=self._dtype)
if self.coef is not None:
coef = tf.convert_to_tensor(self._coef)
coef = util.pad_shape_with_ones(coef, example_ndims)
expected *= coef
return expected
@property
def coef(self):
return self._coef
def _batch_shape(self):
scalar_shape = tf.TensorShape([])
return scalar_shape if self.coef is None else self.coef.shape
def _batch_shape_tensor(self):
return [] if self.coef is None else tf.shape(self.coef)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
for arg_name, arg in dict(coef=self.coef).items():
if arg is not None and is_init != tensor_util.is_ref(arg):
assertions.append(
assert_util.assert_positive(
arg, message="{} must be positive.".format(arg_name)
)
)
return assertions
class _WhiteNoise(PositiveSemidefiniteKernel):
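# White-noise kernel: `noise` on the diagonal and zero everywhere else, so it
# is only defined as a full matrix; pointwise evaluation is intentionally
# unsupported.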
def __init__(self, noise=None, feature_ndims=1, validate_args=False, name="WhiteNoise"):
parameters = dict(locals())
with tf.name_scope(name):
dtype = util.maybe_get_common_dtype([noise])
self._noise = tensor_util.convert_nonref_to_tensor(noise, dtype=dtype, name="noise")
super(_WhiteNoise, self).__init__(
feature_ndims,
dtype=dtype,
name=name,
validate_args=validate_args,
parameters=parameters,
)
def _apply(self, x1, x2, example_ndims=0):
raise NotImplementedError("WhiteNoise kernel cannot be evaluated at a point!")
def _matrix(self, x1, x2):
shape = tf.broadcast_dynamic_shape(
x1.shape[: -(1 + self.feature_ndims)], x2.shape[: -(1 + self.feature_ndims)],
)
expected = tf.linalg.eye(
x1.shape[-(1 + self.feature_ndims)],
x2.shape[-(1 + self.feature_ndims)],
batch_shape=shape,
dtype=self._dtype,
)
if self.noise is not None:
noise = tf.convert_to_tensor(self._noise)
noise = util.pad_shape_with_ones(noise, 2)
expected *= noise
return expected
@property
def noise(self):
return self._noise
def _batch_shape(self):
scalar_shape = tf.TensorShape([])
return scalar_shape if self.noise is None else self.noise.shape
def _batch_shape_tensor(self):
return [] if self.noise is None else tf.shape(self.noise)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
for arg_name, arg in dict(noise=self.noise).items():
if arg is not None and is_init != tensor_util.is_ref(arg):
assertions.append(
assert_util.assert_positive(
arg, message="{} must be positive.".format(arg_name)
)
)
return assertions
class _Exponential(PositiveSemidefiniteKernel):
def __init__(
self,
amplitude=None,
length_scale=None,
feature_ndims=1,
validate_args=False,
name="Exponential",
):
parameters = dict(locals())
with tf.name_scope(name):
dtype = util.maybe_get_common_dtype([amplitude, length_scale])
self._amplitude = tensor_util.convert_nonref_to_tensor(amplitude, dtype=dtype)
self._length_scale = tensor_util.convert_nonref_to_tensor(length_scale, dtype=dtype)
super(_Exponential, self).__init__(
feature_ndims=feature_ndims,
dtype=dtype,
name=name,
validate_args=validate_args,
parameters=parameters,
)
@property
def length_scale(self):
return self._length_scale
@property
def amplitude(self):
return self._amplitude
def _apply(self, x1, x2, example_ndims=0):
sqdist = util.sum_rightmost_ndims_preserving_shape(
tf.math.squared_difference(x1, x2), self.feature_ndims
)
ndist = -0.5 * tf.sqrt(sqdist + 1e-12)
if self.length_scale is not None:
length_scale = tf.convert_to_tensor(self._length_scale)
length_scale = util.pad_shape_with_ones(length_scale, example_ndims)
ndist /= length_scale ** 2
if self.amplitude is not None:
amplitude = tf.convert_to_tensor(self._amplitude)
amplitude = util.pad_shape_with_ones(amplitude, example_ndims)
return amplitude ** 2 * tf.exp(ndist)
return tf.exp(ndist)
def _batch_shape(self):
scalar_shape = tf.TensorShape([])
return tf.broadcast_static_shape(
scalar_shape if self.amplitude is None else self.amplitude.shape,
scalar_shape if self.length_scale is None else self.length_scale.shape,
)
def _batch_shape_tensor(self):
return tf.broadcast_dynamic_shape(
[] if self.amplitude is None else tf.shape(self.amplitude),
[] if self.length_scale is None else tf.shape(self.length_scale),
)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
for arg_name, arg in dict(amplitude=self.amplitude, length_scale=self.length_scale).items():
if arg is not None and is_init != tensor_util.is_ref(arg):
assertions.append(
assert_util.assert_positive(
arg, message="{} must be positive.".format(arg_name)
)
)
return assertions
class _Gibbs(PositiveSemidefiniteKernel):
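# Non-stationary Gibbs kernel: the length scale varies with the input and is
# supplied as length_scale_fn(x, *fn_args).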
def __init__(
self,
length_scale_fn=None,
fn_args=None,
feature_ndims=1,
dtype=tf.float32,
validate_args=False,
name="Gibbs",
):
parameters = locals()
with tf.name_scope(name):
self._length_scale_fn = length_scale_fn
self._fn_args = fn_args
super(_Gibbs, self).__init__(
feature_ndims=feature_ndims,
dtype=dtype,
name=name,
validate_args=validate_args,
parameters=parameters,
)
def _log_apply(self, lx1, lx2):
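# Compute the normalisation prefactor prod_d sqrt(2*lx1_d*lx2_d /
# (lx1_d^2 + lx2_d^2)) in log space for numerical stability.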
loglx1 = tf.math.log(lx1)
loglx2 = tf.math.log(lx2)
lognum = util.sum_rightmost_ndims_preserving_shape(
loglx1 + loglx2 + math.log(2.0), self.feature_ndims
)
logdenom = util.sum_rightmost_ndims_preserving_shape(
tf.math.log(lx1 ** 2 + lx2 ** 2), self.feature_ndims
)
return tf.exp(0.5 * (lognum - logdenom))
def _fast_apply(self, x1, x2):
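# Direct (non log-space) evaluation used for the common case of a single
# one-dimensional feature.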
lx1 = tf.convert_to_tensor(self._length_scale_fn(x1, *self._fn_args))
lx2 = tf.convert_to_tensor(self._length_scale_fn(x2, *self._fn_args))
lx12, lx22 = lx1 ** 2, lx2 ** 2
scal = util.sum_rightmost_ndims_preserving_shape(
tf.sqrt(2 * lx1 * lx2 / (lx12 + lx22)), self.feature_ndims
)
sqdist = tf.math.squared_difference(x1, x2)
sqdist /= lx12 + lx22
sqdist = util.sum_rightmost_ndims_preserving_shape(sqdist, self.feature_ndims)
return scal * tf.exp(-sqdist)
def _apply(self, x1, x2, example_ndims=0):
if self._length_scale_fn is not None:
if x1.shape[-1] == 1 and self.feature_ndims == 1:
return self._fast_apply(x1, x2)
lx1 = tf.convert_to_tensor(self._length_scale_fn(x1, *self._fn_args))
lx2 = tf.convert_to_tensor(self._length_scale_fn(x2, *self._fn_args))
scal = self._log_apply(lx1, lx2)
sqdist = tf.math.squared_difference(x1, x2)
sqdist /= lx1 ** 2 + lx2 ** 2
sqdist = util.sum_rightmost_ndims_preserving_shape(sqdist, self.feature_ndims)
return scal * tf.exp(-sqdist)
sqdist = util.sum_rightmost_ndims_preserving_shape(
tf.math.squared_difference(x1, x2), self.feature_ndims
)
return tf.exp(-sqdist / 2)
def _batch_shape(self):
return tf.TensorShape([])
def _batch_shape_tensor(self):
return tf.shape([])
def _parameter_control_dependencies(self, is_init):
return []
class _Cosine(PositiveSemidefiniteKernel):
def __init__(
self,
length_scale=None,
amplitude=None,
feature_ndims=1,
validate_args=False,
name="Cosine",
):
parameters = locals()
with tf.name_scope(name):
dtype = util.maybe_get_common_dtype([length_scale, amplitude])
self._length_scale = tensor_util.convert_nonref_to_tensor(length_scale, dtype=dtype)
self._amplitude = tensor_util.convert_nonref_to_tensor(amplitude, dtype=dtype)
super(_Cosine, self).__init__(
feature_ndims=feature_ndims,
dtype=dtype,
name=name,
validate_args=validate_args,
parameters=parameters,
)
@property
def length_scale(self):
return self._length_scale
@property
def amplitude(self):
return self._amplitude
def _apply(self, x1, x2, example_ndims=0):
component = (
2.0
* math.pi
* tf.sqrt(
util.sum_rightmost_ndims_preserving_shape(
tf.math.squared_difference(x1, x2), self.feature_ndims
)
)
)
if self.length_scale is not None:
length_scale = tf.convert_to_tensor(self._length_scale)
length_scale = util.pad_shape_with_ones(length_scale, example_ndims)
component /= length_scale ** 2
if self.amplitude is not None:
amplitude = tf.convert_to_tensor(self._amplitude)
amplitude = util.pad_shape_with_ones(amplitude, example_ndims)
return amplitude ** 2 * tf.math.cos(component)
return tf.math.cos(component)
def _batch_shape(self):
scalar_shape = tf.TensorShape([])
return tf.broadcast_static_shape(
scalar_shape if self._amplitude is None else self._amplitude.shape,
scalar_shape if self._length_scale is None else self._length_scale.shape,
)
def _batch_shape_tensor(self):
return tf.broadcast_dynamic_shape(
[] if self.amplitude is None else tf.shape(self.amplitude),
[] if self.length_scale is None else tf.shape(self.length_scale),
)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
for arg_name, arg in dict(
amplitude=self.amplitude, length_scale=self.length_scale
).items():
if arg is not None and is_init != tensor_util.is_ref(arg):
assertions.append(
assert_util.assert_positive(
arg, message="{} must be positive.".format(arg_name)
)
)
return assertions
# FIXME: This kernel is not implemented currently as tensorflow doesn't allow
# slicing with tensors or arrays. Any help would be appreciated.
# class Coregion(PositiveSemidefiniteKernel):
# def __init__(
# self, W=None, kappa=None, B=None, feature_ndims=None, validate_args=False, name="Coregion"
# ):
# parameters = locals()
# with tf.name_scope(name):
# dtype = util.maybe_get_common_dtype([W, kappa, B])
# self._W = tensor_util.convert_nonref_to_tensor(W)
# self._kappa = tensor_util.convert_nonref_to_tensor(kappa)
# if B is not None:
# self._B = tensor_util.convert_nonref_to_tensor(B)
# else:
# self._B = tf.linalg.matmul(self._W, self._W, transpose_b=True) + tf.linalg.diag(
# self._kappa
# )
# super().__init__(
# feature_ndims=feature_ndims,
# dtype=dtype,
# name=name,
# validate_args=validate_args,
# parameters=parameters,
# )
# @property
# def W(self):
# return self._W
# @property
# def B(self):
# return self._B
# @property
# def kappa(self):
# return self._kappa
# def _apply(self, x1, x2, example_ndims=0):
# raise NotImplementedError("Coregion doesn't have a point evaluation scheme")
# def _matrix(self, x1, x2):
# x1_idx = tf.cast(x1, tf.int32)
# x2_idx = tf.cast(x2, tf.int32).T
# return tf.gather_nd(self._B,)
class _ScaledCov(PositiveSemidefiniteKernel):
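# Wraps another kernel and rescales its covariance pointwise by
# scaling_fn(x1, *fn_args) * scaling_fn(x2, *fn_args).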
def __init__(
self,
kernel=None,
scaling_fn=None,
fn_args=None,
feature_ndims=1,
validate_args=False,
name="ScaledCov",
):
parameters = locals()
with tf.name_scope(name):
self._kernel = kernel
self._scaling_fn = scaling_fn
if fn_args is None:
fn_args = tuple()
self._fn_args = fn_args
super(_ScaledCov, self).__init__(
feature_ndims=feature_ndims,
dtype=kernel._dtype,
name=name,
validate_args=validate_args,
parameters=parameters,
)
@property
def kernel(self):
return self._kernel
@property
def scaling_fn(self):
return self._scaling_fn
@property
def fn_args(self):
return self._fn_args
def _apply(self, x1, x2, example_ndims=0):
cov = self._kernel._apply(x1, x2, example_ndims)
if self._scaling_fn is not None:
scal_x1 = tf.convert_to_tensor(self._scaling_fn(x1, *self._fn_args))
scal_x2 = tf.convert_to_tensor(self._scaling_fn(x2, *self._fn_args))
scal = util.sum_rightmost_ndims_preserving_shape(scal_x1 * scal_x2, self._feature_ndims)
return scal * cov
return cov
def _matrix(self, x1, x2):
cov = self._kernel._matrix(x1, x2)
if self._scaling_fn is not None:
scal_x1 = util.pad_shape_with_ones(
tf.convert_to_tensor(self._scaling_fn(x1, *self._fn_args)),
ndims=1,
start=-(self._feature_ndims + 1),
)
scal_x2 = util.pad_shape_with_ones(
tf.convert_to_tensor(self._scaling_fn(x2, *self._fn_args)),
ndims=1,
start=-(self._feature_ndims + 2),
)
scal = util.sum_rightmost_ndims_preserving_shape(
scal_x1 * scal_x2, ndims=self._feature_ndims
)
return scal * cov
return cov
def _batch_shape(self):
return self._kernel.batch_shape
def _batch_shape_tensor(self):
return self._kernel._batch_shape_tensor()
def _parameter_control_dependencies(self, is_init):
return self._kernel._parameter_control_dependencies(is_init=is_init)
|
py | 1a4a3eb44e91bff73c043a618791145a47fdfb96 | # -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <[email protected]>
"""
Defines functions for plotting the results of the identify step.
"""
from functools import singledispatch
import numpy as np
import scipy.linalg as la
from fsc.export import export
from .result import NodalPoint, NodalLine
from .._common_plot import _setup_plot
@export
def result(res, *, axis=None):
"""Plot the result of the identify step.
Arguments
---------
res : IdentificationResultContainer
Result of the identify step.
axis : matplotlib.axes.Axes, optional
Axes on which the result is plotted.
"""
fig, axis, _ = _setup_plot(res.coordinate_system.limits, axis=axis)
feature_size = res.feature_size
for identification_result in res:
shape = identification_result.shape
color = axis._get_lines.get_next_color() # pylint: disable=protected-access
if shape is None:
_plot_positions(
identification_result.positions, axis=axis, color=color
)
else:
_plot_result(
shape, axis=axis, color=color, feature_size=feature_size
)
return fig, axis
def _plot_positions(positions, *, axis, color):
coordinates = list(np.array(list(positions)).T)
axis.scatter(*coordinates, color=color)
@singledispatch
def _plot_result(shape, axis, color, feature_size):
raise NotImplementedError
@export
@_plot_result.register(NodalPoint)
def nodal_point(shape, *, axis, color, feature_size=None):
"""
Plot a nodal point.
Arguments
---------
shape : NodalPoint
Nodal point to be plotted.
axis : matplotlib.axes.Axes
Axes on which to plot.
color : str
Color of the point.
feature_size : float
Distance between two nodal points at which they are considered distinct.
This argument is not used in this function.
"""
coordinates = [[val] for val in shape.position]
axis.scatter(*coordinates, color=color)
@export
@_plot_result.register(NodalLine)
def nodal_line(shape, *, axis, color, feature_size=None):
"""
Plot a nodal line.
Arguments
---------
shape : NodalLine
Nodal line to be plotted.
axis : matplotlib.axes.Axes
Axes on which to plot.
color : str
Color of the nodal line.
feature_size : float
Distance between two nodal points at which they are considered distinct.
Used for cutting the line when it goes across periodic boundaries.
"""
if feature_size is None:
feature_size = np.inf
graph = shape.graph
paths = _get_graph_paths(graph, feature_size=feature_size)
if paths:
for path in paths:
axis.plot(*np.array(path).T, color=color) # pylint: disable=not-an-iterable
else:
axis.scatter(*np.array(list(graph.nodes)).T, color=color) # pylint: disable=not-an-iterable
def _get_graph_paths(graph, feature_size):
"""
Separate a graph into paths, breaking when there is no neighbor or when
passing across the periodic boundary.
"""
working_graph = graph.copy()
paths = []
while working_graph.edges:
curr_node = _get_next_starting_point(working_graph)
curr_path = [curr_node]
while True:
try:
next_node = next(working_graph.neighbors(curr_node))
except StopIteration:
paths.append(curr_path)
break
if la.norm(
np.array(next_node) - np.array(curr_node)
) > 2 * feature_size:
paths.append(curr_path)
curr_path = [next_node]
else:
curr_path.append(next_node)
working_graph.remove_edge(curr_node, next_node)
curr_node = next_node
return paths
def _get_next_starting_point(graph):
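# Prefer endpoints or junctions (degree != 2) as starting points; if every
# remaining node has degree 2 (a closed loop), fall back to an arbitrary one.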
nonzero_degree = [(node, degree) for node, degree in graph.degree
if degree > 0]
return min(
nonzero_degree,
key=lambda val: val[1] if val[1] != 2 else float('inf')
)[0]
|
py | 1a4a3ebd153b7cfc00553125517d41e3d2b0022a | import secure_smtpd.config
from .config import LOG_NAME
from .smtp_server import SMTPServer
from .fake_credential_validator import FakeCredentialValidator
from .proxy_server import ProxyServer
|
py | 1a4a3f523cbbef20b174f987419e87228cb2a426 | import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import Index, Series
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
class TestSeriesQuantile:
def test_quantile(self, datetime_series):
q = datetime_series.quantile(0.1)
assert q == np.percentile(datetime_series.dropna(), 10)
q = datetime_series.quantile(0.9)
assert q == np.percentile(datetime_series.dropna(), 90)
# object dtype
q = Series(datetime_series, dtype=object).quantile(0.9)
assert q == np.percentile(datetime_series.dropna(), 90)
# datetime64[ns] dtype
dts = datetime_series.index.to_series()
q = dts.quantile(0.2)
assert q == Timestamp("2000-01-10 19:12:00")
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(0.25)
assert q == pd.to_timedelta("24:00:00")
# GH7661
result = Series([np.timedelta64("NaT")]).sum()
assert result == pd.Timedelta(0)
msg = "percentiles should all be in the interval \\[0, 1\\]"
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with pytest.raises(ValueError, match=msg):
datetime_series.quantile(invalid)
def test_quantile_multi(self, datetime_series):
qs = [0.1, 0.9]
result = datetime_series.quantile(qs)
expected = pd.Series(
[
np.percentile(datetime_series.dropna(), 10),
np.percentile(datetime_series.dropna(), 90),
],
index=qs,
name=datetime_series.name,
)
tm.assert_series_equal(result, expected)
dts = datetime_series.index.to_series()
dts.name = "xxx"
result = dts.quantile((0.2, 0.2))
expected = Series(
[Timestamp("2000-01-10 19:12:00"), Timestamp("2000-01-10 19:12:00")],
index=[0.2, 0.2],
name="xxx",
)
tm.assert_series_equal(result, expected)
result = datetime_series.quantile([])
expected = pd.Series(
[], name=datetime_series.name, index=Index([], dtype=float)
)
tm.assert_series_equal(result, expected)
def test_quantile_interpolation(self, datetime_series):
# see gh-10174
# interpolation = linear (default case)
q = datetime_series.quantile(0.1, interpolation="linear")
assert q == np.percentile(datetime_series.dropna(), 10)
q1 = datetime_series.quantile(0.1)
assert q1 == np.percentile(datetime_series.dropna(), 10)
# test with and without interpolation keyword
assert q == q1
def test_quantile_interpolation_dtype(self):
# GH #10174
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation="lower")
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation="higher")
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
def test_quantile_nan(self):
# GH 13098
s = pd.Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
assert result == expected
# all nan/empty
cases = [Series([]), Series([np.nan, np.nan])]
for s in cases:
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
tm.assert_series_equal(res, pd.Series([np.nan, np.nan], index=[0.2, 0.3]))
@pytest.mark.parametrize(
"case",
[
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
],
[
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
],
[pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")],
# NaT
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.NaT,
],
[
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
pd.NaT,
],
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.NaT,
],
],
)
def test_quantile_box(self, case):
s = pd.Series(case, name="XXX")
res = s.quantile(0.5)
assert res == case[1]
res = s.quantile([0.5])
exp = pd.Series([case[1]], index=[0.5], name="XXX")
tm.assert_series_equal(res, exp)
def test_datetime_timedelta_quantiles(self):
# covers #9694
assert pd.isna(Series([], dtype="M8[ns]").quantile(0.5))
assert pd.isna(Series([], dtype="m8[ns]").quantile(0.5))
def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile(0.5)
assert res is pd.NaT
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
@pytest.mark.parametrize(
"values, dtype",
[([0, 0, 0, 1, 2, 3], "Sparse[int]"), ([0.0, None, 1.0, 2.0], "Sparse[float]")],
)
def test_quantile_sparse(self, values, dtype):
ser = pd.Series(values, dtype=dtype)
result = ser.quantile([0.5])
expected = pd.Series(np.asarray(ser)).quantile([0.5])
tm.assert_series_equal(result, expected)
def test_quantile_empty(self):
# floats
s = Series([], dtype="float64")
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# int
s = Series([], dtype="int64")
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# datetime
s = Series([], dtype="datetime64[ns]")
res = s.quantile(0.5)
assert res is pd.NaT
res = s.quantile([0.5])
exp = Series([pd.NaT], index=[0.5])
tm.assert_series_equal(res, exp)
|
py | 1a4a40a68ed4a4531174d75e071c0a162e0b582c |
from django.db import models
class File(models.Model):
file = models.FileField(blank=False, null=False)
remark = models.CharField(max_length=20)
timestamp = models.DateTimeField(auto_now_add=True) |
py | 1a4a41349d037d586adc87daa6ec9be74aff0272 | import pymongo
from pyzotero import zotero
library_id = '126319'
library_type='group'
#api_key = 'hT2SLlqonWyD1253s93zg3bC'
api_key = 'lHwNOyZeZkFhnGsPPI0KCNeQ'
mongo = pymongo.Connection ('localhost', 27017)['sicki']
zot = zotero.Zotero(library_id, library_type, api_key)
total = zot.num_items ()
print total
limit = 30
start = 0
count = 0
update = 0
insert = 0
while start < total:
print '%d' % (start,)
items = zot.top (start = start, limit = limit)
for item in items:
# Strangeness if start and limit exceed the total number of items. Don't read more than the total number of items
if count >= total:
break
count += 1
if not mongo.refs.find ({'key': item['key']}).count ():
insert += 1
mongo.refs.insert (item)
else:
update += 1
mongo.refs.update ({
'key': item['key']
}, {
'$set': item
})
start += limit
print "%d Records Found: %d Inserted, %d Updated" % (count, insert, update)
|
py | 1a4a4181676813cf36906af52837e1428c79ac2a | import logging
logging.addLevelName(logging.DEBUG, "\033[1;32m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(
logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING)
)
logging.addLevelName(logging.ERROR, "\033[1;41m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
def get_logger(initname, verb=False):
"""
This function provides a logger to all scripts used in this project.
:param initname: The name of the logger to show up in log.
:param verb: Toggle verbosity
:return: the finished Logger object.
"""
logger = logging.getLogger(initname)
if type(verb) is bool:
logger.setLevel(logging.INFO if verb else logging.WARNING)
else:
logger.setLevel(verb) # TODO: hacky shit
ch = logging.StreamHandler()
ch.setLevel(logging.INFO if verb else logging.WARNING)
logstring = (
'\033[1;32m[%(asctime)s]\033[1;0m \033[1m%(name)s\033[1;0m - %(levelname)s - %(message)s'
)
formatter = logging.Formatter(logstring, '%Y-%m-%d %H:%M:%S')
ch.setFormatter(formatter)
if logger.hasHandlers():
logger.handlers.clear()
logger.addHandler(ch)
logger.propagate = False
return logger
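# Usage sketch (editorial addition; the module name below is illustrative only):
if __name__ == '__main__':
    log = get_logger('example_module', verb=True)
    log.info('verbose informational message')      # emitted because verb=True
    log.warning('warnings are always emitted')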
|
py | 1a4a41c9155315629bc5c14d66ecc0d266f80ae5 | from unittest import TestCase
from expects import expect, equal, raise_error
from slender import List
class TestDelete(TestCase):
def setUp(self):
self.l = List([1, 2, 3, 2, 3, 2])
def test_delete_if_obj_is_found_multiple_times(self):
expect(self.l.delete(2).to_list()).to(equal([1, 3, 3]))
def test_delete_if_obj_is_not_found(self):
expect(self.l.delete(5).to_list()).to(equal([1, 2, 3, 2, 3, 2]))
def test_delete_if_self_is_empty(self):
l = List()
expect(l.delete(3).to_list()).to(equal([]))
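# Editorial sketch (not part of the slender package): the tests above pin down the
# delete() contract -- every occurrence of obj is removed, and a missing obj is a
# no-op. A minimal list-comprehension equivalent of that behaviour, for reference:
def _delete_all(values, obj):
    return [item for item in values if item != obj]
# _delete_all([1, 2, 3, 2, 3, 2], 2) -> [1, 3, 3]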
|
py | 1a4a422fb6b5dc1fd21586e585be7dd50d5e1c9c | ##
# See the file COPYRIGHT for copyright information.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Street tests for :mod:`ranger-ims-server.store`
"""
from ims.ext.trial import asyncAsDeferred
from ims.model import Event
from .base import DataStoreTests
__all__ = ()
class DataStoreConcentricStreetTests(DataStoreTests):
"""
Tests for :class:`IMSDataStore` concentric street access.
"""
@asyncAsDeferred
async def test_concentricStreets(self) -> None:
"""
:meth:`IMSDataStore.concentricStreets` returns the concentric
streets for the given event.
"""
for event, streetID, streetName in (
(Event(id="Foo"), "A", "Alpha"),
(Event(id="Foo Bar"), "B", "Bravo"),
(Event(id="XYZZY"), "C", "Charlie"),
):
store = await self.store()
await store.createEvent(event)
await store.storeConcentricStreet(event, streetID, streetName)
concentricStreets = await store.concentricStreets(event)
self.assertEqual(len(concentricStreets), 1)
self.assertEqual(concentricStreets.get(streetID), streetName)
@asyncAsDeferred
async def test_createConcentricStreet(self) -> None:
"""
:meth:`IMSDataStore.createConcentricStreet` creates a concentric
street for the given event.
"""
for event, streetID, streetName in (
(Event(id="Foo"), "A", "Alpha"),
(Event(id="Foo Bar"), "B", "Bravo"),
(Event(id="XYZZY"), "C", "Charlie"),
):
store = await self.store()
await store.createEvent(event)
await store.createConcentricStreet(
event=event, id=streetID, name=streetName
)
stored = await store.concentricStreets(event=event)
self.assertEqual(len(stored), 1)
self.assertEqual(stored.get(streetID), streetName)
|
py | 1a4a42d1a9e364af13d9967ec2e23c009e6dae0b | # -*- coding: utf-8 -*-
import os
import sys
import threading
from copy import deepcopy
from tempfile import mkstemp
import six
from six import BytesIO
from .import_bind import PostImportHookPatching
from ..config import running_remotely
from ..debugging.log import LoggerRoot
from ..utilities.resource_monitor import ResourceMonitor
class PatchedMatplotlib:
_patched_original_plot = None
_patched_original_figure = None
_patched_original_savefig = None
__patched_original_imshow = None
__patched_original_draw_all = None
__patched_draw_all_recursion_guard = False
_global_plot_counter = -1
_global_image_counter = -1
_global_image_counter_limit = None
_last_iteration_plot_titles = {}
_current_task = None
_support_image_plot = False
_matplotlylib = None
_plotly_renderer = None
_lock_renderer = threading.RLock()
_recursion_guard = {}
_matplot_major_version = 2
_logger_started_reporting = False
_matplotlib_reported_titles = set()
class _PatchWarnings(object):
def __init__(self):
pass
def warn(self, text, *args, **kwargs):
raise ValueError(text)
def __getattr__(self, item):
def bypass(*args, **kwargs):
pass
return bypass
@staticmethod
def patch_matplotlib():
# only once
if PatchedMatplotlib._patched_original_plot is not None:
return True
# make sure we only patch once
PatchedMatplotlib._patched_original_plot = False
# noinspection PyBroadException
try:
# we support matplotlib version 2.0.0 and above
import matplotlib
PatchedMatplotlib._matplot_major_version = int(matplotlib.__version__.split('.')[0])
if PatchedMatplotlib._matplot_major_version < 2:
LoggerRoot.get_base_logger().warning(
'matplotlib binding supports version 2.0 and above, found version {}'.format(
matplotlib.__version__))
PatchedMatplotlib._patched_original_plot = False
return False
if running_remotely():
# disable GUI backend - make headless
matplotlib.rcParams['backend'] = 'agg'
import matplotlib.pyplot
matplotlib.pyplot.switch_backend('agg')
import matplotlib.pyplot as plt
import matplotlib.figure as figure
if six.PY2:
PatchedMatplotlib._patched_original_plot = staticmethod(plt.show)
PatchedMatplotlib._patched_original_imshow = staticmethod(plt.imshow)
PatchedMatplotlib._patched_original_figure = staticmethod(figure.Figure.show)
PatchedMatplotlib._patched_original_savefig = staticmethod(figure.Figure.savefig)
else:
PatchedMatplotlib._patched_original_plot = plt.show
PatchedMatplotlib._patched_original_imshow = plt.imshow
PatchedMatplotlib._patched_original_figure = figure.Figure.show
PatchedMatplotlib._patched_original_savefig = figure.Figure.savefig
# noinspection PyBroadException
try:
import matplotlib.pylab as pltlab
if plt.show == pltlab.show:
pltlab.show = PatchedMatplotlib.patched_show
if plt.imshow == pltlab.imshow:
pltlab.imshow = PatchedMatplotlib.patched_imshow
except Exception:
pass
plt.show = PatchedMatplotlib.patched_show
figure.Figure.show = PatchedMatplotlib.patched_figure_show
sys.modules['matplotlib'].pyplot.imshow = PatchedMatplotlib.patched_imshow
sys.modules['matplotlib'].figure.Figure.savefig = PatchedMatplotlib.patched_savefig
# patch plotly so we know it failed us.
from plotly.matplotlylib import renderer
renderer.warnings = PatchedMatplotlib._PatchWarnings()
# ignore deprecation warnings from plotly to matplotlib
try:
import warnings
warnings.filterwarnings(action='ignore', category=matplotlib.MatplotlibDeprecationWarning,
module='plotly')
warnings.filterwarnings(action='ignore', category=UserWarning, module='plotly')
except Exception:
pass
except Exception:
return False
# patch IPython matplotlib inline mode
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
from IPython import get_ipython
ip = get_ipython()
if ip and matplotlib.is_interactive():
# instead of hooking IPython, we hook matplotlib's draw_all directly
import matplotlib.pyplot as plt
PatchedMatplotlib.__patched_original_draw_all = plt.draw_all
plt.draw_all = PatchedMatplotlib.__patched_draw_all
# ip.events.register('post_execute', PatchedMatplotlib.ipython_post_execute_hook)
except Exception:
pass
# update api version
from ..backend_api import Session
PatchedMatplotlib._support_image_plot = Session.check_min_api_version('2.2')
# create plotly renderer
try:
from plotly import optional_imports
PatchedMatplotlib._matplotlylib = optional_imports.get_module('plotly.matplotlylib')
PatchedMatplotlib._plotly_renderer = PatchedMatplotlib._matplotlylib.PlotlyRenderer()
except Exception:
pass
return True
@staticmethod
def update_current_task(task):
# make sure we have a default value
if PatchedMatplotlib._global_image_counter_limit is None:
from ..config import config
PatchedMatplotlib._global_image_counter_limit = config.get('metric.matplotlib_untitled_history_size', 100)
# if we already patched it, just update the current task
if PatchedMatplotlib._patched_original_plot is not None:
PatchedMatplotlib._current_task = task
# if matplotlib is not loaded yet, get a callback hook
elif not running_remotely() and \
('matplotlib.pyplot' not in sys.modules and 'matplotlib.pylab' not in sys.modules):
PatchedMatplotlib._current_task = task
PostImportHookPatching.add_on_import('matplotlib.pyplot', PatchedMatplotlib.patch_matplotlib)
PostImportHookPatching.add_on_import('matplotlib.pylab', PatchedMatplotlib.patch_matplotlib)
elif PatchedMatplotlib.patch_matplotlib():
PatchedMatplotlib._current_task = task
@staticmethod
def patched_imshow(*args, **kw):
ret = PatchedMatplotlib._patched_original_imshow(*args, **kw)
try:
from matplotlib import _pylab_helpers
# store on the plot that this is an imshow plot
stored_figure = _pylab_helpers.Gcf.get_active()
if stored_figure:
stored_figure._trains_is_imshow = 1 if not hasattr(stored_figure, '_trains_is_imshow') \
else stored_figure._trains_is_imshow + 1
except Exception:
pass
return ret
@staticmethod
def patched_savefig(self, *args, **kw):
ret = PatchedMatplotlib._patched_original_savefig(self, *args, **kw)
# noinspection PyBroadException
try:
fname = kw.get('fname') or args[0]
from pathlib2 import Path
if six.PY3:
from pathlib import Path as Path3
else:
Path3 = Path
# if we are not storing into a file (str/Path), do not log the matplotlib figure
if not isinstance(fname, (str, Path, Path3)):
return ret
except Exception:
pass
tid = threading._get_ident() if six.PY2 else threading.get_ident()
if not PatchedMatplotlib._recursion_guard.get(tid):
PatchedMatplotlib._recursion_guard[tid] = True
# noinspection PyBroadException
try:
PatchedMatplotlib._report_figure(specific_fig=self, set_active=False)
except Exception:
pass
PatchedMatplotlib._recursion_guard[tid] = False
return ret
@staticmethod
def patched_figure_show(self, *args, **kw):
tid = threading._get_ident() if six.PY2 else threading.get_ident()
if PatchedMatplotlib._recursion_guard.get(tid):
# we are inside a guard, do nothing
return PatchedMatplotlib._patched_original_figure(self, *args, **kw)
PatchedMatplotlib._recursion_guard[tid] = True
PatchedMatplotlib._report_figure(set_active=False, specific_fig=self)
ret = PatchedMatplotlib._patched_original_figure(self, *args, **kw)
PatchedMatplotlib._recursion_guard[tid] = False
return ret
@staticmethod
def patched_show(*args, **kw):
tid = threading._get_ident() if six.PY2 else threading.get_ident()
PatchedMatplotlib._recursion_guard[tid] = True
# noinspection PyBroadException
try:
figures = PatchedMatplotlib._get_output_figures(None, all_figures=True)
for figure in figures:
# if this is a stale figure (just updated) we should send it, the rest will not be stale
if figure.canvas.figure.stale or (hasattr(figure, '_trains_is_imshow') and figure._trains_is_imshow):
PatchedMatplotlib._report_figure(stored_figure=figure)
except Exception:
pass
ret = PatchedMatplotlib._patched_original_plot(*args, **kw)
if PatchedMatplotlib._current_task and sys.modules['matplotlib'].rcParams['backend'] == 'agg':
# clear the current plot, because no one else will
# noinspection PyBroadException
try:
if sys.modules['matplotlib'].rcParams['backend'] == 'agg':
import matplotlib.pyplot as plt
plt.clf()
except Exception:
pass
PatchedMatplotlib._recursion_guard[tid] = False
return ret
@staticmethod
def _report_figure(force_save_as_image=False, stored_figure=None, set_active=True, specific_fig=None):
if not PatchedMatplotlib._current_task:
return
# noinspection PyBroadException
try:
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
from plotly.io import templates
if specific_fig is None:
# store the figure object we just created (if it is not already there)
stored_figure = stored_figure or _pylab_helpers.Gcf.get_active()
if not stored_figure:
# nothing for us to do
return
# check if this is an imshow
if hasattr(stored_figure, '_trains_is_imshow'):
# flag will be cleared when calling clf() (object will be replaced)
stored_figure._trains_is_imshow = max(0, stored_figure._trains_is_imshow - 1)
force_save_as_image = True
# get current figure
mpl_fig = stored_figure.canvas.figure # plt.gcf()
else:
mpl_fig = specific_fig
# convert to plotly
image = None
plotly_fig = None
image_format = 'jpeg'
fig_dpi = 300
if force_save_as_image:
# if this is an image, store as is.
fig_dpi = None
else:
image_format = 'svg'
# protect with lock, so we support multiple threads using the same renderer
PatchedMatplotlib._lock_renderer.acquire()
# noinspection PyBroadException
try:
def our_mpl_to_plotly(fig):
if not PatchedMatplotlib._matplotlylib or not PatchedMatplotlib._plotly_renderer:
return None
plotly_renderer = PatchedMatplotlib._matplotlylib.PlotlyRenderer()
PatchedMatplotlib._matplotlylib.Exporter(plotly_renderer, close_mpl=False).run(fig)
x_ticks = list(plotly_renderer.current_mpl_ax.get_xticklabels())
if x_ticks:
# noinspection PyBroadException
try:
# check if all values can be cast to float
[float(t.get_text().replace('−', '-')) for t in x_ticks]
except Exception:
# noinspection PyBroadException
try:
plotly_renderer.plotly_fig['layout']['xaxis1'].update({
'ticktext': [t.get_text() for t in x_ticks],
'tickvals': [t.get_position()[0] for t in x_ticks],
})
except Exception:
pass
y_ticks = list(plotly_renderer.current_mpl_ax.get_yticklabels())
if y_ticks:
# noinspection PyBroadException
try:
# check if all values can be cast to float
_ = [float(t.get_text().replace('−', '-')) for t in y_ticks]
except Exception:
# noinspection PyBroadException
try:
plotly_renderer.plotly_fig['layout']['yaxis1'].update({
'ticktext': [t.get_text() for t in y_ticks],
'tickvals': [t.get_position()[1] for t in y_ticks],
})
except Exception:
pass
return deepcopy(plotly_renderer.plotly_fig)
plotly_fig = our_mpl_to_plotly(mpl_fig)
# noinspection PyBroadException
try:
if 'none' in templates:
plotly_fig._layout_obj.template = templates['none']
except Exception:
pass
except Exception as ex:
# this was an image, change format to png
image_format = 'jpeg' if 'selfie' in str(ex) else 'png'
fig_dpi = 300
finally:
PatchedMatplotlib._lock_renderer.release()
# plotly could not serialize the plot, we should convert to image
if not plotly_fig:
plotly_fig = None
# noinspection PyBroadException
try:
# first try SVG; if that fails, fall back to PNG
buffer_ = BytesIO()
a_plt = specific_fig if specific_fig is not None else plt
if PatchedMatplotlib._matplot_major_version < 3:
a_plt.savefig(buffer_, dpi=fig_dpi, format=image_format, bbox_inches='tight', pad_inches=0,
frameon=False)
else:
a_plt.savefig(buffer_, dpi=fig_dpi, format=image_format, bbox_inches='tight', pad_inches=0,
facecolor=None)
buffer_.seek(0)
except Exception:
image_format = 'png'
buffer_ = BytesIO()
a_plt = specific_fig if specific_fig is not None else plt
if PatchedMatplotlib._matplot_major_version < 3:
a_plt.savefig(buffer_, dpi=fig_dpi, format=image_format, bbox_inches='tight', pad_inches=0,
frameon=False)
else:
a_plt.savefig(buffer_, dpi=fig_dpi, format=image_format, bbox_inches='tight', pad_inches=0,
facecolor=None)
buffer_.seek(0)
fd, image = mkstemp(suffix='.' + image_format)
os.write(fd, buffer_.read())
os.close(fd)
# check if we need to restore the active object
if set_active and not _pylab_helpers.Gcf.get_active():
_pylab_helpers.Gcf.set_active(stored_figure)
# get the main task
reporter = PatchedMatplotlib._current_task.reporter
if reporter is not None:
if mpl_fig.texts:
plot_title = mpl_fig.texts[0].get_text()
else:
gca = mpl_fig.gca()
plot_title = gca.title.get_text() if gca.title else None
# remove borders and size, we should let the web take care of that
if plotly_fig:
last_iteration = PatchedMatplotlib._get_last_iteration()
if plot_title:
title = PatchedMatplotlib._enforce_unique_title_per_iteration(plot_title, last_iteration)
else:
PatchedMatplotlib._global_plot_counter += 1
title = 'untitled %02d' % PatchedMatplotlib._global_plot_counter
plotly_fig.layout.margin = {}
plotly_fig.layout.autosize = True
plotly_fig.layout.height = None
plotly_fig.layout.width = None
# send the plot event
plotly_dict = plotly_fig.to_plotly_json()
if not plotly_dict.get('layout'):
plotly_dict['layout'] = {}
plotly_dict['layout']['title'] = title
PatchedMatplotlib._matplotlib_reported_titles.add(title)
reporter.report_plot(title=title, series='plot', plot=plotly_dict, iter=last_iteration)
else:
logger = PatchedMatplotlib._current_task.get_logger()
# this is actually a failed plot, we should put it under plots:
# currently disabled
if force_save_as_image or not PatchedMatplotlib._support_image_plot:
last_iteration = PatchedMatplotlib._get_last_iteration()
# send the plot as image
if plot_title:
title = PatchedMatplotlib._enforce_unique_title_per_iteration(plot_title, last_iteration)
else:
PatchedMatplotlib._global_image_counter += 1
title = 'untitled %02d' % (PatchedMatplotlib._global_image_counter %
PatchedMatplotlib._global_image_counter_limit)
PatchedMatplotlib._matplotlib_reported_titles.add(title)
logger.report_image(title=title, series='plot image', local_path=image,
delete_after_upload=True, iteration=last_iteration)
else:
# send the plot as plotly with embedded image
last_iteration = PatchedMatplotlib._get_last_iteration()
if plot_title:
title = PatchedMatplotlib._enforce_unique_title_per_iteration(plot_title, last_iteration)
else:
PatchedMatplotlib._global_plot_counter += 1
title = 'untitled %02d' % (PatchedMatplotlib._global_plot_counter %
PatchedMatplotlib._global_image_counter_limit)
PatchedMatplotlib._matplotlib_reported_titles.add(title)
# noinspection PyProtectedMember
logger._report_image_plot_and_upload(
title=title, series='plot image', path=image,
delete_after_upload=True, iteration=last_iteration)
except Exception:
# plotly failed
pass
return
@staticmethod
def _enforce_unique_title_per_iteration(title, last_iteration):
# type: (str, int) -> str
"""
A Matplotlib figure with a specific title resets the title counter on every new iteration.
Calling the same title twice within one iteration will produce "title" and "title/1"
:param title: original matplotlib title
:param last_iteration: the current "last_iteration"
:return: new title to use (with counter attached if necessary)
"""
# check if we already encountered the title
if title in PatchedMatplotlib._last_iteration_plot_titles:
# if we have seen this title before, check its last iteration
title_last_iteration, title_counter = PatchedMatplotlib._last_iteration_plot_titles[title]
# still within the same iteration, so increment the counter for this title
if last_iteration == title_last_iteration:
title_counter += 1
else: # if this is a new iteration start from the beginning
title_last_iteration = last_iteration
title_counter = 0
else:
# this is a new title
title_last_iteration = last_iteration
title_counter = 0
base_title = title
# if the counter is zero, do not append it to the title
if title_counter != 0:
title = base_title + '/%d' % title_counter
# update back the title iteration counter
PatchedMatplotlib._last_iteration_plot_titles[base_title] = (title_last_iteration, title_counter)
return title
@staticmethod
def _get_output_figures(stored_figure, all_figures):
try:
from matplotlib import _pylab_helpers
if all_figures:
return list(_pylab_helpers.Gcf.figs.values())
else:
return [stored_figure] or [_pylab_helpers.Gcf.get_active()]
except Exception:
return []
@staticmethod
def __patched_draw_all(*args, **kwargs):
recursion_guard = PatchedMatplotlib.__patched_draw_all_recursion_guard
if not recursion_guard:
PatchedMatplotlib.__patched_draw_all_recursion_guard = True
ret = PatchedMatplotlib.__patched_original_draw_all(*args, **kwargs)
if not recursion_guard:
PatchedMatplotlib.ipython_post_execute_hook()
PatchedMatplotlib.__patched_draw_all_recursion_guard = False
return ret
@staticmethod
def _get_last_iteration():
if PatchedMatplotlib._logger_started_reporting:
return PatchedMatplotlib._current_task.get_last_iteration()
# get the reported plot titles (exclude us)
reported_titles = ResourceMonitor.get_logger_reported_titles(PatchedMatplotlib._current_task)
if not reported_titles:
return 0
# check that this is not only us
if not (set(reported_titles) - PatchedMatplotlib._matplotlib_reported_titles):
return 0
# mark reporting started
PatchedMatplotlib._logger_started_reporting = True
return PatchedMatplotlib._current_task.get_last_iteration()
@staticmethod
def ipython_post_execute_hook():
# noinspection PyBroadException
try:
from matplotlib import _pylab_helpers
for i, f_mgr in enumerate(_pylab_helpers.Gcf.get_all_fig_managers()):
if not f_mgr.canvas.figure.stale:
PatchedMatplotlib._report_figure(stored_figure=f_mgr)
except Exception:
pass
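# Editorial illustration (standalone sketch, not part of the patcher): the
# per-iteration title bookkeeping documented in _enforce_unique_title_per_iteration
# reduces to the logic below; 'loss' is a hypothetical plot title used only here.
def _demo_unique_titles(title_iteration_pairs):
    seen = {}  # title -> (last_iteration, counter)
    out = []
    for title, iteration in title_iteration_pairs:
        last_iteration, counter = seen.get(title, (iteration, -1))
        counter = counter + 1 if iteration == last_iteration else 0
        seen[title] = (iteration, counter)
        out.append(title if counter == 0 else '%s/%d' % (title, counter))
    return out
# _demo_unique_titles([('loss', 0), ('loss', 0), ('loss', 1)]) -> ['loss', 'loss/1', 'loss']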
|
py | 1a4a42d981d8e5a0544e4c0f11fdc27ef218fff8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ##
# @brief [py example simple] motion basic test for doosan robot
# @author Kab Kyoum Kim ([email protected])
import rospy
import os
import threading, time
import sys
sys.dont_write_bytecode = True
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),"../../../../common/imp")) ) # get import path : DSR_ROBOT.py
# for single robot
ROBOT_ID = "dsr01"
ROBOT_MODEL = "m1013"
import DR_init
DR_init.__dsr__id = ROBOT_ID
DR_init.__dsr__model = ROBOT_MODEL
from DSR_ROBOT import *
def shutdown():
print "shutdown time!"
print "shutdown time!"
print "shutdown time!"
pub_stop.publish(stop_mode=STOP_TYPE_QUICK)
return 0
def msgRobotState_cb(msg):
msgRobotState_cb.count += 1
if (0==(msgRobotState_cb.count % 100)):
rospy.loginfo("________ ROBOT STATUS ________")
print(" robot_state : %d" % (msg.robot_state))
print(" robot_state_str : %s" % (msg.robot_state_str))
print(" actual_mode : %d" % (msg.actual_mode))
print(" actual_space : %d" % (msg.actual_space))
print(" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))
print(" current_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velj[0],msg.current_velj[1],msg.current_velj[2],msg.current_velj[3],msg.current_velj[4],msg.current_velj[5]))
print(" joint_abs : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_abs[0],msg.joint_abs[1],msg.joint_abs[2],msg.joint_abs[3],msg.joint_abs[4],msg.joint_abs[5]))
print(" joint_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_err[0],msg.joint_err[1],msg.joint_err[2],msg.joint_err[3],msg.joint_err[4],msg.joint_err[5]))
print(" target_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_posj[0],msg.target_posj[1],msg.target_posj[2],msg.target_posj[3],msg.target_posj[4],msg.target_posj[5]))
print(" target_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_velj[0],msg.target_velj[1],msg.target_velj[2],msg.target_velj[3],msg.target_velj[4],msg.target_velj[5]))
print(" current_posx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posx[0],msg.current_posx[1],msg.current_posx[2],msg.current_posx[3],msg.current_posx[4],msg.current_posx[5]))
print(" current_velx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velx[0],msg.current_velx[1],msg.current_velx[2],msg.current_velx[3],msg.current_velx[4],msg.current_velx[5]))
print(" task_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.task_err[0],msg.task_err[1],msg.task_err[2],msg.task_err[3],msg.task_err[4],msg.task_err[5]))
print(" solution_space : %d" % (msg.solution_space))
sys.stdout.write(" rotation_matrix : ")
for i in range(0 , 3):
sys.stdout.write( "dim : [%d]"% i)
sys.stdout.write(" [ ")
for j in range(0 , 3):
sys.stdout.write("%d " % msg.rotation_matrix[i].data[j])
sys.stdout.write("] ")
print ##end line
print(" dynamic_tor : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.dynamic_tor[0],msg.dynamic_tor[1],msg.dynamic_tor[2],msg.dynamic_tor[3],msg.dynamic_tor[4],msg.dynamic_tor[5]))
print(" actual_jts : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_jts[0],msg.actual_jts[1],msg.actual_jts[2],msg.actual_jts[3],msg.actual_jts[4],msg.actual_jts[5]))
print(" actual_ejt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ejt[0],msg.actual_ejt[1],msg.actual_ejt[2],msg.actual_ejt[3],msg.actual_ejt[4],msg.actual_ejt[5]))
print(" actual_ett : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ett[0],msg.actual_ett[1],msg.actual_ett[2],msg.actual_ett[3],msg.actual_ett[4],msg.actual_ett[5]))
print(" sync_time : %7.3f" % (msg.sync_time))
print(" actual_bk : %d %d %d %d %d %d" % (msg.actual_bk[0],msg.actual_bk[1],msg.actual_bk[2],msg.actual_bk[3],msg.actual_bk[4],msg.actual_bk[5]))
print(" actual_bt : %d %d %d %d %d " % (msg.actual_bt[0],msg.actual_bt[1],msg.actual_bt[2],msg.actual_bt[3],msg.actual_bt[4]))
print(" actual_mc : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mc[0],msg.actual_mc[1],msg.actual_mc[2],msg.actual_mc[3],msg.actual_mc[4],msg.actual_mc[5]))
print(" actual_mt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mt[0],msg.actual_mt[1],msg.actual_mt[2],msg.actual_mt[3],msg.actual_mt[4],msg.actual_mt[5]))
#print digital i/o
sys.stdout.write(" ctrlbox_digital_input : ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_input[i])
print ##end line
sys.stdout.write(" ctrlbox_digital_output: ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_output[i])
print
sys.stdout.write(" flange_digital_input : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_input[i])
print
sys.stdout.write(" flange_digital_output : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_output[i])
print
#print modbus i/o
sys.stdout.write(" modbus_state : " )
if len(msg.modbus_state) > 0:
for i in range(0 , len(msg.modbus_state)):
sys.stdout.write("[" + msg.modbus_state[i].modbus_symbol)
sys.stdout.write(", %d] " % msg.modbus_state[i].modbus_value)
print
print(" access_control : %d" % (msg.access_control))
print(" homming_completed : %d" % (msg.homming_completed))
print(" tp_initialized : %d" % (msg.tp_initialized))
print(" mastering_need : %d" % (msg.mastering_need))
print(" drl_stopped : %d" % (msg.drl_stopped))
print(" disconnected : %d" % (msg.disconnected))
msgRobotState_cb.count = 0
def thread_subscriber():
rospy.Subscriber('/'+ROBOT_ID +ROBOT_MODEL+'/state', RobotState, msgRobotState_cb)
rospy.spin()
#rospy.spinner(2)
if __name__ == "__main__":
rospy.init_node('dsr_service_motion_simple_py')
rospy.on_shutdown(shutdown)
t1 = threading.Thread(target=thread_subscriber)
t1.daemon = True
t1.start()
pub_stop = rospy.Publisher('/'+ROBOT_ID +ROBOT_MODEL+'/stop', RobotStop, queue_size=10)
set_velx(30,20) # set global task speed: 30(mm/sec), 20(deg/sec)
set_accx(60,40) # set global task accel: 60(mm/sec2), 40(deg/sec2)
velx=[50, 50]
accx=[100, 100]
p1= posj(0,0,0,0,0,0) #joint
p2= posj(0.0, 0.0, 90.0, 0.0, 90.0, 0.0) #joint
x1= posx(400, 500, 800.0, 0.0, 180.0, 0.0) #task
x2= posx(400, 500, 500.0, 0.0, 180.0, 0.0) #task
c1 = posx(559,434.5,651.5,0,180,0)
c2 = posx(559,434.5,251.5,0,180,0)
q0 = posj(0,0,0,0,0,0)
q1 = posj(10, -10, 20, -30, 10, 20)
q2 = posj(25, 0, 10, -50, 20, 40)
q3 = posj(50, 50, 50, 50, 50, 50)
q4 = posj(30, 10, 30, -20, 10, 60)
q5 = posj(20, 20, 40, 20, 0, 90)
qlist = [q0, q1, q2, q3, q4, q5]
x1 = posx(600, 600, 600, 0, 175, 0)
x2 = posx(600, 750, 600, 0, 175, 0)
x3 = posx(150, 600, 450, 0, 175, 0)
x4 = posx(-300, 300, 300, 0, 175, 0)
x5 = posx(-200, 700, 500, 0, 175, 0)
x6 = posx(600, 600, 400, 0, 175, 0)
xlist = [x1, x2, x3, x4, x5, x6]
X1 = posx(370, 670, 650, 0, 180, 0)
X1a = posx(370, 670, 400, 0, 180, 0)
X1a2= posx(370, 545, 400, 0, 180, 0)
X1b = posx(370, 595, 400, 0, 180, 0)
X1b2= posx(370, 670, 400, 0, 180, 0)
X1c = posx(370, 420, 150, 0, 180, 0)
X1c2= posx(370, 545, 150, 0, 180, 0)
X1d = posx(370, 670, 275, 0, 180, 0)
X1d2= posx(370, 795, 150, 0, 180, 0)
seg11 = posb(DR_LINE, X1, radius=20)
seg12 = posb(DR_CIRCLE, X1a, X1a2, radius=21)
seg14 = posb(DR_LINE, X1b2, radius=20)
seg15 = posb(DR_CIRCLE, X1c, X1c2, radius=22)
seg16 = posb(DR_CIRCLE, X1d, X1d2, radius=23)
b_list1 = [seg11, seg12, seg14, seg15, seg16]
while not rospy.is_shutdown():
movej(p2, vel=100, acc=100)
movejx(x1, vel=30, acc=60, sol=0)
movel(x2, velx, accx)
movec(c1, c2, velx, accx)
movesj(qlist, vel=100, acc=100)
movesx(xlist, vel=100, acc=100)
move_spiral(rev=9.5,rmax=20.0,lmax=50.0,time=20.0,axis=DR_AXIS_Z,ref=DR_TOOL)
move_periodic(amp =[10,0,0,0,30,0], period=1.0, atime=0.2, repeat=5, ref=DR_TOOL)
moveb(b_list1, vel=150, acc=250, ref=DR_BASE, mod=DR_MV_MOD_ABS)
print 'good bye!'
|
py | 1a4a432dc71b22b07b1f517d1f36f1d2e31c3138 | #Reading from 'p' is conflicted
from polyphony import module
from polyphony.io import Port
@module
class io_read_conflict03:
def __init__(self):
self.p = Port(int, 'in', protocol='valid')
self.append_worker(self.w)
self.append_worker(self.w)
def w(self):
data = self.p.rd()
print(data)
m = io_read_conflict03()
|
py | 1a4a436d7fa652367c9ff078440fc478b6650640 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "flow.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | 1a4a4454f3d82001f15c10a6f72d62b1464a07a2 | # AUTOGENERATED FILE - DO NOT MODIFY!
# This file was generated by Djinni from all_datatypes.djinni
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni.pycffi_marshal import CPyBinary, CPyBoxedBool, CPyDate, CPyEnum, CPyObject, CPyObject, CPyObjectProxy, CPyPrimitive, CPyRecord, CPyString
from dh__list_bool import ListBoolHelper
from dh__map_int8_t_bool import MapInt8TBoolHelper
from dh__map_int8_t_bool import MapInt8TBoolProxy
from dh__set_bool import SetBoolHelper
from dh__set_bool import SetBoolProxy
from enum_data import EnumData
from _cffi import ffi, lib
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
from all_datatypes import AllDatatypes
class AllDatatypesHelper:
@staticmethod
def release(c_ptr):
assert c_ptr in c_data_set
c_data_set.remove(ffi.cast("void*", c_ptr))
@ffi.callback("bool(struct DjinniRecordHandle *)")
def get_all_datatypes_f1(cself):
try:
_ret = CPyPrimitive.fromPy(CPyRecord.toPy(None, cself).booleanData)
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("int8_t(struct DjinniRecordHandle *)")
def get_all_datatypes_f2(cself):
try:
_ret = CPyPrimitive.fromPy(CPyRecord.toPy(None, cself).integer8Data)
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("int16_t(struct DjinniRecordHandle *)")
def get_all_datatypes_f3(cself):
try:
_ret = CPyPrimitive.fromPy(CPyRecord.toPy(None, cself).integer16Data)
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("int32_t(struct DjinniRecordHandle *)")
def get_all_datatypes_f4(cself):
try:
_ret = CPyPrimitive.fromPy(CPyRecord.toPy(None, cself).integer32Data)
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("int64_t(struct DjinniRecordHandle *)")
def get_all_datatypes_f5(cself):
try:
_ret = CPyPrimitive.fromPy(CPyRecord.toPy(None, cself).integer64Data)
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("float(struct DjinniRecordHandle *)")
def get_all_datatypes_f6(cself):
try:
_ret = CPyPrimitive.fromPy(CPyRecord.toPy(None, cself).float32Data)
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("double(struct DjinniRecordHandle *)")
def get_all_datatypes_f7(cself):
try:
_ret = CPyPrimitive.fromPy(CPyRecord.toPy(None, cself).float64Data)
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("struct DjinniString *(struct DjinniRecordHandle *)")
def get_all_datatypes_f8(cself):
try:
with CPyString.fromPy(CPyRecord.toPy(None, cself).stringData) as py_obj:
_ret = py_obj.release_djinni_string()
assert _ret != ffi.NULL
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("struct DjinniBinary *(struct DjinniRecordHandle *)")
def get_all_datatypes_f9(cself):
try:
with CPyBinary.fromPy(CPyRecord.toPy(None, cself).binaryData) as py_obj:
_ret = py_obj.release_djinni_binary()
assert _ret != ffi.NULL
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("uint64_t(struct DjinniRecordHandle *)")
def get_all_datatypes_f10(cself):
try:
_ret = CPyDate.fromPy(CPyRecord.toPy(None, cself).dateData)
assert _ret != ffi.NULL
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("struct DjinniObjectHandle *(struct DjinniRecordHandle *)")
def get_all_datatypes_f11(cself):
try:
_ret = CPyObject.fromPy(ListBoolHelper.c_data_set, CPyRecord.toPy(None, cself).listData)
assert _ret != ffi.NULL
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("struct DjinniObjectHandle *(struct DjinniRecordHandle *)")
def get_all_datatypes_f12(cself):
try:
_ret = CPyObjectProxy.fromPy(SetBoolHelper.c_data_set, SetBoolProxy(CPyRecord.toPy(None, cself).setData))
assert _ret != ffi.NULL
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("struct DjinniObjectHandle *(struct DjinniRecordHandle *)")
def get_all_datatypes_f13(cself):
try:
_ret = CPyObjectProxy.fromPy(MapInt8TBoolHelper.c_data_set, MapInt8TBoolProxy(CPyRecord.toPy(None, cself).mapData))
assert _ret != ffi.NULL
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("struct DjinniBoxedBool *(struct DjinniRecordHandle *)")
def get_all_datatypes_f14(cself):
try:
with CPyBoxedBool.fromPyOpt(CPyRecord.toPy(None, cself).optionalData) as py_obj:
return py_obj.release_djinni_boxed()
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("int(struct DjinniRecordHandle *)")
def get_all_datatypes_f15(cself):
try:
_ret = CPyEnum.fromPy(CPyRecord.toPy(None, cself).enum_data)
assert _ret != -1
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("struct DjinniRecordHandle *(bool,int8_t,int16_t,int32_t,int64_t,float,double,struct DjinniString *,struct DjinniBinary *,uint64_t,struct DjinniObjectHandle *,struct DjinniObjectHandle *,struct DjinniObjectHandle *,struct DjinniBoxedBool *,int)")
def create_all_datatypes(booleanData,integer8Data,integer16Data,integer32Data,integer64Data,float32Data,float64Data,stringData,binaryData,dateData,listData,setData,mapData,optionalData,enum_data):
py_rec = AllDatatypes(
CPyPrimitive.toPy(booleanData),
CPyPrimitive.toPy(integer8Data),
CPyPrimitive.toPy(integer16Data),
CPyPrimitive.toPy(integer32Data),
CPyPrimitive.toPy(integer64Data),
CPyPrimitive.toPy(float32Data),
CPyPrimitive.toPy(float64Data),
CPyString.toPy(stringData),
CPyBinary.toPy(binaryData),
CPyDate.toPy(dateData),
CPyObject.toPy(ListBoolHelper.c_data_set, listData),
CPyObjectProxy.toPyObj(SetBoolHelper.c_data_set, setData),
CPyObjectProxy.toPyObj(MapInt8TBoolHelper.c_data_set, mapData),
CPyBoxedBool.toPyOpt(optionalData),
CPyEnum.toPy(EnumData, enum_data))
return CPyRecord.fromPy(AllDatatypes.c_data_set, py_rec) #to do: can be optional?
@ffi.callback("void (struct DjinniRecordHandle *)")
def __delete(dh):
assert dh in AllDatatypes.c_data_set
AllDatatypes.c_data_set.remove(dh)
@staticmethod
def _add_callbacks():
lib.all_datatypes_add_callback___delete(AllDatatypesHelper.__delete)
lib.all_datatypes_add_callback_create_all_datatypes(AllDatatypesHelper.create_all_datatypes)
lib.all_datatypes_add_callback_get_all_datatypes_f1(AllDatatypesHelper.get_all_datatypes_f1)
lib.all_datatypes_add_callback_get_all_datatypes_f10(AllDatatypesHelper.get_all_datatypes_f10)
lib.all_datatypes_add_callback_get_all_datatypes_f11(AllDatatypesHelper.get_all_datatypes_f11)
lib.all_datatypes_add_callback_get_all_datatypes_f12(AllDatatypesHelper.get_all_datatypes_f12)
lib.all_datatypes_add_callback_get_all_datatypes_f13(AllDatatypesHelper.get_all_datatypes_f13)
lib.all_datatypes_add_callback_get_all_datatypes_f14(AllDatatypesHelper.get_all_datatypes_f14)
lib.all_datatypes_add_callback_get_all_datatypes_f15(AllDatatypesHelper.get_all_datatypes_f15)
lib.all_datatypes_add_callback_get_all_datatypes_f2(AllDatatypesHelper.get_all_datatypes_f2)
lib.all_datatypes_add_callback_get_all_datatypes_f3(AllDatatypesHelper.get_all_datatypes_f3)
lib.all_datatypes_add_callback_get_all_datatypes_f4(AllDatatypesHelper.get_all_datatypes_f4)
lib.all_datatypes_add_callback_get_all_datatypes_f5(AllDatatypesHelper.get_all_datatypes_f5)
lib.all_datatypes_add_callback_get_all_datatypes_f6(AllDatatypesHelper.get_all_datatypes_f6)
lib.all_datatypes_add_callback_get_all_datatypes_f7(AllDatatypesHelper.get_all_datatypes_f7)
lib.all_datatypes_add_callback_get_all_datatypes_f8(AllDatatypesHelper.get_all_datatypes_f8)
lib.all_datatypes_add_callback_get_all_datatypes_f9(AllDatatypesHelper.get_all_datatypes_f9)
AllDatatypesHelper._add_callbacks()
|
py | 1a4a44604481beb95caaeac230e11685373ac40a | # -*- coding:utf-8 -*-
# There are two sorted arrays nums1 and nums2 of size m and n respectively.
#
# Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
#
# Example 1:
#
# nums1 = [1, 3]
# nums2 = [2]
#
# The median is 2.0
#
#
#
# Example 2:
#
# nums1 = [1, 2]
# nums2 = [3, 4]
#
# The median is (2 + 3)/2 = 2.5
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
nums = sorted(nums1 + nums2)
t_len = len(nums)
if t_len == 1:
return nums[0]
if t_len % 2:
return nums[t_len/2]
else:
return (nums[t_len/2] + nums[t_len/2 -1]) /2.0
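# Editorial sketch (hedged; not the submitted solution above): sorting is
# O((m+n) log(m+n)), while a partition-based binary search over the shorter array
# meets the O(log(m+n)) requirement stated in the problem. Uses // so it runs under
# both Python 2 and 3.
def find_median_sorted_arrays_logtime(nums1, nums2):
    if len(nums1) > len(nums2):
        nums1, nums2 = nums2, nums1          # binary-search the shorter array
    m, n = len(nums1), len(nums2)
    half = (m + n + 1) // 2
    lo, hi = 0, m
    while lo <= hi:
        i = (lo + hi) // 2                   # elements taken from nums1's left part
        j = half - i                         # elements taken from nums2's left part
        left1 = nums1[i - 1] if i > 0 else float('-inf')
        right1 = nums1[i] if i < m else float('inf')
        left2 = nums2[j - 1] if j > 0 else float('-inf')
        right2 = nums2[j] if j < n else float('inf')
        if left1 <= right2 and left2 <= right1:
            if (m + n) % 2:
                return float(max(left1, left2))
            return (max(left1, left2) + min(right1, right2)) / 2.0
        if left1 > right2:
            hi = i - 1
        else:
            lo = i + 1
    raise ValueError('input arrays must be sorted')
# find_median_sorted_arrays_logtime([1, 3], [2]) -> 2.0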
|
py | 1a4a44c8e528387f58beb013e1a18ac2c6f4b6b9 | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from itertools import product
from unittest import mock
import torch
from botorch.exceptions.errors import BotorchError
from botorch.utils.multi_objective.box_decompositions.box_decomposition import (
BoxDecomposition,
FastPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.utils import (
update_local_upper_bounds_incremental,
)
from botorch.utils.testing import BotorchTestCase
class DummyBoxDecomposition(BoxDecomposition):
def _partition_space(self):
pass
def compute_hypervolume(self):
pass
def get_hypercell_bounds(self):
pass
class DummyFastPartitioning(FastPartitioning, DummyBoxDecomposition):
def _get_partitioning(self):
pass
def _get_single_cell(self):
pass
class TestBoxDecomposition(BotorchTestCase):
def setUp(self):
self.ref_point_raw = torch.zeros(3, device=self.device)
self.Y_raw = torch.tensor(
[
[1.0, 2.0, 1.0],
[1.0, 1.0, 1.0],
[2.0, 0.5, 1.0],
],
device=self.device,
)
self.pareto_Y_raw = torch.tensor(
[
[1.0, 2.0, 1.0],
[2.0, 0.5, 1.0],
],
device=self.device,
)
def test_box_decomposition(self):
with self.assertRaises(TypeError):
BoxDecomposition()
for dtype, m, sort in product(
(torch.float, torch.double), (2, 3), (True, False)
):
with mock.patch.object(
DummyBoxDecomposition,
"_partition_space_2d" if m == 2 else "_partition_space",
) as mock_partition_space:
ref_point = self.ref_point_raw[:m].to(dtype=dtype)
Y = self.Y_raw[:, :m].to(dtype=dtype)
pareto_Y = self.pareto_Y_raw[:, :m].to(dtype=dtype)
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort)
# test pareto_Y before it is initialized
with self.assertRaises(BotorchError):
bd.pareto_Y
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort, Y=Y)
mock_partition_space.assert_called_once()
# test attributes
expected_pareto_Y = (
pareto_Y[torch.argsort(-pareto_Y[:, 0])] if sort else pareto_Y
)
self.assertTrue(torch.equal(bd.pareto_Y, expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, Y))
self.assertTrue(torch.equal(bd._neg_Y, -Y))
self.assertTrue(torch.equal(bd._neg_pareto_Y, -expected_pareto_Y))
self.assertTrue(torch.equal(bd.ref_point, ref_point))
self.assertTrue(torch.equal(bd._neg_ref_point, -ref_point))
self.assertEqual(bd.num_outcomes, m)
# test empty Y
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort, Y=Y[:0])
self.assertTrue(torch.equal(bd.pareto_Y, expected_pareto_Y[:0]))
# test _update_neg_Y
bd = DummyBoxDecomposition(ref_point=ref_point, sort=sort)
bd._update_neg_Y(Y[:2])
self.assertTrue(torch.equal(bd._neg_Y, -Y[:2]))
bd._update_neg_Y(Y[2:])
self.assertTrue(torch.equal(bd._neg_Y, -Y))
# test batch mode
if m == 2:
batch_Y = torch.stack([Y, Y + 1], dim=0)
bd = DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=batch_Y
)
batch_expected_pareto_Y = torch.stack(
[expected_pareto_Y, expected_pareto_Y + 1], dim=0
)
self.assertTrue(torch.equal(bd.pareto_Y, batch_expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, batch_Y))
self.assertTrue(torch.equal(bd.ref_point, ref_point))
# test batch ref point
batch_ref_point = torch.stack([ref_point, ref_point + 1], dim=0)
bd = DummyBoxDecomposition(
ref_point=batch_ref_point, sort=sort, Y=batch_Y
)
self.assertTrue(torch.equal(bd.ref_point, batch_ref_point))
# test multiple batch dims
with self.assertRaises(NotImplementedError):
DummyBoxDecomposition(
ref_point=ref_point,
sort=sort,
Y=batch_Y.unsqueeze(0),
)
# test empty Y
bd = DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=batch_Y[:, :0]
)
self.assertTrue(
torch.equal(bd.pareto_Y, batch_expected_pareto_Y[:, :0])
)
# test padded pareto frontiers with different numbers of
# points
batch_Y[1, 1] = batch_Y[1, 0] - 1
batch_Y[1, 2] = batch_Y[1, 0] - 2
bd = DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=batch_Y
)
batch_expected_pareto_Y = torch.stack(
[
expected_pareto_Y,
batch_Y[1, :1].expand(expected_pareto_Y.shape),
],
dim=0,
)
self.assertTrue(torch.equal(bd.pareto_Y, batch_expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, batch_Y))
else:
with self.assertRaises(NotImplementedError):
DummyBoxDecomposition(
ref_point=ref_point, sort=sort, Y=Y.unsqueeze(0)
)
def test_fast_partitioning(self):
with self.assertRaises(TypeError):
FastPartitioning()
for dtype, m in product(
(torch.float, torch.double),
(2, 3),
):
ref_point = self.ref_point_raw[:m].to(dtype=dtype)
Y = self.Y_raw[:, :m].to(dtype=dtype)
pareto_Y = self.pareto_Y_raw[:, :m].to(dtype=dtype)
sort = m == 2
expected_pareto_Y = (
pareto_Y[torch.argsort(-pareto_Y[:, 0])] if sort else pareto_Y
)
bd = DummyFastPartitioning(ref_point=ref_point, Y=Y)
self.assertTrue(torch.equal(bd.pareto_Y, expected_pareto_Y))
self.assertTrue(torch.equal(bd.Y, Y))
self.assertTrue(torch.equal(bd._neg_Y, -Y))
self.assertTrue(torch.equal(bd._neg_pareto_Y, -expected_pareto_Y))
self.assertTrue(torch.equal(bd.ref_point, ref_point))
self.assertTrue(torch.equal(bd._neg_ref_point, -ref_point))
self.assertEqual(bd.num_outcomes, m)
# test update
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch.object(
DummyFastPartitioning,
"reset",
wraps=bd.reset,
) as mock_reset:
# with no existing neg_Y
bd.update(Y=Y[:2])
mock_reset.assert_called_once()
# test with existing Y
bd.update(Y=Y[2:])
# check that reset is only called when m=2
if m == 2:
mock_reset.assert_has_calls([mock.call(), mock.call()])
else:
mock_reset.assert_called_once()
# with existing neg_Y, and empty pareto_Y
bd = DummyFastPartitioning(ref_point=ref_point, Y=Y[:0])
with mock.patch.object(
DummyFastPartitioning,
"reset",
wraps=bd.reset,
) as mock_reset:
bd.update(Y=Y[0:])
mock_reset.assert_called_once()
# test empty pareto Y
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch.object(
DummyFastPartitioning,
"_get_single_cell",
wraps=bd._get_single_cell,
) as mock_get_single_cell:
bd.update(Y=Y[:0])
mock_get_single_cell.assert_called_once()
# test batched empty pareto Y
if m == 2:
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch.object(
DummyFastPartitioning,
"_get_single_cell",
wraps=bd._get_single_cell,
) as mock_get_single_cell:
bd.update(Y=Y.unsqueeze(0)[:, :0])
mock_get_single_cell.assert_called_once()
# test that update_local_upper_bounds_incremental is called when m>2
bd = DummyFastPartitioning(ref_point=ref_point)
with mock.patch(
"botorch.utils.multi_objective.box_decompositions.box_decomposition."
"update_local_upper_bounds_incremental",
wraps=update_local_upper_bounds_incremental,
) as mock_update_local_upper_bounds_incremental, mock.patch.object(
DummyFastPartitioning,
"_get_partitioning",
wraps=bd._get_partitioning,
) as mock_get_partitioning, mock.patch.object(
DummyFastPartitioning,
"_partition_space_2d",
):
bd.update(Y=Y)
if m > 2:
mock_update_local_upper_bounds_incremental.assert_called_once()
# check that it is not called if the pareto set does not change
bd.update(Y=Y)
mock_update_local_upper_bounds_incremental.assert_called_once()
mock_get_partitioning.assert_called_once()
else:
self.assertEqual(
len(mock_update_local_upper_bounds_incremental.call_args_list),
0,
)
# test exception is raised for m=2, batched box decomposition using
# _partition_space
if m == 2:
with self.assertRaises(NotImplementedError):
DummyFastPartitioning(ref_point=ref_point, Y=Y.unsqueeze(0))
|
py | 1a4a44f99de5298b46ef8e1eb30551facee9a654 | """Exceptions raised by the dvc."""
from funcy import first
from dvc.utils import error_link, format_link, relpath
class DvcException(Exception):
"""Base class for all dvc exceptions."""
def __init__(self, msg, *args):
assert msg
super().__init__(msg, *args)
class InvalidArgumentError(ValueError, DvcException):
"""Thrown if arguments are invalid."""
class OutputDuplicationError(DvcException):
"""Thrown if a file/directory is specified as an output in more than one
stage.
Args:
output (unicode): path to the file/directory.
stages (list): list of paths to stages.
"""
def __init__(self, output, stages):
assert isinstance(output, str)
assert all(hasattr(stage, "relpath") for stage in stages)
if len(stages) == 1:
msg = "output '{}' is already specified in {}.".format(
output, first(stages)
)
else:
msg = "output '{}' is already specified in stages:\n{}".format(
output, "\n".join(f"\t- {s.addressing}" for s in stages),
)
super().__init__(msg)
self.stages = stages
self.output = output
class OutputNotFoundError(DvcException):
"""Thrown if a file/directory is not found as an output in any pipeline.
Args:
output (unicode): path to the file/directory.
"""
def __init__(self, output, repo=None):
self.output = output
self.repo = repo
super().__init__(
"Unable to find DVC-file with output '{path}'".format(
path=relpath(self.output)
)
)
class StagePathAsOutputError(DvcException):
"""Thrown if directory that stage is going to be saved in is specified as
an output of another stage.
Args:
stage (Stage): a stage that is in some other stages output
output (str): an output covering the stage above
"""
def __init__(self, stage, output):
assert isinstance(output, str)
super().__init__(
"{stage} is within an output '{output}' of another stage".format(
stage=stage, output=output
)
)
class CircularDependencyError(DvcException):
"""Thrown if a file/directory specified both as an output and as a
dependency.
Args:
dependency (str): path to the dependency.
"""
def __init__(self, dependency):
assert isinstance(dependency, str)
msg = "'{}' is specified as an output and as a dependency."
super().__init__(msg.format(dependency))
class ArgumentDuplicationError(DvcException):
"""Thrown if a file/directory is specified as a dependency/output more
than once.
Args:
path (str): path to the file/directory.
"""
def __init__(self, path):
assert isinstance(path, str)
super().__init__(f"file '{path}' is specified more than once.")
class MoveNotDataSourceError(DvcException):
"""Thrown when trying to move a file/directory that is not an output
in a data source stage.
Args:
path (str): path to the file/directory.
"""
def __init__(self, path):
msg = (
"move is not permitted for stages that are not data sources. "
"You need to either move '{path}' to a new location and edit "
"it by hand, or remove '{path}' and create a new one at the "
"desired location."
)
super().__init__(msg.format(path=path))
class NotDvcRepoError(DvcException):
"""Thrown if a directory is not a DVC repo"""
class DvcParserError(DvcException):
"""Base class for CLI parser errors."""
def __init__(self):
super().__init__("parser error")
class CyclicGraphError(DvcException):
def __init__(self, stages):
assert isinstance(stages, list)
msg = "Pipeline has a cycle involving: {}.".format(
", ".join(s.addressing for s in stages)
)
super().__init__(msg)
class ConfirmRemoveError(DvcException):
def __init__(self, path):
super().__init__(
"unable to remove '{}' without a confirmation. Use "
"`-f` to force.".format(path)
)
class InitError(DvcException):
pass
class ReproductionError(DvcException):
def __init__(self, dvc_file_name):
self.path = dvc_file_name
super().__init__(f"failed to reproduce '{dvc_file_name}'")
class BadMetricError(DvcException):
def __init__(self, paths):
super().__init__(
"the following metrics do not exist, "
"are not metrics files or are malformed: {paths}".format(
paths=", ".join(f"'{path}'" for path in paths)
)
)
class NoMetricsError(DvcException):
pass
class NoMetricsParsedError(NoMetricsError):
def __init__(self, command):
super().__init__(
f"Could not parse {command} files. Use `-v` option to see more "
"details."
)
class NoMetricsFoundError(NoMetricsError):
def __init__(self, command, run_options):
super().__init__(
f"No {command} files in this repository. "
f"Use `{run_options}` options for "
f"`dvc run` to mark stage outputs as {command}."
)
class RecursiveAddingWhileUsingFilename(DvcException):
def __init__(self):
super().__init__(
"cannot use `fname` with multiple targets or `-R|--recursive`"
)
class OverlappingOutputPathsError(DvcException):
def __init__(self, parent, overlapping_out, message):
self.parent = parent
self.overlapping_out = overlapping_out
super().__init__(message)
class CheckoutErrorSuggestGit(DvcException):
def __init__(self, target):
super().__init__(f"Did you mean `git checkout {target}`?")
class ETagMismatchError(DvcException):
def __init__(self, etag, cached_etag):
super().__init__(
"ETag mismatch detected when copying file to cache! "
"(expected: '{}', actual: '{}')".format(etag, cached_etag)
)
class FileMissingError(DvcException):
def __init__(self, path, hint=None):
self.path = path
hint = "" if hint is None else f". {hint}"
super().__init__(
f"Can't find '{path}' neither locally nor on remote{hint}"
)
class DvcIgnoreInCollectedDirError(DvcException):
def __init__(self, ignore_dirname):
super().__init__(
".dvcignore file should not be in collected dir path: "
"'{}'".format(ignore_dirname)
)
class GitHookAlreadyExistsError(DvcException):
def __init__(self, hook_name):
super().__init__(
"Hook '{}' already exists. Please refer to {} for more "
"info.".format(
hook_name, format_link("https://man.dvc.org/install")
)
)
class DownloadError(DvcException):
def __init__(self, amount):
self.amount = amount
super().__init__(f"{amount} files failed to download")
class UploadError(DvcException):
def __init__(self, amount):
self.amount = amount
super().__init__(f"{amount} files failed to upload")
class CheckoutError(DvcException):
def __init__(self, target_infos, stats=None):
self.target_infos = target_infos
self.stats = stats
targets = [str(t) for t in target_infos]
m = (
"Checkout failed for following targets:\n{}\nIs your "
"cache up to date?\n{}".format(
"\n".join(targets), error_link("missing-files"),
)
)
super().__init__(m)
class CollectCacheError(DvcException):
pass
class NoRemoteInExternalRepoError(DvcException):
def __init__(self, url):
super().__init__(
f"No DVC remote is specified in target repository '{url}'."
)
class NoOutputInExternalRepoError(DvcException):
def __init__(self, path, external_repo_path, external_repo_url):
super().__init__(
"Output '{}' not found in target repository '{}'".format(
relpath(path, external_repo_path), external_repo_url
)
)
class HTTPError(DvcException):
def __init__(self, code, reason):
super().__init__(f"'{code} {reason}'")
class PathMissingError(DvcException):
default_msg = (
"The path '{}' does not exist in the target repository '{}'"
" neither as a DVC output nor as a Git-tracked file."
)
default_msg_dvc_only = (
"The path '{}' does not exist in the target repository '{}'"
" as an DVC output."
)
def __init__(self, path, repo, dvc_only=False):
msg = self.default_msg if not dvc_only else self.default_msg_dvc_only
super().__init__(msg.format(path, repo))
self.dvc_only = dvc_only
class RemoteCacheRequiredError(DvcException):
def __init__(self, path_info):
super().__init__(
(
"Current operation was unsuccessful because '{}' requires "
"existing cache on '{}' remote. See {} for information on how "
"to set up remote cache."
).format(
path_info,
path_info.scheme,
format_link("https://man.dvc.org/config#cache"),
)
)
class IsADirectoryError(DvcException): # noqa,pylint:disable=redefined-builtin
"""Raised when a file operation is requested on a directory."""
class NoOutputOrStageError(DvcException):
"""
Raised when the target is neither an output nor a stage name in dvc.yaml
"""
def __init__(self, target, file):
super().__init__(
f"'{target}' "
f"does not exist as an output or a stage name in '{file}'"
)
class MergeError(DvcException):
pass
class CacheLinkError(DvcException):
SUPPORT_LINK = "See {} for more information.".format(
format_link(
"https://dvc.org/doc/user-guide/troubleshooting#cache-types"
)
)
def __init__(self, path_infos):
msg = "No possible cache link types for '{}'. {}".format(
", ".join([str(path) for path in path_infos]), self.SUPPORT_LINK,
)
super().__init__(msg)
self.path_infos = path_infos
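# Illustrative usage sketch (not part of the original module): DownloadError and
# UploadError simply attach an ``amount`` attribute to a DvcException, so callers
# catch the specific class and read that attribute. Guarded so it only runs when
# this file is executed directly.
if __name__ == "__main__":
    try:
        raise DownloadError(3)
    except DownloadError as exc:
        print(exc.amount)  # -> 3
        print(exc)         # -> 3 files failed to download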
|
py | 1a4a4562550eef029200c8c3681c0cab2b187097 | import sys
sys.path.append('..')
from inner import class_inner
class EarthPoint:
    latitude: float
    longitude: float
    def __init__(self, latitude, longitude):
        self.latitude = latitude
        self.longitude = longitude
    def __str__(self):
        fmt = self.formatter()
        return fmt.as_str(self)
    @class_inner
    class formatter:
        def as_str(self, v):
            ns, ew = "NS"[v.latitude < 0], "EW"[v.longitude < 0]
            return f"{abs(v.latitude):.4f}{ns} {abs(v.longitude):.4f}{ew}"
        def _parse(self, s, card):
            value, c = float(s[:-1]), s[-1].upper()
            sign = (1, -1)[card.index(c)]
            return sign * value
        def from_str(self, geostr):
            s = geostr.split()
            if len(s) != 2:
                raise ValueError("invalid string")
            latitude = self._parse(s[0], "NS")
            longitude = self._parse(s[1], "EW")
            return self.outer(latitude, longitude)
# formatting
Paris = EarthPoint(48.866667, 2.333333)
print(str(Paris))
# parsing
fmt = EarthPoint.formatter()
geo = fmt.from_str('48.8667N 2.3333E')
print(geo)
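# Expected output of the two prints above (formatting, then round-trip parsing):
#   48.8667N 2.3333E
#   48.8667N 2.3333E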
|
py | 1a4a46a852790f1766873c563e32611b861a399a | import collections
import signal
from django.template import Template, Context
from . import flamegraph
try:
from debug_toolbar.panels import Panel
except ImportError as e:
import os
if os.environ.get('TESTING'):
import mock
Panel = mock.Mock()
else:
raise e
template = r"""
<style>
#FlamegraphPanel .djDebugPanelContent { padding:0; }
</style>
<template id="djdt-flamegraph-tpl">
<style>
body {margin: 0;}
</style>
{{ flamegraph|safe }}
<script>
init();
</script>
</template>
<iframe id="djdt-flamegraph-iframe" style="width:100%;height:100%;">
</iframe>
"""
from django.templatetags.static import static
class FlamegraphPanel(Panel):
title = 'Flamegraph'
template = 'djdt_flamegraph.html'
@property
def enabled(self):
key = 'djdt' + self.panel_id
return self.toolbar.request.COOKIES.get(key, 'off') == 'on'
@property
def content(self):
return Template(template).render(Context({
'flamegraph': flamegraph.stats_to_svg(self.sampler.get_stats())
}))
@property
def scripts(self):
scripts = super().scripts
scripts.append(static("djdt_flamegraph/djdt_flamegraph.js"))
return scripts
def enable_instrumentation(self):
self.sampler = Sampler()
def process_request(self, request):
self.sampler.start()
response = super().process_request(request)
self.sampler.stop()
return response
class Sampler(object):
def __init__(self, interval=0.001):
self.stack_counts = collections.defaultdict(int)
self.interval = interval
def _sample(self, signum, frame):
stack = []
while frame is not None:
formatted_frame = '{}({})'.format(frame.f_code.co_name,
frame.f_globals.get('__name__'))
stack.append(formatted_frame)
frame = frame.f_back
formatted_stack = ';'.join(reversed(stack))
self.stack_counts[formatted_stack] += 1
def get_stats(self):
return '\n'.join('%s %d' % (key, value) for key, value in sorted(self.stack_counts.items()))
def start(self):
signal.signal(signal.SIGALRM, self._sample)
signal.setitimer(signal.ITIMER_REAL, self.interval, self.interval)
def stop(self):
signal.setitimer(signal.ITIMER_REAL, 0, 0)
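# Illustrative standalone use of Sampler (a sketch, not part of the original
# module). SIGALRM/ITIMER_REAL are POSIX-only, and this file's relative import
# of `flamegraph` prevents running it directly, so the sketch stays commented:
#
#     sampler = Sampler(interval=0.001)
#     sampler.start()
#     do_some_work()              # code under measurement
#     sampler.stop()
#     print(sampler.get_stats())  # "frame1;frame2;... count" lines, one per
#                                 # distinct stack, the format consumed by
#                                 # flamegraph.stats_to_svg above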
|
py | 1a4a46e4164b8cd8d5ff97e9534fb2f469979e09 | import logging
from logging import NullHandler
from sc2_build_tokenizer.parse import extract_builds
from sc2_build_tokenizer.tokenize import (
generate_build_tokens,
generate_token_distributions,
generate_token_paths,
)
from sc2_build_tokenizer.dataclasses import (
ParsedBuild,
TokenizedBuild,
TokenDistributions,
)
logging.getLogger('zephyrus_sc2_parser').setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
def tokenize(replay):
logger.info('Tokenizing builds with default distributions')
logger.info('Extracting builds from replays')
games = extract_builds(replay)
logger.info('Iterating through replays')
tokenized = []
for game in games:
races = []
for build in game:
races.append(build.race)
logger.info('Generating token paths for builds from current replay')
builds = []
for build in game:
player_race = build.race
opp_race = races[0] if races[1] == player_race else races[1]
paths = generate_token_paths(build.build, player_race, opp_race)
# only take the most likely path
builds.append(paths[0])
tokenized.append(builds)
logger.info('Completed generating tokenized builds from current replay')
logger.info('Completed generating all tokenized builds from replays')
return tokenized
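# Illustrative usage (a sketch; "example.SC2Replay" is a hypothetical path, and
# extract_builds is assumed to accept a single replay path as used above):
if __name__ == "__main__":
    for game in tokenize("example.SC2Replay"):
        for tokenized_build in game:
            print(tokenized_build)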
|
py | 1a4a46e5e671f0717a1da83fb10b1b39c65648ed | class DocumentSplitter:
def __init__(self):
super().__init__() |
py | 1a4a4759af9398244834896d28f98922adff0c45 | """
# This script searches the threshold for the SparNet model by computing accuracy
# It also computes FLOPs for the SparNet model
#
# See Table 3 on the main paper
#
Example usage:
CUDA_VISIBLE_DEVICES=1 python3 -m evaluation.seach_sparnet_th --settings_file config/settings_ncaltech.yaml
CUDA_VISIBLE_DEVICES=1 python3 -m evaluation.seach_sparnet_th --settings_file config/settings_prophesee.yaml
CUDA_VISIBLE_DEVICES=0 python3 -m evaluation.seach_sparnet_th --settings_file config/settings_exp.yaml
"""
from config.settings import Settings
import numpy as np
import argparse
from training.object_cls_trainer import DSSClsModel
from training.object_det_trainer import DSSDetModel
from training.exp_trainer import ExpModel
from utils.log_utils import loadCheckpoint
if 0:
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
def main():
parser = argparse.ArgumentParser(description='Train network.')
parser.add_argument('--settings_file', help='Path to settings yaml', required=False)
args = parser.parse_args()
settings_filepath = args.settings_file
settings = Settings(settings_filepath, generate_log=False)
# settings.batch_size=1
th = [0, 0.02, 0.04, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    print('Start evaluating thr-acc-flops relations of SparNet model on %s' % settings.dataset_name)
# Build trainer
if settings.model_name == 'dss_cls':
trainer = DSSClsModel(settings)
elif settings.model_name == 'dss_det':
trainer = DSSDetModel(settings)
elif settings.model_name == 'dss_exp':
trainer = ExpModel(settings)
else:
raise ValueError('Model name %s specified in the settings file is not implemented' % settings.model_name)
loadCheckpoint(trainer.model, trainer.settings.resume_ckpt_file)
trainer.model.set_train_mode((True, True, True, True))
for th_ in th:
# trainer.model.set_train_mode((False, False, True, True))
trainer.model.set_thr(th_)
if settings.dataset_name=='NMNIST':
trainer.testEpoch()
            print('NMNIST, %s threshold: %.6f, trg_loss: %.6f, acc: %.6f, test_mac: %.6f' % (settings.dataset_name, th_, trainer.test_tgt, trainer.test_acc, trainer.test_mac))
else:
trainer.validationEpoch()
            print('%s threshold: %.6f, trg_loss: %.6f, acc: %.6f, test_mac: %.6f' % (settings.dataset_name, th_, trainer.val_tgt, trainer.val_acc, trainer.val_mac))
if __name__ == "__main__":
main()
|
py | 1a4a485715809a25d94e3c30084941f94b09bb9d | import code
import os
import sys
import wx
from app.app_utils import Chronometer
from app.app_utils import GripyBitmap
from classes.ui import UIManager
from classes.ui import WorkPageController
from classes.ui import WorkPage
# TODO: review this... duplicated in WellPlot
WP_FLOAT_PANEL = wx.NewId()
class ConsoleController(WorkPageController):
tid = 'console_controller'
_ATTRIBUTES = {
}
def __init__(self, **state):
super().__init__(**state)
class InteractiveConsole(code.InteractiveConsole):
def __init__(self, outputFunc, flushFunc, setPromptFunc, exitCmd, clearFunc, echoFunc=None):
self._output = outputFunc
self._flush = flushFunc
self._echo = echoFunc
self._setPrompt = setPromptFunc
self._exitCmd = exitCmd
self._clearFunc = clearFunc
# Can't use super here because stupid code.
# InteractiveConsole doesn't sub-class object. Grrr!
# code.InteractiveConsole.__init__(self) # , locals=self.namespace)
super().__init__(locals=None) # , filename=self._output) # locals=None, filename="<console>"
self.prompt = ">>>"
def _set_prompt(self, prompt):
self._prompt = prompt
self._setPrompt(prompt)
def _get_prompt(self):
return self._prompt
def write(self, data):
self._output(data)
def _show_error_info(self, exectype, value, tb):
msg = '\nError found! \nError type: ' + exectype.__name__ \
+ '\nDescription: ' + str(value) + '\n'
# print('Traceback:', tb)
self.write(msg)
def push(self, data):
lines = data.split('\n')
if self._echo:
for line in lines:
self._echo("%s %s\n" % (self.prompt, line))
c = Chronometer()
# Capture stdout/stderr output as well as code interaction.
stdout, stderr = sys.stdout, sys.stderr
temp_excepthook = sys.excepthook
sys.excepthook = self._show_error_info
#
sys.stdout = sys.stderr = self
for line in lines:
# more = code.InteractiveConsole.push(self, line)
more = super().push(line)
self.prompt = "..." if more else ">>>"
#
if self._echo:
self._echo("%s \n\n" % (c.end()))
#
sys.excepthook = temp_excepthook
sys.stdout, sys.stderr = stdout, stderr
def flush(self):
self._flush()
class Console(WorkPage):
tid = 'console'
_TID_FRIENDLY_NAME = 'Coding Console'
def __init__(self, controller_uid):
super().__init__(controller_uid)
# Top
self.sizer = wx.BoxSizer(wx.VERTICAL)
self._tool_bar = wx.aui.AuiToolBar(self)
self.sizer.Add(self._tool_bar, 0, flag=wx.TOP | wx.EXPAND)
# Center
self._main_panel = wx.Panel(self)
self.sizer.Add(self._main_panel, 1, flag=wx.EXPAND)
#
self.SetSizer(self.sizer)
# Then, let's construct our ToolBar
self._build_tool_bar()
# super(DebugConsoleFrame, self).__init__(parent,
# wx.ID_ANY,
# 'GRIPy Python Debug Console'
# )
# self.Bind(wx.EVT_ACTIVATE, self.onActivate)
# self.sizer = wx.BoxSizer(wx.VERTICAL)
# self._main_panel = wx.Panel(self)
# self.sizer.Add(self._main_panel, 1, flag=wx.EXPAND)
main_panel_sizer = wx.BoxSizer(wx.VERTICAL)
top_panel = wx.Panel(self._main_panel, -1)
font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Consolas')
self.outputCtrl = wx.TextCtrl(top_panel, wx.ID_ANY,
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH2
)
self.outputCtrl.Bind(wx.EVT_KEY_DOWN, self.onOutputKeyDown)
self.outputCtrl.Bind(wx.EVT_CHAR, self.onOutputChar)
output_attr = wx.TextAttr(wx.Colour(255, 0, 0), font=font)
self.outputCtrl.SetDefaultStyle(output_attr)
#
self.inputCtrl = wx.TextCtrl(top_panel, wx.ID_ANY,
style=wx.TE_RICH2 | wx.TE_MULTILINE | wx.TE_DONTWRAP | wx.TE_PROCESS_TAB
)
self.inputCtrl.Bind(wx.EVT_CHAR, self.onInputChar)
self.inputCtrl.SetFont(font)
#
top_sizer = wx.BoxSizer(wx.HORIZONTAL)
top_sizer.Add(self.inputCtrl, proportion=4, flag=wx.EXPAND)
top_sizer.Add(self.outputCtrl, proportion=4, flag=wx.EXPAND)
top_panel.SetSizer(top_sizer)
bottom_panel = wx.Panel(self._main_panel, -1)
### Begin - buttons_panel
buttons_panel = wx.Panel(bottom_panel)
self.clear_input_button = wx.Button(buttons_panel,
label='Clear input'
)
self.clear_input_button.Bind(wx.EVT_BUTTON, self.onClearInput)
self.clear_output_button = wx.Button(buttons_panel,
label='Clear output'
)
self.clear_output_button.Bind(wx.EVT_BUTTON, self.onClearOutput)
self.clear_all_button = wx.Button(buttons_panel,
label='Clear all'
)
self.clear_all_button.Bind(wx.EVT_BUTTON, self.onClearAll)
self.execute_button_selected = wx.Button(buttons_panel,
                                                 label='Execute selected'
)
self.execute_button_selected.Bind(wx.EVT_BUTTON,
self.onExecuteSelected
)
self.execute_button_all = wx.Button(buttons_panel,
                                            label='Execute all'
)
self.execute_button_all.Bind(wx.EVT_BUTTON,
self.onExecuteAll
)
self.load_button = wx.Button(buttons_panel,
label='Load'
)
self.load_button.Bind(wx.EVT_BUTTON,
self.onLoadFile
)
self.save_button = wx.Button(buttons_panel,
label='Save'
)
self.save_button.Bind(wx.EVT_BUTTON,
self.onSaveFile
)
self.save_button_as = wx.Button(buttons_panel,
label='Save as'
)
self.save_button_as.Bind(wx.EVT_BUTTON,
self.onSaveFileAs
)
buttons_panel_sizer = wx.BoxSizer(wx.HORIZONTAL)
buttons_panel_sizer.Add(self.clear_input_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.clear_output_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.clear_all_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.execute_button_selected,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.execute_button_all,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.load_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.save_button,
0,
wx.ALIGN_CENTER | wx.LEFT,
10
)
buttons_panel_sizer.Add(self.save_button_as,
0,
wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT,
10
)
buttons_panel.SetSizer(buttons_panel_sizer)
buttons_panel.Layout()
### End - buttons_panel
bottom_panel_sizer = wx.BoxSizer(wx.VERTICAL)
bottom_panel_sizer.Add(buttons_panel, 1, wx.ALIGN_CENTER | wx.ALL, 2)
bottom_panel.SetSizer(bottom_panel_sizer)
bottom_panel.Layout()
main_panel_sizer.Add(top_panel, 1, wx.EXPAND)
bottom_panel.SetMinSize((40, 40))
main_panel_sizer.Add(bottom_panel, 0, wx.EXPAND)
#
self._main_panel.SetSizer(main_panel_sizer)
self.console = InteractiveConsole(outputFunc=self.output,
flushFunc=self.flush,
exitCmd=self.Close,
clearFunc=self.clearOutput,
echoFunc=self.echo,
setPromptFunc=self.setPrompt
)
# main_panel_sizer.Layout()
self.Layout()
# self.SetSize((1350,700))
# self.SetPosition((10,10))
#
self.Bind(wx.EVT_CLOSE, self.onClose)
#
gripy_app = wx.GetApp()
_fullfilename = gripy_app._gripy_app_state.get('gripy_debug_file')
_fullfilename = os.path.normpath(_fullfilename)
self.file_name = os.path.basename(_fullfilename)
self.dir_name = os.path.dirname(_fullfilename)
#
if not os.path.isdir(self.dir_name):
os.makedirs(self.dir_name)
msg = 'DebugConsoleFrame.__init__ has created directory: {}'.format(self.dir_name)
# log.debug(msg)
# print(msg)
if not os.path.isfile(_fullfilename):
open(_fullfilename, 'a').close()
msg = 'DebugConsoleFrame.__init__ has created empty file: {}'.format(_fullfilename)
# log.debug(msg)
# print (msg)
if self.file_name and self.dir_name:
self._load_file()
def get_friendly_name(self):
idx = self._get_sequence_number()
name = self._get_tid_friendly_name() \
+ ': ' + '[' + str(idx) + ']'
return name
def _build_tool_bar(self):
self.fp_item = self._tool_bar.AddTool(WP_FLOAT_PANEL,
wx.EmptyString,
GripyBitmap('restore_window-25.png'),
wx.NullBitmap,
wx.ITEM_CHECK,
'Float Panel',
'Float Panel',
None
)
self._tool_bar.ToggleTool(WP_FLOAT_PANEL, False)
self._tool_bar.Bind(wx.EVT_TOOL, self._on_change_float_panel, None,
WP_FLOAT_PANEL
)
self._tool_bar.AddSeparator()
self._tool_bar.Realize()
#
def _on_change_float_panel(self, event):
# TODO: Integrar binds de toggle buttons...
if event.GetId() == WP_FLOAT_PANEL:
UIM = UIManager()
controller = UIM.get(self._controller_uid)
controller.float_mode = event.IsChecked()
def onLoadFile(self, evt):
style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
wildcard = "Arquivo de console GRIPy (*.gripy_console)|*.gripy_console"
fdlg = wx.FileDialog(self, 'Escolha o arquivo gripy_console',
defaultDir=self.dir_name,
wildcard=wildcard,
style=style
)
if fdlg.ShowModal() == wx.ID_OK:
self.file_name = fdlg.GetFilename()
self.dir_name = fdlg.GetDirectory()
self._load_file()
fdlg.Destroy()
def _load_file(self):
self.inputCtrl.LoadFile(os.path.join(self.dir_name, self.file_name))
def onSaveFileAs(self, evt):
style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
wildcard = "Arquivo de console GRIPy (*.gripy_console)|*.gripy_console"
fdlg = wx.FileDialog(self, 'Escolha o arquivo gripy_console',
defaultDir=self.dir_name,
wildcard=wildcard,
style=style
)
if fdlg.ShowModal() == wx.ID_OK:
self.file_name = fdlg.GetFilename()
self.dir_name = fdlg.GetDirectory()
self._do_save()
fdlg.Destroy()
def onSaveFile(self, evt):
self._do_save()
def _do_save(self):
self.inputCtrl.SaveFile(os.path.join(self.dir_name, self.file_name))
def onExecuteAll(self, evt):
data = self.inputCtrl.GetValue()
data = data + '\n'
self.console.push(data)
def onExecuteSelected(self, evt):
data = self.inputCtrl.GetStringSelection()
data = data + '\n'
self.console.push(data)
def onClearInput(self, evt):
self.clearInput()
def onClearOutput(self, evt):
self.clearOutput()
def onClearAll(self, evt):
self.clearInput()
self.clearOutput()
def onActivate(self, evt):
if evt.GetActive():
self.inputCtrl.SetFocus()
evt.Skip()
def onClose(self, evt):
self._do_save()
evt.Skip()
print('\n\nonClose')
def output(self, data):
self.outputCtrl.WriteText(data)
def flush(self):
self.outputCtrl.flush()
def echo(self, data):
self.outputCtrl.WriteText(data)
def setPrompt(self, prompt):
self.promptLabel.SetLabel(prompt)
def onInputChar(self, evt):
key = evt.GetKeyCode()
if key == wx.WXK_TAB:
data = self.inputCtrl.GetValue()
ins_point = self.inputCtrl.GetInsertionPoint()
last_point = self.inputCtrl.GetLastPosition()
line_number = len(data[0:ins_point].split("\n"))
if line_number > 1:
ins_point -= line_number - 1
data = data[0:ins_point] + ' ' + data[ins_point:last_point]
self.inputCtrl.ChangeValue(data)
self.inputCtrl.SetInsertionPoint(ins_point + 3 + line_number)
return
elif key == wx.WXK_F6:
self.outputCtrl.SetFocus()
return
elif key == wx.WXK_ESCAPE:
self.Close()
return
evt.Skip()
def clearOutput(self):
self.outputCtrl.ChangeValue("")
def clearInput(self):
self.inputCtrl.ChangeValue("")
def onOutputKeyDown(self, evt):
key = evt.GetKeyCode()
# #3763: WX 3 no longer passes escape to evt_char for richEdit fields, therefore evt_key_down is used.
if key == wx.WXK_ESCAPE:
self.Close()
return
evt.Skip()
def onOutputChar(self, evt):
key = evt.GetKeyCode()
if key == wx.WXK_F6:
self.inputCtrl.SetFocus()
return
evt.Skip()
|
py | 1a4a485ecf9ec0712d8dcbb15a49b94866d0125f | import copy
import itertools
import os
import tempfile
import unittest
import numpy as np
import pytest
import torch
from torch import nn
import pfrl
from pfrl.agents import ppo
from pfrl.agents.ppo import PPO
from pfrl.envs.abc import ABC
from pfrl.experiments import (
train_agent_batch_with_evaluation,
train_agent_with_evaluation,
)
from pfrl.experiments.evaluator import (
batch_run_evaluation_episodes,
run_evaluation_episodes,
)
from pfrl.nn import RecurrentBranched, RecurrentSequential
from pfrl.policies import (
GaussianHeadWithStateIndependentCovariance,
SoftmaxCategoricalHead,
)
from pfrl.testing import torch_assert_allclose
from pfrl.utils.batch_states import batch_states
make_random_episodes = ABC.make_random_episodes
class TestYieldSubsetOfSequencesWithFixedNumberOfItems(unittest.TestCase):
def test_manual(self):
episodes = [
[1, 2, 3],
[4, 5],
[6, 7, 8],
[9],
[10, 11, 12],
]
self.assertEqual(
list(
ppo._yield_subset_of_sequences_with_fixed_number_of_items(episodes, 4)
),
[
[[1, 2, 3], [4]],
[[5], [6, 7, 8]],
[[9], [10, 11, 12]],
],
)
self.assertEqual(
list(
ppo._yield_subset_of_sequences_with_fixed_number_of_items(episodes, 3)
),
[
[[1, 2, 3]],
[[4, 5], [6]],
[[7, 8], [9]],
[[10, 11, 12]],
],
)
self.assertEqual(
list(
ppo._yield_subset_of_sequences_with_fixed_number_of_items(episodes, 2)
),
[
[[1, 2]],
[[3], [4]],
[[5], [6]],
[[7, 8]],
[[9], [10]],
[[11, 12]],
],
)
class TestLimitSequenceLength(unittest.TestCase):
def test_manual(self):
episodes = [
[1, 2, 3],
[4, 5],
[6, 7, 8],
[9],
]
self.assertEqual(
ppo._limit_sequence_length(episodes, 1),
[[1], [2], [3], [4], [5], [6], [7], [8], [9]],
)
self.assertEqual(
ppo._limit_sequence_length(episodes, 2),
[
[1, 2],
[3],
[4, 5],
[6, 7],
[8],
[9],
],
)
self.assertEqual(
ppo._limit_sequence_length(episodes, 3),
episodes,
)
self.assertEqual(
ppo._limit_sequence_length(episodes, 4),
episodes,
)
def test_random(self):
episodes = make_random_episodes()
limit = 5
new_episodes = pfrl.agents.ppo._limit_sequence_length(episodes, limit)
for ep in new_episodes:
self.assertLessEqual(len(ep), limit)
# They should have the same number of transitions
self.assertEqual(
sum(len(ep) for ep in episodes), sum(len(ep) for ep in new_episodes)
)
@pytest.mark.parametrize("use_obs_normalizer", [True, False])
@pytest.mark.parametrize("gamma", [1, 0.8, 0])
@pytest.mark.parametrize("lambd", [1, 0.8, 0])
@pytest.mark.parametrize("max_recurrent_sequence_len", [None, 7])
def test_ppo_dataset_recurrent_and_non_recurrent_equivalence(
use_obs_normalizer, gamma, lambd, max_recurrent_sequence_len
):
"""Test equivalence between recurrent and non-recurrent datasets.
When the same feed-forward model is used, the values of
log_prob, v_pred, next_v_pred obtained by both recurrent and
non-recurrent dataset creation functions should be the same.
"""
episodes = make_random_episodes()
if use_obs_normalizer:
obs_normalizer = pfrl.nn.EmpiricalNormalization(2, clip_threshold=5)
obs_normalizer.experience(torch.rand(10, 2))
else:
obs_normalizer = None
def phi(obs):
return (obs * 0.5).astype(np.float32)
device = torch.device("cpu")
obs_size = 2
n_actions = 3
non_recurrent_model = pfrl.nn.Branched(
nn.Sequential(
nn.Linear(obs_size, n_actions),
SoftmaxCategoricalHead(),
),
nn.Linear(obs_size, 1),
)
recurrent_model = RecurrentSequential(
non_recurrent_model,
)
dataset = pfrl.agents.ppo._make_dataset(
episodes=copy.deepcopy(episodes),
model=non_recurrent_model,
phi=phi,
batch_states=batch_states,
obs_normalizer=obs_normalizer,
gamma=gamma,
lambd=lambd,
device=device,
)
dataset_recurrent = pfrl.agents.ppo._make_dataset_recurrent(
episodes=copy.deepcopy(episodes),
model=recurrent_model,
phi=phi,
batch_states=batch_states,
obs_normalizer=obs_normalizer,
gamma=gamma,
lambd=lambd,
max_recurrent_sequence_len=max_recurrent_sequence_len,
device=device,
)
assert "log_prob" not in episodes[0][0]
assert "log_prob" in dataset[0]
assert "log_prob" in dataset_recurrent[0][0]
# They are not just shallow copies
assert dataset[0]["log_prob"] is not dataset_recurrent[0][0]["log_prob"]
states = [tr["state"] for tr in dataset]
recurrent_states = [
tr["state"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(states, recurrent_states)
actions = [tr["action"] for tr in dataset]
recurrent_actions = [
tr["action"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(actions, recurrent_actions)
rewards = [tr["reward"] for tr in dataset]
recurrent_rewards = [
tr["reward"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(rewards, recurrent_rewards)
nonterminals = [tr["nonterminal"] for tr in dataset]
recurrent_nonterminals = [
tr["nonterminal"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(nonterminals, recurrent_nonterminals)
log_probs = [tr["log_prob"] for tr in dataset]
recurrent_log_probs = [
tr["log_prob"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(log_probs, recurrent_log_probs)
vs_pred = [tr["v_pred"] for tr in dataset]
recurrent_vs_pred = [
tr["v_pred"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(vs_pred, recurrent_vs_pred)
next_vs_pred = [tr["next_v_pred"] for tr in dataset]
recurrent_next_vs_pred = [
tr["next_v_pred"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(next_vs_pred, recurrent_next_vs_pred)
advs = [tr["adv"] for tr in dataset]
recurrent_advs = [
tr["adv"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(advs, recurrent_advs)
vs_teacher = [tr["v_teacher"] for tr in dataset]
recurrent_vs_teacher = [
tr["v_teacher"] for tr in itertools.chain.from_iterable(dataset_recurrent)
]
torch_assert_allclose(vs_teacher, recurrent_vs_teacher)
class _TestPPO:
@pytest.fixture(autouse=True)
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.agent_dirname = os.path.join(self.tmpdir, "agent_final")
@pytest.mark.slow
def test_abc_cpu(self):
self._test_abc()
self._test_abc(steps=0, load_model=True)
@pytest.mark.slow
@pytest.mark.gpu
def test_abc_gpu(self):
self._test_abc(gpu=0)
def test_abc_fast_cpu(self):
self._test_abc(steps=100, require_success=False)
self._test_abc(steps=0, require_success=False, load_model=True)
@pytest.mark.gpu
def test_abc_fast_gpu(self):
self._test_abc(steps=100, require_success=False, gpu=0)
@pytest.mark.slow
def test_abc_batch_cpu(self):
self._test_abc_batch()
self._test_abc_batch(steps=0, load_model=True)
@pytest.mark.slow
@pytest.mark.gpu
def test_abc_batch_gpu(self):
self._test_abc_batch(gpu=0)
def test_abc_batch_fast_cpu(self):
self._test_abc_batch(steps=100, require_success=False)
self._test_abc_batch(steps=0, require_success=False, load_model=True)
@pytest.mark.gpu
def test_abc_batch_fast_gpu(self):
self._test_abc_batch(steps=100, require_success=False, gpu=0)
def _test_abc(self, steps=100000, require_success=True, gpu=-1, load_model=False):
env, _ = self.make_env_and_successful_return(test=False)
test_env, successful_return = self.make_env_and_successful_return(test=True)
agent = self.make_agent(env, gpu)
max_episode_len = None if self.episodic else 2
if load_model:
print("Load agent from", self.agent_dirname)
agent.load(self.agent_dirname)
# Train
train_agent_with_evaluation(
agent=agent,
env=env,
steps=steps,
outdir=self.tmpdir,
eval_interval=200,
eval_n_steps=None,
eval_n_episodes=50,
successful_score=successful_return,
eval_env=test_env,
train_max_episode_len=max_episode_len,
)
# Test
n_test_runs = 10
eval_returns, _ = run_evaluation_episodes(
test_env,
agent,
n_steps=None,
n_episodes=n_test_runs,
max_episode_len=max_episode_len,
)
if require_success:
n_succeeded = np.sum(np.asarray(eval_returns) >= successful_return)
assert n_succeeded == n_test_runs
# Save
agent.save(self.agent_dirname)
def _test_abc_batch(
self, steps=100000, require_success=True, gpu=-1, load_model=False, num_envs=4
):
env, _ = self.make_vec_env_and_successful_return(test=False, num_envs=num_envs)
test_env, successful_return = self.make_vec_env_and_successful_return(
test=True, num_envs=num_envs
)
agent = self.make_agent(env, gpu)
max_episode_len = None if self.episodic else 2
if load_model:
print("Load agent from", self.agent_dirname)
agent.load(self.agent_dirname)
# Train
train_agent_batch_with_evaluation(
agent=agent,
env=env,
steps=steps,
outdir=self.tmpdir,
eval_interval=200,
eval_n_steps=None,
eval_n_episodes=40,
successful_score=successful_return,
eval_env=test_env,
log_interval=100,
max_episode_len=max_episode_len,
)
env.close()
# Test
n_test_runs = 10
eval_returns, _ = batch_run_evaluation_episodes(
test_env,
agent,
n_steps=None,
n_episodes=n_test_runs,
max_episode_len=max_episode_len,
)
test_env.close()
if require_success:
n_succeeded = np.sum(np.asarray(eval_returns) >= successful_return)
assert n_succeeded == n_test_runs
# Save
agent.save(self.agent_dirname)
def make_agent(self, env, gpu):
model = self.make_model(env)
opt = torch.optim.Adam(model.parameters(), lr=1e-2)
return self.make_ppo_agent(env=env, model=model, opt=opt, gpu=gpu)
def make_ppo_agent(self, env, model, opt, gpu):
return PPO(
model,
opt,
gpu=gpu,
gamma=0.8,
lambd=self.lambd,
update_interval=64,
minibatch_size=16,
epochs=3,
clip_eps_vf=self.clip_eps_vf,
standardize_advantages=self.standardize_advantages,
recurrent=self.recurrent,
entropy_coef=1e-5,
act_deterministically=True,
max_grad_norm=1.0,
)
def make_model(self, env):
hidden_size = 20
obs_size = env.observation_space.low.size
def weight_scale(layer, scale):
with torch.no_grad():
layer.weight.mul_(scale)
return layer
if self.recurrent:
v = RecurrentSequential(
nn.LSTM(num_layers=1, input_size=obs_size, hidden_size=hidden_size),
weight_scale(nn.Linear(hidden_size, 1), 1e-1),
)
if self.discrete:
n_actions = env.action_space.n
pi = RecurrentSequential(
nn.LSTM(num_layers=1, input_size=obs_size, hidden_size=hidden_size),
weight_scale(nn.Linear(hidden_size, n_actions), 1e-1),
SoftmaxCategoricalHead(),
)
else:
action_size = env.action_space.low.size
pi = RecurrentSequential(
nn.LSTM(num_layers=1, input_size=obs_size, hidden_size=hidden_size),
weight_scale(nn.Linear(hidden_size, action_size), 1e-1),
GaussianHeadWithStateIndependentCovariance(
action_size=action_size,
var_type="diagonal",
var_func=lambda x: torch.exp(2 * x),
var_param_init=0,
),
)
return RecurrentBranched(pi, v)
else:
v = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.Tanh(),
weight_scale(nn.Linear(hidden_size, 1), 1e-1),
)
if self.discrete:
n_actions = env.action_space.n
pi = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.Tanh(),
weight_scale(nn.Linear(hidden_size, n_actions), 1e-1),
SoftmaxCategoricalHead(),
)
else:
action_size = env.action_space.low.size
pi = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.Tanh(),
weight_scale(nn.Linear(hidden_size, action_size), 1e-1),
GaussianHeadWithStateIndependentCovariance(
action_size=action_size,
var_type="diagonal",
var_func=lambda x: torch.exp(2 * x),
var_param_init=0,
),
)
return pfrl.nn.Branched(pi, v)
def make_env_and_successful_return(self, test):
env = ABC(
discrete=self.discrete,
deterministic=test,
episodic=self.episodic,
partially_observable=self.recurrent,
)
return env, 1.0
def make_vec_env_and_successful_return(self, test, num_envs=3):
def make_env():
return self.make_env_and_successful_return(test)[0]
vec_env = pfrl.envs.MultiprocessVectorEnv([make_env for _ in range(num_envs)])
return vec_env, 1.0
@pytest.mark.parametrize("clip_eps_vf", [None, 0.2])
@pytest.mark.parametrize("lambd", [0.0, 0.5])
@pytest.mark.parametrize("discrete", [False, True])
@pytest.mark.parametrize("standardize_advantages", [False, True])
@pytest.mark.parametrize("episodic", [True, False])
class TestPPONonRecurrent(_TestPPO):
@pytest.fixture(autouse=True)
def set_params(
self,
clip_eps_vf,
lambd,
discrete,
standardize_advantages,
episodic,
):
self.clip_eps_vf = clip_eps_vf
self.lambd = lambd
self.discrete = discrete
self.standardize_advantages = standardize_advantages
self.episodic = episodic
self.recurrent = False
@pytest.mark.parametrize("clip_eps_vf", [0.2])
@pytest.mark.parametrize("lambd", [0.0, 0.5])
@pytest.mark.parametrize("discrete", [False, True])
@pytest.mark.parametrize("standardize_advantages", [True])
@pytest.mark.parametrize("episodic", [True, False])
class TestPPORecurrent(_TestPPO):
@pytest.fixture(autouse=True)
def set_params(
self,
clip_eps_vf,
lambd,
discrete,
standardize_advantages,
episodic,
):
self.clip_eps_vf = clip_eps_vf
self.lambd = lambd
self.discrete = discrete
self.standardize_advantages = standardize_advantages
self.episodic = episodic
self.recurrent = True
def test_yield_minibatches_divisible():
dataset = [1, 2, 3, 4]
minibatches = list(ppo._yield_minibatches(dataset, minibatch_size=2, num_epochs=3))
assert len(minibatches) == 6
samples = sum(minibatches, [])
assert len(samples) == 12
assert {1, 2, 3, 4} == set(samples[:4])
assert {1, 2, 3, 4} == set(samples[4:8])
assert {1, 2, 3, 4} == set(samples[8:12])
def test_yield_minibatches_indivisible():
dataset = [1, 2, 3]
minibatches = list(ppo._yield_minibatches(dataset, minibatch_size=2, num_epochs=3))
assert len(minibatches) == 5
samples = sum(minibatches, [])
assert len(samples) == 10
# samples[:6] is from the first two epochs
assert samples[:6].count(1) == 2
assert samples[:6].count(2) == 2
assert samples[:6].count(3) == 2
# samples[6:] is from the final epoch
assert 1 <= samples[6:].count(1) <= 2
assert 1 <= samples[6:].count(2) <= 2
assert 1 <= samples[6:].count(3) <= 2
def test_yield_minibatches_smaller_dataset():
# dataset smaller than minibatch
dataset = [1, 2]
minibatches = list(ppo._yield_minibatches(dataset, minibatch_size=4, num_epochs=3))
assert len(minibatches) == 2
samples = sum(minibatches, [])
assert len(samples) == 8
assert samples.count(1) == 4
assert samples.count(2) == 4
|
py | 1a4a48fe5222188308d77815cac10833d463a615 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
---------------------------------------------------------------------------------------------
# View a detailed description of what a function or module does
---------------------------------------------------------------------------------------------
# View help for a function
help('dir')
help(dir)
---------------------------------------------------------------------------------------------
# View help for a module
help('sys')
---------------------------------------------------------------------------------------------
# View help for a method
a = [1, 2, 3]
help(a)
help(a.append)
---------------------------------------------------------------------------------------------
# See which modules are installed
help('modules')
---------------------------------------------------------------------------------------------
# help("modules") can take a long time, because it has to import every module in order to
# search that module's submodule paths.
# If any module has code outside an `if __name__ == "__main__":` guard, and that code expects
# user input, enters an infinite loop, or hangs for any other reason, this can be a problem
---------------------------------------------------------------------------------------------
"""
|
py | 1a4a492d2fbe24a9aca2099b74ee34ef222b729c | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import functools as ft
import itertools
import os
import re
import shutil
import sys
from llnl.util import tty
from llnl.util.compat import filter, map, zip
from llnl.util.filesystem import (
mkdirp,
remove_dead_links,
remove_empty_directories,
visit_directory_tree,
)
from llnl.util.lang import index_by, match_predicate
from llnl.util.link_tree import (
DestinationMergeVisitor,
LinkTree,
MergeConflictSummary,
SingleMergeConflictError,
SourceMergeVisitor,
)
from llnl.util.symlink import symlink
from llnl.util.tty.color import colorize
import spack.config
import spack.paths
import spack.projections
import spack.schema.projections
import spack.spec
import spack.store
import spack.util.spack_json as s_json
import spack.util.spack_yaml as s_yaml
from spack.directory_layout import (
ExtensionAlreadyInstalledError,
YamlViewExtensionsLayout,
)
from spack.error import SpackError
__all__ = ["FilesystemView", "YamlFilesystemView"]
_projections_path = '.spack/projections.yaml'
def view_symlink(src, dst, **kwargs):
# keyword arguments are irrelevant
# here to fit required call signature
symlink(src, dst)
def view_hardlink(src, dst, **kwargs):
# keyword arguments are irrelevant
# here to fit required call signature
os.link(src, dst)
def view_copy(src, dst, view, spec=None):
"""
Copy a file from src to dst.
Use spec and view to generate relocations
"""
shutil.copy2(src, dst)
if spec and not spec.external:
# Not metadata, we have to relocate it
# Get information on where to relocate from/to
# This is vestigial code for the *old* location of sbang. Previously,
# sbang was a bash script, and it lived in the spack prefix. It is
# now a POSIX script that lives in the install prefix. Old packages
# will have the old sbang location in their shebangs.
# TODO: Not sure which one to use...
import spack.hooks.sbang as sbang
# Break a package include cycle
import spack.relocate
orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(spack.paths.spack_root)
new_sbang = sbang.sbang_shebang_line()
prefix_to_projection = collections.OrderedDict({
spec.prefix: view.get_projection_for_spec(spec)})
for dep in spec.traverse():
if not dep.external:
prefix_to_projection[dep.prefix] = \
view.get_projection_for_spec(dep)
if spack.relocate.is_binary(dst):
spack.relocate.relocate_text_bin(
binaries=[dst],
prefixes=prefix_to_projection
)
else:
prefix_to_projection[spack.store.layout.root] = view._root
prefix_to_projection[orig_sbang] = new_sbang
spack.relocate.relocate_text(
files=[dst],
prefixes=prefix_to_projection
)
try:
stat = os.stat(src)
os.chown(dst, stat.st_uid, stat.st_gid)
except OSError:
tty.debug('Can\'t change the permissions for %s' % dst)
def view_func_parser(parsed_name):
# What method are we using for this view
if parsed_name in ("hardlink", "hard"):
return view_hardlink
elif parsed_name in ("copy", "relocate"):
return view_copy
elif parsed_name in ("add", "symlink", "soft"):
return view_symlink
else:
raise ValueError("invalid link type for view: '%s'" % parsed_name)
def inverse_view_func_parser(view_type):
# get string based on view type
if view_type is view_hardlink:
link_name = 'hardlink'
elif view_type is view_copy:
link_name = 'copy'
else:
link_name = 'symlink'
return link_name
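# Illustrative round trip between the two parsers above (a sketch, not part of
# the original module): every accepted alias maps to one of the three link
# functions, and inverse_view_func_parser recovers the canonical name, e.g.
#     view_func_parser("hard") is view_hardlink            # True
#     inverse_view_func_parser(view_copy)                  # 'copy'
#     inverse_view_func_parser(view_func_parser("soft"))   # 'symlink'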
class FilesystemView(object):
"""
Governs a filesystem view that is located at certain root-directory.
Packages are linked from their install directories into a common file
    hierarchy.
    In distributed filesystems, loading each installed package separately
can lead to slow-downs due to too many directories being traversed.
This can be circumvented by loading all needed modules into a common
directory structure.
"""
def __init__(self, root, layout, **kwargs):
"""
Initialize a filesystem view under the given `root` directory with
corresponding directory `layout`.
Files are linked by method `link` (llnl.util.symlink by default).
"""
self._root = root
self.layout = layout
self.projections = kwargs.get('projections', {})
self.ignore_conflicts = kwargs.get("ignore_conflicts", False)
self.verbose = kwargs.get("verbose", False)
# Setup link function to include view
link_func = kwargs.get("link", view_symlink)
self.link = ft.partial(link_func, view=self)
def add_specs(self, *specs, **kwargs):
"""
Add given specs to view.
The supplied specs might be standalone packages or extensions of
other packages.
        Should accept `with_dependencies` as keyword argument (default
        True) to indicate whether or not dependencies should be activated as
        well.
        Should accept an `exclude` keyword argument containing a list of
regexps that filter out matching spec names.
This method should make use of `activate_{extension,standalone}`.
"""
raise NotImplementedError
def add_extension(self, spec):
"""
Add (link) an extension in this view. Does not add dependencies.
"""
raise NotImplementedError
def add_standalone(self, spec):
"""
Add (link) a standalone package into this view.
"""
raise NotImplementedError
def check_added(self, spec):
"""
Check if the given concrete spec is active in this view.
"""
raise NotImplementedError
def remove_specs(self, *specs, **kwargs):
"""
Removes given specs from view.
The supplied spec might be a standalone package or an extension of
another package.
Should accept `with_dependencies` as keyword argument (default
        True) to indicate whether or not dependencies should be deactivated
        as well.
        Should accept `with_dependents` as keyword argument (default True)
        to indicate whether or not dependents on the deactivated specs
        should be removed as well.
        Should accept an `exclude` keyword argument containing a list of
regexps that filter out matching spec names.
This method should make use of `deactivate_{extension,standalone}`.
"""
raise NotImplementedError
def remove_extension(self, spec):
"""
Remove (unlink) an extension from this view.
"""
raise NotImplementedError
def remove_standalone(self, spec):
"""
Remove (unlink) a standalone package from this view.
"""
raise NotImplementedError
def get_projection_for_spec(self, spec):
"""
Get the projection in this view for a spec.
"""
raise NotImplementedError
def get_all_specs(self):
"""
Get all specs currently active in this view.
"""
raise NotImplementedError
def get_spec(self, spec):
"""
Return the actual spec linked in this view (i.e. do not look it up
in the database by name).
`spec` can be a name or a spec from which the name is extracted.
As there can only be a single version active for any spec the name
is enough to identify the spec in the view.
If no spec is present, returns None.
"""
raise NotImplementedError
def print_status(self, *specs, **kwargs):
"""
Print a short summary about the given specs, detailing whether..
* ..they are active in the view.
* ..they are active but the activated version differs.
        * ..they are not active in the view.
Takes `with_dependencies` keyword argument so that the status of
dependencies is printed as well.
"""
raise NotImplementedError
class YamlFilesystemView(FilesystemView):
"""
Filesystem view to work with a yaml based directory layout.
"""
def __init__(self, root, layout, **kwargs):
super(YamlFilesystemView, self).__init__(root, layout, **kwargs)
# Super class gets projections from the kwargs
# YAML specific to get projections from YAML file
self.projections_path = os.path.join(self._root, _projections_path)
if not self.projections:
# Read projections file from view
self.projections = self.read_projections()
elif not os.path.exists(self.projections_path):
# Write projections file to new view
self.write_projections()
else:
# Ensure projections are the same from each source
# Read projections file from view
if self.projections != self.read_projections():
msg = 'View at %s has projections file' % self._root
msg += ' which does not match projections passed manually.'
raise ConflictingProjectionsError(msg)
self.extensions_layout = YamlViewExtensionsLayout(self, layout)
self._croot = colorize_root(self._root) + " "
def write_projections(self):
if self.projections:
mkdirp(os.path.dirname(self.projections_path))
with open(self.projections_path, 'w') as f:
f.write(s_yaml.dump_config({'projections': self.projections}))
def read_projections(self):
if os.path.exists(self.projections_path):
with open(self.projections_path, 'r') as f:
projections_data = s_yaml.load(f)
spack.config.validate(projections_data,
spack.schema.projections.schema)
return projections_data['projections']
else:
return {}
def add_specs(self, *specs, **kwargs):
assert all((s.concrete for s in specs))
specs = set(specs)
if kwargs.get("with_dependencies", True):
specs.update(get_dependencies(specs))
if kwargs.get("exclude", None):
specs = set(filter_exclude(specs, kwargs["exclude"]))
conflicts = self.get_conflicts(*specs)
if conflicts:
for s, v in conflicts:
self.print_conflict(v, s)
return
extensions = set(filter(lambda s: s.package.is_extension, specs))
standalones = specs - extensions
set(map(self._check_no_ext_conflicts, extensions))
# fail on first error, otherwise link extensions as well
if all(map(self.add_standalone, standalones)):
all(map(self.add_extension, extensions))
def add_extension(self, spec):
if not spec.package.is_extension:
tty.error(self._croot + 'Package %s is not an extension.'
% spec.name)
return False
if spec.external:
tty.warn(self._croot + 'Skipping external package: %s'
% colorize_spec(spec))
return True
if not spec.package.is_activated(self):
spec.package.do_activate(
self, verbose=self.verbose, with_dependencies=False)
# make sure the meta folder is linked as well (this is not done by the
        # extension-activation mechanism)
if not self.check_added(spec):
self.link_meta_folder(spec)
return True
def add_standalone(self, spec):
if spec.package.is_extension:
tty.error(self._croot + 'Package %s is an extension.'
% spec.name)
return False
if spec.external:
tty.warn(self._croot + 'Skipping external package: %s'
% colorize_spec(spec))
return True
if self.check_added(spec):
tty.warn(self._croot + 'Skipping already linked package: %s'
% colorize_spec(spec))
return True
if spec.package.extendable:
# Check for globally activated extensions in the extendee that
# we're looking at.
activated = [p.spec for p in
spack.store.db.activated_extensions_for(spec)]
if activated:
tty.error("Globally activated extensions cannot be used in "
"conjunction with filesystem views. "
"Please deactivate the following specs: ")
spack.cmd.display_specs(activated, flags=True, variants=True,
long=False)
return False
self.merge(spec)
self.link_meta_folder(spec)
if self.verbose:
tty.info(self._croot + 'Linked package: %s' % colorize_spec(spec))
return True
def merge(self, spec, ignore=None):
pkg = spec.package
view_source = pkg.view_source()
view_dst = pkg.view_destination(self)
tree = LinkTree(view_source)
ignore = ignore or (lambda f: False)
ignore_file = match_predicate(
self.layout.hidden_file_regexes, ignore)
# check for dir conflicts
conflicts = tree.find_dir_conflicts(view_dst, ignore_file)
merge_map = tree.get_file_map(view_dst, ignore_file)
if not self.ignore_conflicts:
conflicts.extend(pkg.view_file_conflicts(self, merge_map))
if conflicts:
raise SingleMergeConflictError(conflicts[0])
# merge directories with the tree
tree.merge_directories(view_dst, ignore_file)
pkg.add_files_to_view(self, merge_map)
def unmerge(self, spec, ignore=None):
pkg = spec.package
view_source = pkg.view_source()
view_dst = pkg.view_destination(self)
tree = LinkTree(view_source)
ignore = ignore or (lambda f: False)
ignore_file = match_predicate(
self.layout.hidden_file_regexes, ignore)
merge_map = tree.get_file_map(view_dst, ignore_file)
pkg.remove_files_from_view(self, merge_map)
# now unmerge the directory tree
tree.unmerge_directories(view_dst, ignore_file)
def remove_files(self, files):
def needs_file(spec, file):
# convert the file we want to remove to a source in this spec
projection = self.get_projection_for_spec(spec)
relative_path = os.path.relpath(file, projection)
test_path = os.path.join(spec.prefix, relative_path)
# check if this spec owns a file of that name (through the
# manifest in the metadata dir, which we have in the view).
manifest_file = os.path.join(self.get_path_meta_folder(spec),
spack.store.layout.manifest_file_name)
try:
with open(manifest_file, 'r') as f:
manifest = s_json.load(f)
except (OSError, IOError):
# if we can't load it, assume it doesn't know about the file.
manifest = {}
return test_path in manifest
specs = self.get_all_specs()
for file in files:
if not os.path.lexists(file):
tty.warn("Tried to remove %s which does not exist" % file)
continue
# remove if file is not owned by any other package in the view
# This will only be false if two packages are merged into a prefix
# and have a conflicting file
# check all specs for whether they own the file. That include the spec
# we are currently removing, as we remove files before unlinking the
# metadata directory.
if len([s for s in specs if needs_file(s, file)]) <= 1:
tty.debug("Removing file " + file)
os.remove(file)
def check_added(self, spec):
assert spec.concrete
return spec == self.get_spec(spec)
def remove_specs(self, *specs, **kwargs):
assert all((s.concrete for s in specs))
with_dependents = kwargs.get("with_dependents", True)
with_dependencies = kwargs.get("with_dependencies", False)
# caller can pass this in, as get_all_specs() is expensive
all_specs = kwargs.get("all_specs", None) or set(self.get_all_specs())
specs = set(specs)
if with_dependencies:
specs = get_dependencies(specs)
if kwargs.get("exclude", None):
specs = set(filter_exclude(specs, kwargs["exclude"]))
to_deactivate = specs
to_keep = all_specs - to_deactivate
dependents = find_dependents(to_keep, to_deactivate)
if with_dependents:
# remove all packages depending on the ones to remove
if len(dependents) > 0:
tty.warn(self._croot +
"The following dependents will be removed: %s"
% ", ".join((s.name for s in dependents)))
to_deactivate.update(dependents)
elif len(dependents) > 0:
tty.warn(self._croot +
"The following packages will be unusable: %s"
% ", ".join((s.name for s in dependents)))
# Determine the order that packages should be removed from the view;
# dependents come before their dependencies.
to_deactivate_sorted = list()
depmap = dict()
for spec in to_deactivate:
depmap[spec] = set(d for d in spec.traverse(root=False)
if d in to_deactivate)
while depmap:
for spec in [s for s, d in depmap.items() if not d]:
to_deactivate_sorted.append(spec)
for s in depmap.keys():
depmap[s].discard(spec)
depmap.pop(spec)
to_deactivate_sorted.reverse()
# Ensure that the sorted list contains all the packages
assert set(to_deactivate_sorted) == to_deactivate
# Remove the packages from the view
for spec in to_deactivate_sorted:
if spec.package.is_extension:
self.remove_extension(spec, with_dependents=with_dependents)
else:
self.remove_standalone(spec)
self._purge_empty_directories()
def remove_extension(self, spec, with_dependents=True):
"""
Remove (unlink) an extension from this view.
"""
if not self.check_added(spec):
tty.warn(self._croot +
'Skipping package not linked in view: %s' % spec.name)
return
if spec.package.is_activated(self):
spec.package.do_deactivate(
self,
verbose=self.verbose,
remove_dependents=with_dependents)
self.unlink_meta_folder(spec)
def remove_standalone(self, spec):
"""
Remove (unlink) a standalone package from this view.
"""
if not self.check_added(spec):
tty.warn(self._croot +
'Skipping package not linked in view: %s' % spec.name)
return
self.unmerge(spec)
self.unlink_meta_folder(spec)
if self.verbose:
tty.info(self._croot + 'Removed package: %s' % colorize_spec(spec))
def get_projection_for_spec(self, spec):
"""
Return the projection for a spec in this view.
Relies on the ordering of projections to avoid ambiguity.
"""
spec = spack.spec.Spec(spec)
# Extensions are placed by their extendee, not by their own spec
locator_spec = spec
if spec.package.extendee_spec:
locator_spec = spec.package.extendee_spec
proj = spack.projections.get_projection(self.projections, locator_spec)
if proj:
return os.path.join(self._root, locator_spec.format(proj))
return self._root
def get_all_specs(self):
md_dirs = []
for root, dirs, files in os.walk(self._root):
if spack.store.layout.metadata_dir in dirs:
md_dirs.append(os.path.join(root,
spack.store.layout.metadata_dir))
specs = []
for md_dir in md_dirs:
if os.path.exists(md_dir):
for name_dir in os.listdir(md_dir):
filename = os.path.join(md_dir, name_dir,
spack.store.layout.spec_file_name)
spec = get_spec_from_file(filename)
if spec:
specs.append(spec)
return specs
def get_conflicts(self, *specs):
"""
Return list of tuples (<spec>, <spec in view>) where the spec
active in the view differs from the one to be activated.
"""
in_view = map(self.get_spec, specs)
return [(s, v) for s, v in zip(specs, in_view)
if v is not None and s != v]
def get_path_meta_folder(self, spec):
"Get path to meta folder for either spec or spec name."
return os.path.join(self.get_projection_for_spec(spec),
spack.store.layout.metadata_dir,
getattr(spec, "name", spec))
def get_spec(self, spec):
dotspack = self.get_path_meta_folder(spec)
filename = os.path.join(dotspack,
spack.store.layout.spec_file_name)
return get_spec_from_file(filename)
def link_meta_folder(self, spec):
src = spack.store.layout.metadata_path(spec)
tgt = self.get_path_meta_folder(spec)
tree = LinkTree(src)
# there should be no conflicts when linking the meta folder
tree.merge(tgt, link=self.link)
def print_conflict(self, spec_active, spec_specified, level="error"):
"Singular print function for spec conflicts."
cprint = getattr(tty, level)
color = sys.stdout.isatty()
linked = tty.color.colorize(" (@gLinked@.)", color=color)
specified = tty.color.colorize("(@rSpecified@.)", color=color)
cprint(self._croot + "Package conflict detected:\n"
"%s %s\n" % (linked, colorize_spec(spec_active)) +
"%s %s" % (specified, colorize_spec(spec_specified)))
def print_status(self, *specs, **kwargs):
if kwargs.get("with_dependencies", False):
specs = set(get_dependencies(specs))
specs = sorted(specs, key=lambda s: s.name)
in_view = list(map(self.get_spec, specs))
for s, v in zip(specs, in_view):
if not v:
tty.error(self._croot +
'Package not linked: %s' % s.name)
elif s != v:
self.print_conflict(v, s, level="warn")
in_view = list(filter(None, in_view))
if len(specs) > 0:
tty.msg("Packages linked in %s:" % self._croot[:-1])
# Make a dict with specs keyed by architecture and compiler.
index = index_by(specs, ('architecture', 'compiler'))
# Traverse the index and print out each package
for i, (architecture, compiler) in enumerate(sorted(index)):
if i > 0:
print()
header = "%s{%s} / %s{%s}" % (spack.spec.architecture_color,
architecture,
spack.spec.compiler_color,
compiler)
tty.hline(colorize(header), char='-')
specs = index[(architecture, compiler)]
specs.sort()
format_string = '{name}{@version}'
format_string += '{%compiler}{compiler_flags}{variants}'
abbreviated = [s.cformat(format_string) for s in specs]
# Print one spec per line along with prefix path
width = max(len(s) for s in abbreviated)
width += 2
format = " %%-%ds%%s" % width
for abbrv, s in zip(abbreviated, specs):
prefix = ''
if self.verbose:
prefix = colorize('@K{%s}' % s.dag_hash(7))
print(
prefix + (format % (abbrv,
self.get_projection_for_spec(s)))
)
else:
tty.warn(self._croot + "No packages found.")
def _purge_empty_directories(self):
remove_empty_directories(self._root)
def _purge_broken_links(self):
remove_dead_links(self._root)
def clean(self):
self._purge_broken_links()
self._purge_empty_directories()
def unlink_meta_folder(self, spec):
path = self.get_path_meta_folder(spec)
assert os.path.exists(path)
shutil.rmtree(path)
def _check_no_ext_conflicts(self, spec):
"""
Check that there is no extension conflict for specs.
"""
extendee = spec.package.extendee_spec
try:
self.extensions_layout.check_extension_conflict(extendee, spec)
except ExtensionAlreadyInstalledError:
# we print the warning here because later on the order in which
# packages get activated is not clear (set-sorting)
tty.warn(self._croot +
'Skipping already activated package: %s' % spec.name)
class SimpleFilesystemView(FilesystemView):
"""A simple and partial implementation of FilesystemView focused on
performance and immutable views, where specs cannot be removed after they
were added."""
def __init__(self, root, layout, **kwargs):
super(SimpleFilesystemView, self).__init__(root, layout, **kwargs)
def add_specs(self, *specs, **kwargs):
assert all((s.concrete for s in specs))
if len(specs) == 0:
return
# Drop externals
for s in specs:
if s.external:
tty.warn('Skipping external package: ' + s.short_spec)
specs = [s for s in specs if not s.external]
if kwargs.get("exclude", None):
specs = set(filter_exclude(specs, kwargs["exclude"]))
# Ignore spack meta data folder.
def skip_list(file):
return os.path.basename(file) == spack.store.layout.metadata_dir
visitor = SourceMergeVisitor(ignore=skip_list)
# Gather all the directories to be made and files to be linked
for spec in specs:
src_prefix = spec.package.view_source()
visitor.set_projection(self.get_relative_projection_for_spec(spec))
visit_directory_tree(src_prefix, visitor)
# Check for conflicts in destination dir.
visit_directory_tree(self._root, DestinationMergeVisitor(visitor))
# Throw on fatal dir-file conflicts.
if visitor.fatal_conflicts:
raise MergeConflictSummary(visitor.fatal_conflicts)
# Inform about file-file conflicts.
if visitor.file_conflicts:
if self.ignore_conflicts:
tty.debug("{0} file conflicts".format(len(visitor.file_conflicts)))
else:
raise MergeConflictSummary(visitor.file_conflicts)
tty.debug("Creating {0} dirs and {1} links".format(
len(visitor.directories),
len(visitor.files)))
# Make the directory structure
for dst in visitor.directories:
os.mkdir(os.path.join(self._root, dst))
# Then group the files to be linked by spec...
# For compatibility, we have to create a merge_map dict mapping
# full_src => full_dst
files_per_spec = itertools.groupby(
visitor.files.items(), key=lambda item: item[1][0])
for (spec, (src_root, rel_paths)) in zip(specs, files_per_spec):
merge_map = dict()
for dst_rel, (_, src_rel) in rel_paths:
full_src = os.path.join(src_root, src_rel)
full_dst = os.path.join(self._root, dst_rel)
merge_map[full_src] = full_dst
spec.package.add_files_to_view(self, merge_map, skip_if_exists=False)
# Finally create the metadata dirs.
self.link_metadata(specs)
def link_metadata(self, specs):
metadata_visitor = SourceMergeVisitor()
for spec in specs:
src_prefix = os.path.join(
spec.package.view_source(),
spack.store.layout.metadata_dir)
proj = os.path.join(
self.get_relative_projection_for_spec(spec),
spack.store.layout.metadata_dir,
spec.name)
metadata_visitor.set_projection(proj)
visit_directory_tree(src_prefix, metadata_visitor)
# Check for conflicts in destination dir.
visit_directory_tree(self._root, DestinationMergeVisitor(metadata_visitor))
# Throw on dir-file conflicts -- unlikely, but who knows.
if metadata_visitor.fatal_conflicts:
raise MergeConflictSummary(metadata_visitor.fatal_conflicts)
# We are strict here for historical reasons
if metadata_visitor.file_conflicts:
raise MergeConflictSummary(metadata_visitor.file_conflicts)
for dst in metadata_visitor.directories:
os.mkdir(os.path.join(self._root, dst))
for dst_relpath, (src_root, src_relpath) in metadata_visitor.files.items():
self.link(os.path.join(src_root, src_relpath),
os.path.join(self._root, dst_relpath))
def get_relative_projection_for_spec(self, spec):
# Extensions are placed by their extendee, not by their own spec
if spec.package.extendee_spec:
spec = spec.package.extendee_spec
p = spack.projections.get_projection(self.projections, spec)
return spec.format(p) if p else ''
def get_projection_for_spec(self, spec):
"""
Return the projection for a spec in this view.
Relies on the ordering of projections to avoid ambiguity.
"""
spec = spack.spec.Spec(spec)
# Extensions are placed by their extendee, not by their own spec
locator_spec = spec
if spec.package.extendee_spec:
locator_spec = spec.package.extendee_spec
proj = spack.projections.get_projection(self.projections, locator_spec)
if proj:
return os.path.join(self._root, locator_spec.format(proj))
return self._root
#####################
# utility functions #
#####################
def get_spec_from_file(filename):
try:
with open(filename, "r") as f:
return spack.spec.Spec.from_yaml(f)
except IOError:
return None
def colorize_root(root):
colorize = ft.partial(tty.color.colorize, color=sys.stdout.isatty())
pre, post = map(colorize, "@M[@. @M]@.".split())
return "".join([pre, root, post])
def colorize_spec(spec):
"Colorize spec output if in TTY."
if sys.stdout.isatty():
return spec.cshort_spec
else:
return spec.short_spec
def find_dependents(all_specs, providers, deptype='run'):
"""
Return a set containing all those specs from all_specs that depend on
providers at the given dependency type.
"""
dependents = set()
for s in all_specs:
for dep in s.traverse(deptype=deptype):
if dep in providers:
dependents.add(s)
return dependents
def filter_exclude(specs, exclude):
"Filter specs given sequence of exclude regex"
to_exclude = [re.compile(e) for e in exclude]
def keep(spec):
for e in to_exclude:
if e.match(spec.name):
return False
return True
return filter(keep, specs)
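# Illustrative note (not part of the original module): filter_exclude is lazy,
# so e.g. list(filter_exclude(specs, ['^python$', '^cmake'])) keeps only the
# specs whose name matches none of the given regexes.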
def get_dependencies(specs):
"Get set of dependencies (includes specs)"
retval = set()
set(map(retval.update, (set(s.traverse()) for s in specs)))
return retval
class ConflictingProjectionsError(SpackError):
"""Raised when a view has a projections file and is given one manually."""
|
py | 1a4a493197abcb187c9c1d6428b89344a7f3954d | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.enums",
marshal="google.ads.googleads.v4",
manifest={"AssetTypeEnum",},
)
class AssetTypeEnum(proto.Message):
r"""Container for enum describing the types of asset."""
class AssetType(proto.Enum):
r"""Enum describing possible types of asset."""
UNSPECIFIED = 0
UNKNOWN = 1
YOUTUBE_VIDEO = 2
MEDIA_BUNDLE = 3
IMAGE = 4
TEXT = 5
BOOK_ON_GOOGLE = 7
__all__ = tuple(sorted(__protobuf__.manifest))
|
py | 1a4a499a94bb658b99703d0180fe42f0dab7a74e | from Effects.Effect import Effect
from PIL import ImageDraw
class Negative(Effect):
def Iteration(self):
for i in range(self.width):
for j in range(self.height):
a = self.pix[i, j][0]
b = self.pix[i, j][1]
c = self.pix[i, j][2]
self.draw.point((i, j), (255 - a, 255 - b, 255 - c))
|
py | 1a4a49bf84568b1875abc6a262bd3363a85c0030 | """Provide the MessageableMixin class."""
from ....const import API_PATH
class MessageableMixin:
"""Interface for classes that can be messaged."""
def message(self, subject, message, from_subreddit=None):
"""
Send a message to a redditor or a subreddit's moderators (mod mail).
:param subject: The subject of the message.
:param message: The message content.
:param from_subreddit: A :class:`~.Subreddit` instance or string to
send the message from. When provided, messages are sent from
the subreddit rather than from the authenticated user.
Note that the authenticated user must be a moderator of the
subreddit and have the ``mail`` moderator permission.
For example, to send a private message to ``u/spez``, try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from PRAW')
To send a message to ``u/spez`` from the moderators of ``r/test`` try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from r/test',
from_subreddit='test')
To send a message to the moderators of ``r/test``, try:
.. code:: python
reddit.subreddit('test').message('TEST', 'test PM from PRAW')
"""
data = {
"subject": subject,
"text": message,
"to": "{}{}".format(
getattr(self.__class__, "MESSAGE_PREFIX", ""), self
),
}
if from_subreddit:
data["from_sr"] = str(from_subreddit)
self._reddit.post(API_PATH["compose"], data=data)
|
py | 1a4a49cf8e4bb47d387bbc8c1a09f47b8d7fb276 | import torch
import shaping
import typing
class GreedyDecoder:
def decode(self, log_probs : shaping.BCt, output_lengths : typing.Optional[shaping.B] = None, K = 1):
# returns list of lists B x l # TODO: (B x H x l)
return [
l[... if K > 1 else 0, :o].tolist() for o,
l in zip(
torch
.as_tensor(output_lengths if output_lengths is not None else [log_probs.shape[-1]] *
len(log_probs)).tolist(),
log_probs.topk(K, dim = 1).indices
)
]
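# Illustrative note (not part of the original module): with log_probs of shape
# (B, C, t) and K = 1, decode() returns, for each utterance, the argmax class
# index at each of its first output_lengths[b] frames, e.g.
#
#     GreedyDecoder().decode(torch.randn(2, 5, 100).log_softmax(dim=1), output_lengths=[100, 80])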
class BeamSearchDecoder:
def __init__(
self,
labels,
lm_path,
beam_width,
beam_alpha = 0,
beam_beta = 0,
cutoff_top_n = 40,
cutoff_prob = 1.0,
num_workers = 1,
topk = 1
):
import ctcdecode
self.topk = topk
self.beam_search_decoder = ctcdecode.CTCBeamDecoder(
list(str(labels).lower()),
lm_path,
beam_alpha,
beam_beta,
cutoff_top_n if cutoff_top_n is not None else len(labels),
cutoff_prob,
beam_width,
num_workers,
labels.blank_idx,
log_probs_input = True
)
def decode(self, log_probs, output_lengths):
list_or_one = lambda xs: xs if len(xs) > 1 else xs[0]
decoded_chr, decoded_scores, decoded_offsets, decoded_lengths = self.beam_search_decoder.decode(log_probs.permute(0, 2, 1).cpu(), torch.as_tensor(output_lengths).cpu().int())
decoded_top_scores, decoded_top_inds = decoded_scores.topk(self.topk, dim = 1)
return [
list_or_one([d[t_, :l[t_]].tolist() for t_ in t.tolist()]) for d,
l,
t in zip(decoded_chr, decoded_lengths, decoded_top_inds)
] #, [list_or_one(t) for t in decoded_top_scores.tolist()]
|
py | 1a4a4a2004117c12734371765e46f305044df41f | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#205. Isomorphic Strings
#Given two strings s and t, determine if they are isomorphic.
#Two strings are isomorphic if the characters in s can be replaced to get t.
#All occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character but a character may map to itself.
#For example,
#Given "egg", "add", return true.
#Given "foo", "bar", return false.
#Given "paper", "title", return true.
#Note:
#You may assume both s and t have the same length.
#class Solution:
# def isIsomorphic(self, s, t):
# """
# :type s: str
# :type t: str
# :rtype: bool
# """
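# Illustrative sketch only, not part of the original template: one common
# approach encodes each string by the index of each character's first
# occurrence and compares the two encodings.
#class Solution:
#    def isIsomorphic(self, s, t):
#        def encode(word):
#            first_seen = {}
#            return [first_seen.setdefault(ch, i) for i, ch in enumerate(word)]
#        return encode(s) == encode(t)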
# Time Is Money |
py | 1a4a4b82d0c987b113afca8da3b9e162c3af8e23 | from __future__ import generator_stop
from fissix.fixer_util import syms
from fissix.fixes import fix_import
import libmodernize
class FixImport(fix_import.FixImport):
# Make sure this runs before any other fixer to guarantee that any other
# added absolute_import doesn't block this fixer's execution.
run_order = 1
def transform(self, node, results):
if self.skip:
return
# We're not interested in __future__ imports here
if (
node.type == syms.import_from
and getattr(results["imp"], "value", None) == "__future__"
):
return
# If there are any non-future imports, add absolute_import
        # Causes many files to get changed, updated and checked in with a new version unnecessarily.
# When doing the clean-up after having moved to Python3, the same files will have to get changed,
# updated and checked-in with new version again, just to have the "from __future__ import absolute_import" removed.
#libmodernize.add_future(node, "absolute_import")
return super().transform(node, results)
|
py | 1a4a4ea2c56c4ac5a44a61aacb613672db242f20 | import c4d
from RedshiftWrapper.Redshift import Redshift
def main():
rs = Redshift()
if rs is False: return
#Assign Material
rs.SetMat(doc.GetFirstMaterial())
#Get all node and assign color
listNode = rs.GetAllNodes()
for node in listNode:
node.SetColor()
c4d.EventAdd()
if __name__=='__main__':
main() |
py | 1a4a504b883697c192fc584b3c8c48ec2acf2498 | """
Module for managing operation and controller modes.
Operation modes can be 'auto', 'comfort', 'standby', 'economy', 'protection' and use either a binary DPT or DPT 20.102.
Controller modes use DPT 20.105.
"""
from itertools import chain
from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Union
from xknx.dpt.dpt_hvac_mode import HVACControllerMode, HVACOperationMode
from xknx.exceptions import DeviceIllegalValue
from xknx.remote_value.remote_value_climate_mode import (
RemoteValueBinaryHeatCool,
RemoteValueBinaryOperationMode,
RemoteValueClimateMode,
RemoteValueClimateModeBase,
)
from .device import Device, DeviceCallbackType
if TYPE_CHECKING:
from xknx.remote_value import RemoteValue
from xknx.telegram import Telegram
from xknx.telegram.address import GroupAddressableType
from xknx.xknx import XKNX
class ClimateMode(Device):
"""Class for managing the climate mode."""
# pylint: disable=invalid-name,too-many-instance-attributes
def __init__(
self,
xknx: "XKNX",
name: str,
group_address_operation_mode: Optional["GroupAddressableType"] = None,
group_address_operation_mode_state: Optional["GroupAddressableType"] = None,
group_address_operation_mode_protection: Optional[
"GroupAddressableType"
] = None,
group_address_operation_mode_night: Optional["GroupAddressableType"] = None,
group_address_operation_mode_comfort: Optional["GroupAddressableType"] = None,
group_address_operation_mode_standby: Optional["GroupAddressableType"] = None,
group_address_controller_status: Optional["GroupAddressableType"] = None,
group_address_controller_status_state: Optional["GroupAddressableType"] = None,
group_address_controller_mode: Optional["GroupAddressableType"] = None,
group_address_controller_mode_state: Optional["GroupAddressableType"] = None,
group_address_heat_cool: Optional["GroupAddressableType"] = None,
group_address_heat_cool_state: Optional["GroupAddressableType"] = None,
operation_modes: Optional[List[Union[str, HVACOperationMode]]] = None,
controller_modes: Optional[List[Union[str, HVACControllerMode]]] = None,
device_updated_cb: Optional[DeviceCallbackType] = None,
):
"""Initialize ClimateMode class."""
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements
super().__init__(xknx, name, device_updated_cb)
self.remote_value_operation_mode: RemoteValueClimateMode[
HVACOperationMode
] = RemoteValueClimateMode(
xknx,
group_address=group_address_operation_mode,
group_address_state=group_address_operation_mode_state,
sync_state=True,
device_name=name,
feature_name="Operation mode",
climate_mode_type=RemoteValueClimateMode.ClimateModeType.HVAC_MODE,
after_update_cb=None,
)
self.remote_value_controller_mode: RemoteValueClimateMode[
HVACControllerMode
] = RemoteValueClimateMode(
xknx,
group_address=group_address_controller_mode,
group_address_state=group_address_controller_mode_state,
sync_state=True,
device_name=name,
feature_name="Controller mode",
climate_mode_type=RemoteValueClimateMode.ClimateModeType.HVAC_CONTR_MODE,
after_update_cb=None,
)
self.remote_value_controller_status: RemoteValueClimateMode[
HVACOperationMode
] = RemoteValueClimateMode(
xknx,
group_address=group_address_controller_status,
group_address_state=group_address_controller_status_state,
sync_state=True,
device_name=name,
feature_name="Controller status",
climate_mode_type=RemoteValueClimateMode.ClimateModeType.CONTROLLER_STATUS,
after_update_cb=None,
)
self.remote_value_operation_mode_comfort = RemoteValueBinaryOperationMode(
xknx,
group_address=group_address_operation_mode_comfort,
group_address_state=group_address_operation_mode_comfort,
sync_state=True,
device_name=name,
feature_name="Operation mode Comfort",
operation_mode=HVACOperationMode.COMFORT,
after_update_cb=None,
)
self.remote_value_operation_mode_standby = RemoteValueBinaryOperationMode(
xknx,
group_address=group_address_operation_mode_standby,
group_address_state=group_address_operation_mode_standby,
sync_state=True,
device_name=name,
feature_name="Operation mode Standby",
operation_mode=HVACOperationMode.STANDBY,
after_update_cb=None,
)
self.remote_value_operation_mode_night = RemoteValueBinaryOperationMode(
xknx,
group_address=group_address_operation_mode_night,
group_address_state=group_address_operation_mode_night,
sync_state=True,
device_name=name,
feature_name="Operation mode Night",
operation_mode=HVACOperationMode.NIGHT,
after_update_cb=None,
)
self.remote_value_operation_mode_protection = RemoteValueBinaryOperationMode(
xknx,
group_address=group_address_operation_mode_protection,
group_address_state=group_address_operation_mode_protection,
sync_state=True,
device_name=name,
feature_name="Operation mode Protection",
operation_mode=HVACOperationMode.FROST_PROTECTION,
after_update_cb=None,
)
self.remote_value_heat_cool = RemoteValueBinaryHeatCool(
xknx,
group_address=group_address_heat_cool,
group_address_state=group_address_heat_cool_state,
sync_state=True,
device_name=name,
feature_name="Heat/Cool",
controller_mode=HVACControllerMode.HEAT,
after_update_cb=None,
)
self.operation_mode = HVACOperationMode.STANDBY
self.controller_mode = HVACControllerMode.HEAT
self._operation_modes: List[HVACOperationMode] = []
if operation_modes is None:
self._operation_modes = self.gather_operation_modes()
else:
for op_mode in operation_modes:
if isinstance(op_mode, str):
self._operation_modes.append(HVACOperationMode(op_mode))
elif isinstance(op_mode, HVACOperationMode):
self._operation_modes.append(op_mode)
self._controller_modes: List[HVACControllerMode] = []
if controller_modes is None:
self._controller_modes = self.gather_controller_modes()
else:
for ct_mode in controller_modes:
if isinstance(ct_mode, str):
self._controller_modes.append(HVACControllerMode(ct_mode))
elif isinstance(ct_mode, HVACControllerMode):
self._controller_modes.append(ct_mode)
self.supports_operation_mode = any(
operation_mode.initialized
for operation_mode in self._iter_byte_operation_modes()
) or any(
operation_mode.initialized
for operation_mode in self._iter_binary_operation_modes()
)
self.supports_controller_mode = any(
operation_mode.initialized
for operation_mode in self._iter_controller_remote_values()
)
self._use_binary_operation_modes = any(
operation_mode.initialized
for operation_mode in self._iter_binary_operation_modes()
)
@classmethod
def from_config(cls, xknx: "XKNX", name: str, config: Any) -> "ClimateMode":
"""Initialize object from configuration structure."""
# pylint: disable=too-many-locals
group_address_operation_mode = config.get("group_address_operation_mode")
group_address_operation_mode_state = config.get(
"group_address_operation_mode_state"
)
group_address_operation_mode_protection = config.get(
"group_address_operation_mode_protection"
)
group_address_operation_mode_night = config.get(
"group_address_operation_mode_night"
)
group_address_operation_mode_comfort = config.get(
"group_address_operation_mode_comfort"
)
group_address_operation_mode_standby = config.get(
"group_address_operation_mode_standby"
)
group_address_controller_status = config.get("group_address_controller_status")
group_address_controller_status_state = config.get(
"group_address_controller_status_state"
)
group_address_controller_mode = config.get("group_address_controller_mode")
group_address_controller_mode_state = config.get(
"group_address_controller_mode_state"
)
group_address_heat_cool = config.get("group_address_heat_cool")
group_address_heat_cool_state = config.get("group_address_heat_cool_state")
return cls(
xknx,
name,
group_address_operation_mode=group_address_operation_mode,
group_address_operation_mode_state=group_address_operation_mode_state,
group_address_operation_mode_protection=group_address_operation_mode_protection,
group_address_operation_mode_night=group_address_operation_mode_night,
group_address_operation_mode_comfort=group_address_operation_mode_comfort,
group_address_operation_mode_standby=group_address_operation_mode_standby,
group_address_controller_status=group_address_controller_status,
group_address_controller_status_state=group_address_controller_status_state,
group_address_controller_mode=group_address_controller_mode,
group_address_controller_mode_state=group_address_controller_mode_state,
group_address_heat_cool=group_address_heat_cool,
group_address_heat_cool_state=group_address_heat_cool_state,
)
def _iter_remote_values(
self,
) -> Iterator["RemoteValue"]:
"""Iterate climate mode RemoteValue classes."""
return chain(
self._iter_byte_operation_modes(),
self._iter_controller_remote_values(),
self._iter_binary_operation_modes(),
)
def _iter_byte_operation_modes(
self,
) -> Iterator[RemoteValueClimateMode[HVACOperationMode]]:
"""Iterate normal DPT 20.102 operation mode remote values."""
yield from (
self.remote_value_operation_mode,
self.remote_value_controller_status,
)
def _iter_controller_remote_values(
self,
) -> Iterator[RemoteValueClimateModeBase[HVACControllerMode]]:
"""Iterate DPT 20.105 controller remote values."""
yield from (
self.remote_value_controller_mode,
self.remote_value_heat_cool,
)
def _iter_binary_operation_modes(self) -> Iterator[RemoteValueBinaryOperationMode]:
"""Iterate DPT 1 binary operation modes."""
yield from (
self.remote_value_operation_mode_comfort,
self.remote_value_operation_mode_night,
self.remote_value_operation_mode_protection,
self.remote_value_operation_mode_standby,
)
async def _set_internal_operation_mode(
self, operation_mode: HVACOperationMode
) -> None:
"""Set internal value of operation mode. Call hooks if operation mode was changed."""
if operation_mode != self.operation_mode:
self.operation_mode = operation_mode
await self.after_update()
async def _set_internal_controller_mode(
self, controller_mode: HVACControllerMode
) -> None:
"""Set internal value of controller mode. Call hooks if controller mode was changed."""
if controller_mode != self.controller_mode:
self.controller_mode = controller_mode
await self.after_update()
async def set_operation_mode(self, operation_mode: HVACOperationMode) -> None:
"""Set the operation mode of a thermostat. Send new operation_mode to BUS and update internal state."""
if (
not self.supports_operation_mode
or operation_mode not in self._operation_modes
):
raise DeviceIllegalValue(
"operation (preset) mode not supported", str(operation_mode)
)
rv: RemoteValueClimateModeBase[HVACOperationMode]
for rv in chain(
self._iter_byte_operation_modes(), self._iter_binary_operation_modes()
):
if rv.writable and operation_mode in rv.supported_operation_modes():
await rv.set(operation_mode)
await self._set_internal_operation_mode(operation_mode)
async def set_controller_mode(self, controller_mode: HVACControllerMode) -> None:
"""Set the controller mode of a thermostat. Send new controller mode to the bus and update internal state."""
if (
not self.supports_controller_mode
or controller_mode not in self._controller_modes
):
raise DeviceIllegalValue(
"controller (HVAC) mode not supported", str(controller_mode)
)
rv: RemoteValueClimateModeBase[HVACControllerMode]
for rv in self._iter_controller_remote_values():
if rv.writable and controller_mode in rv.supported_operation_modes():
await rv.set(controller_mode)
await self._set_internal_controller_mode(controller_mode)
@property
def operation_modes(self) -> List[HVACOperationMode]:
"""Return all configured operation modes."""
if not self.supports_operation_mode:
return []
return self._operation_modes
@property
def controller_modes(self) -> List[HVACControllerMode]:
"""Return all configured controller modes."""
if not self.supports_controller_mode:
return []
return self._controller_modes
def gather_operation_modes(self) -> List[HVACOperationMode]:
"""Gather operation modes from RemoteValues."""
operation_modes: List[HVACOperationMode] = []
for rv in chain(
self._iter_binary_operation_modes(), self._iter_byte_operation_modes()
):
if rv.writable:
operation_modes.extend(rv.supported_operation_modes())
# remove duplicates
return list(set(operation_modes))
def gather_controller_modes(self) -> List[HVACControllerMode]:
"""Gather controller modes from RemoteValues."""
controller_modes: List[HVACControllerMode] = []
for rv in self._iter_controller_remote_values():
if rv.writable:
controller_modes.extend(rv.supported_operation_modes())
# remove duplicates
return list(set(controller_modes))
async def process_group_write(self, telegram: "Telegram") -> None:
"""Process incoming and outgoing GROUP WRITE telegram."""
if self.supports_operation_mode:
for rv in self._iter_remote_values():
if await rv.process(telegram):
# ignore inactive RemoteValueBinaryOperationMode
if rv.value:
await self._set_internal_operation_mode(rv.value)
return
if self.supports_controller_mode:
for rv in self._iter_controller_remote_values():
if await rv.process(telegram):
await self._set_internal_controller_mode(rv.value)
return
def __str__(self) -> str:
"""Return object as readable string."""
return (
'<ClimateMode name="{}" '
'operation_mode="{}" '
'controller_mode="{}" '
'controller_status="{}" '
"/>".format(
self.name,
self.remote_value_operation_mode.group_addr_str(),
self.remote_value_controller_mode.group_addr_str(),
self.remote_value_controller_status.group_addr_str(),
)
)
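# Illustrative usage sketch (not part of the original module; group addresses
# are placeholders):
#
#     climate_mode = ClimateMode(
#         xknx,
#         name="Living room mode",
#         group_address_operation_mode="1/2/3",
#         group_address_controller_mode="1/2/4",
#     )
#     await climate_mode.set_operation_mode(HVACOperationMode.COMFORT)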
|
py | 1a4a518c95047051a7d0d9dee94b3749ee5ccc51 | import requests
from mooshak2api.user import User
class Client:
"""
    A bundling of a mooshak2 endpoint and a User. In most cases you should use mooshak2api.login rather than instantiating
    this class yourself.
    You should add a User to this object manually by setting self.user.
Included are default headers that should be used for most JSON based messages.
"""
endpoint = None
user = None
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
def __init__(self, endpoint):
self.endpoint = endpoint
assert self.test()
def headers_with_auth(self) -> dict:
"""
Returns common headers with the JWT Header included. Ensure that self.user is a User Object, and that
the user has been logged in with the .login() method.
:return: returns a dict containing headers
"""
try:
return {**self.headers, "Authorization": f"Bearer {self.user.token}"}
except AttributeError:
raise Exception("You should ensure that this client has a user set, and that the user has been logged in!")
def test(self):
"""
        Tests the connection to the server
        :return: returns True if a connection could be made
"""
r = requests.get(self.endpoint)
return r.json()["result"]["value"] == "Welcome to Mooshak 2.0 API"
def refresh(self):
"""
        Refreshes this user's token. Should be called periodically to ensure that the user stays logged in
"""
r = requests.post(f"{self.endpoint}auth/refresh/", headers=self.headers_with_auth())
r.raise_for_status()
def login(endpoint, username, password, contest=None, admin=False):
"""
Creates an authenticated client to interact with the Mooshak 2 API
:param endpoint: The API Endpoint, ending in a slash. E.g, https://mooshak2.dcc.fc.up.pt/mooshak-test/api/
    :param username: A username for the connection, e.g. admin
    :param password: A password for the connection, e.g. admin
:param contest: The contest for the (non admin) user to interact with
:param admin: If this is an admin account, and you want to administrate
:return: A Client Object
"""
if not admin and contest is None:
raise Exception("You must specify a contest, or set admin to True")
client = Client(endpoint)
client_user = User(username, password, contest=contest, admin=admin)
client_user.login(client)
client.user = client_user
return client
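# Illustrative usage (not part of the original module; endpoint and credentials
# are placeholders):
#
#     client = login("https://mooshak2.dcc.fc.up.pt/mooshak-test/api/",
#                    "admin", "admin", admin=True)
#     client.refresh()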
|
py | 1a4a51dcfa7c2da3a0e8fde33c671bef30fcfce6 | #!/usr/bin/env python
import contextlib
from unittest import mock
from absl.testing import absltest
from grr_response_client import vfs as client_vfs
from grr_response_client.unprivileged import test_lib
from grr_response_client.unprivileged.filesystem import vfs
from grr_response_client.vfs_handlers import tsk_test_lib
class VfsTskTest(tsk_test_lib.TSKTest):
def setUp(self):
super().setUp()
stack = contextlib.ExitStack()
self.addCleanup(stack.close)
stack.enter_context(
mock.patch.dict(client_vfs.VFS_HANDLERS, {
vfs.UnprivilegedTskFile.supported_pathtype: vfs.UnprivilegedTskFile,
}))
def setUpModule():
test_lib.SetUpDummyConfig()
client_vfs.Init()
def tearDownModule():
vfs.MOUNT_CACHE.Flush()
if __name__ == "__main__":
absltest.main()
|
py | 1a4a51efb63ef9952fd1bea533552954027da4db | from django import template
register = template.Library()
@register.simple_tag
def get_param(obj, period, key, floatformat, *args, **kwargs):
fformat = '%.' + '%d' % int(floatformat) + 'f'
return fformat % obj.model_params[period][key]
# TODO: make a class and CSS for this
@register.simple_tag
def color_adf(value, threshold):
try:
if value > threshold:
return 'background-color:#e9bdba;'
print(value, threshold, value > threshold)
except (ValueError, TypeError) as e:
print(e)
return ''
@register.simple_tag
def color_zscore(value, threshold):
try:
if (value > threshold) or (value < -threshold):
return 'background-color:#bde9ba;'
print(value, threshold, (value > threshold) or (value < -threshold))
except (ValueError, TypeError) as e:
print(e)
return ''
@register.simple_tag
def n_p_coint(obj, pvalue):
return obj.n_p_coint(pvalue) |
py | 1a4a5492d5d8399d90a99f3a7f3cbf2b470c9e35 | """API to persist and read upload data metrics"""
import os
import sqlite3
DATABASE_FILENAME = os.path.expanduser('~/pypic.db')
def create_upload_table():
"""Create the necessary database objects for upload monitoring and
persistent data regarding all things video uploads
"""
db_connection = sqlite3.connect(DATABASE_FILENAME)
cursor = db_connection.cursor()
if not len(
cursor.execute(
'select * from sqlite_master where name = ?',
('uploads',)
).fetchall()):
cursor.execute(
'''create table uploads (
date_created text,
file_name text,
uploaded integer,
other_info text
)'''
)
def insert_upload_data(file_name, date_created, is_uploaded, other_info):
"""Insert the necessary data to reflect whether or not a video was
uploaded
"""
db_connection = sqlite3.connect(DATABASE_FILENAME)
cursor = db_connection.cursor()
cursor.execute(
'insert into uploads values (?, ?, ?, ?)',
(str(date_created), file_name, int(is_uploaded), other_info)
)
db_connection.commit()
db_connection.close()
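# Illustrative usage (not part of the original module; values are placeholders):
#
#     create_upload_table()
#     insert_upload_data('clip_0001.mp4', '2021-01-01 12:00:00', True, 'ok')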
|
py | 1a4a555fc6486cdf3bf31852f752e878c2d57627 | from gcn.inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1./keep_prob)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class Layer(object):
"""Base layer class. Defines basic API for all layer objects.
Implementation inspired by keras (http://keras.io).
# Properties
name: String, defines the variable scope of the layer.
logging: Boolean, switches Tensorflow histogram logging on/off
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
_log_vars(): Log all variables
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.sparse_inputs = False
def _call(self, inputs):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
if self.logging and not self.sparse_inputs:
tf.summary.histogram(self.name + '/inputs', inputs)
outputs = self._call(inputs)
if self.logging:
tf.summary.histogram(self.name + '/outputs', outputs)
return outputs
def _log_vars(self):
for var in self.vars:
tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
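# Illustrative sketch (not part of the original module): a custom layer only
# needs to override _call(); __call__ wraps it with name scoping and optional
# histogram logging.
#
#     class Identity(Layer):
#         def _call(self, inputs):
#             return inputs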
class Dense(Layer):
"""Dense layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
act=tf.nn.relu, bias=False, featureless=False, **kwargs):
super(Dense, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = glorot([input_dim, output_dim],
name='weights')
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# transform
output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class GraphConvolution(Layer):
"""Graph convolution layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
sparse_inputs=False, act=tf.nn.relu, bias=False,
featureless=False, **kwargs):
super(GraphConvolution, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
#self.degree_mat = degree_mat
self.act = act
self.support = placeholders['support']
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
#self.ob = placeholders['observation']
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
#with tf.variable_scope('_vars'):
#for i in range(len(self.support)):
for i in range(1):
self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],name='weights_' + str(i))
tf.add_to_collection('weight', self.vars['weights_' + str(i)])
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
with tf.variable_scope(self.name +'_adj_vars'):
self.vars['adj'] = tf.get_variable(name='adj',shape=self.support.shape,
initializer=tf.constant_initializer(self.support),
trainable=False)
tf.add_to_collection('adj', self.vars['adj'])
# with tf.variable_scope(self.name +'_identity_vars'):
# self.vars['identity'] = tf.get_variable(name='identity',shape=self.identity.shape,
# initializer=tf.constant_initializer(self.identity),
# trainable=False)
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# convolve
supports = list()
#for i in range(len(self.support)):
for i in range(1):
if not self.featureless:
pre_sup = dot(x, self.vars['weights_' + str(i)],
sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights_' + str(i)]
degree_inverted = tf.diag(tf.rsqrt(tf.reduce_sum(self.vars['adj'], 1)))
normalized_adj = tf.matmul(self.vars['adj'], degree_inverted)
normalized_adj = tf.transpose(normalized_adj)
normalized_adj = tf.matmul(normalized_adj, degree_inverted)
support = dot(normalized_adj, pre_sup, sparse=False)
supports.append(support)
output = tf.add_n(supports)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
|
py | 1a4a55a67c938e4ecee1f99ee6e965adc2a82c04 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
import sys
sys.path.extend([os.path.dirname(os.path.abspath(__file__))])
import cv2
import time
import numpy as np
import tensorflow as tf
import utils
from OneEuroFilter import OneEuroFilter
class VNectEstimator:
# the side length of the CNN input box
box_size = 368
# the input box size is 8 times the side length of the output heatmaps
hm_factor = 8
    # total number of joints to be detected
joints_sum = 21
# parent joint indexes of each joint (for plotting the skeletal lines)
joint_parents = [16, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14, 1, 4, 7, 10, 13]
def __init__(self):
print('Initializing VNect Estimator...')
# the scale factors to zoom down the input image crops
# put different scales to get better average performance
        # for faster loops, use fewer scales, e.g. [1] or [1, 0.7]
self.scales = [1, 0.85, 0.7]
# initializing one euro filters for all the joints
filter_config_2d = {
'freq': 30, # system frequency about 30 Hz
'mincutoff': 1.7, # value refer to the paper
'beta': 0.3, # value refer to the paper
'dcutoff': 0.4 # not mentioned, empirically set
}
filter_config_3d = {
'freq': 30, # system frequency about 30 Hz
'mincutoff': 0.8, # value refer to the paper
'beta': 0.4, # value refer to the paper
'dcutoff': 0.4 # not mentioned, empirically set
}
self.filter_2d = [(OneEuroFilter(**filter_config_2d),
OneEuroFilter(**filter_config_2d))
for _ in range(self.joints_sum)]
self.filter_3d = [(OneEuroFilter(**filter_config_3d),
OneEuroFilter(**filter_config_3d),
OneEuroFilter(**filter_config_3d))
for _ in range(self.joints_sum)]
# load pretrained VNect model
self.sess = tf.Session()
if os.getcwd().endswith('src'):
saver = tf.train.import_meta_graph('../models/tf_model/vnect_tf.meta')
saver.restore(self.sess, tf.train.latest_checkpoint('../models/tf_model/'))
else:
saver = tf.train.import_meta_graph('./models/tf_model/vnect_tf.meta')
saver.restore(self.sess, tf.train.latest_checkpoint('./models/tf_model/'))
graph = tf.get_default_graph()
self.input_crops = graph.get_tensor_by_name('Placeholder:0')
self.heatmap = graph.get_tensor_by_name('split_2:0')
self.x_heatmap = graph.get_tensor_by_name('split_2:1')
self.y_heatmap = graph.get_tensor_by_name('split_2:2')
self.z_heatmap = graph.get_tensor_by_name('split_2:3')
print('VNect Estimator initialized.')
@staticmethod
def gen_input_batch(img_input, box_size, scales):
# input image --> sqrared image acceptable for the model
img_square, scaler, [offset_x, offset_y] = utils.img_scale_squarify(img_input, box_size)
# generate multi-scale image batch
input_batch = []
for scale in scales:
img = utils.img_scale_padding(img_square, scale, box_size) if scale < 1 else img_square
input_batch.append(img)
# image value range: [0, 255) --> [-0.4, 0.6)
input_batch = np.asarray(input_batch, dtype=np.float32) / 255 - 0.4
return input_batch, scaler, [offset_x, offset_y]
def joint_filter(self, joints, dim=2):
t = time.time()
if dim == 2:
for i in range(self.joints_sum):
joints[i, 0] = self.filter_2d[i][0](joints[i, 0], t)
joints[i, 1] = self.filter_2d[i][1](joints[i, 1], t)
else:
for i in range(self.joints_sum):
joints[i, 0] = self.filter_3d[i][0](joints[i, 0], t)
joints[i, 1] = self.filter_3d[i][1](joints[i, 1], t)
joints[i, 2] = self.filter_3d[i][2](joints[i, 2], t)
return joints
def __call__(self, img_input):
t0 = time.time()
img_batch, scaler, [offset_x, offset_y] = self.gen_input_batch(img_input, self.box_size, self.scales)
hm, xm, ym, zm = self.sess.run([self.heatmap,
self.x_heatmap,
self.y_heatmap,
self.z_heatmap],
{self.input_crops: img_batch})
# averaging the outputs with different scales
hm_size = self.box_size // self.hm_factor
hm_avg = np.zeros((hm_size, hm_size, self.joints_sum))
xm_avg = np.zeros((hm_size, hm_size, self.joints_sum))
ym_avg = np.zeros((hm_size, hm_size, self.joints_sum))
zm_avg = np.zeros((hm_size, hm_size, self.joints_sum))
for i in range(len(self.scales)):
rescale = 1.0 / self.scales[i]
scaled_hm = utils.img_scale(hm[i, :, :, :], rescale)
scaled_x_hm = utils.img_scale(xm[i, :, :, :], rescale)
scaled_y_hm = utils.img_scale(ym[i, :, :, :], rescale)
scaled_z_hm = utils.img_scale(zm[i, :, :, :], rescale)
mid = [scaled_hm.shape[0] // 2, scaled_hm.shape[1] // 2]
hm_avg += scaled_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
xm_avg += scaled_x_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
ym_avg += scaled_y_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
zm_avg += scaled_z_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
hm_avg /= len(self.scales)
xm_avg /= len(self.scales)
ym_avg /= len(self.scales)
zm_avg /= len(self.scales)
# joints_2d are in box size scale
joints_2d = utils.extract_2d_joints(hm_avg, self.box_size, self.hm_factor)
joints_2d = self.joint_filter(joints_2d, dim=2)
joints_3d = utils.extract_3d_joints(joints_2d, xm_avg, ym_avg, zm_avg, self.hm_factor)
joints_3d = self.joint_filter(joints_3d, dim=3)
# rescale joints_2d to input image scale
joints_2d[:, 0] = (joints_2d[:, 0] - offset_y) / scaler
joints_2d[:, 1] = (joints_2d[:, 1] - offset_x) / scaler
print('FPS: {:>2.2f}'.format(1 / (time.time() - t0)))
return joints_2d, joints_3d
if __name__ == '__main__':
estimator = VNectEstimator()
j_2d, j_3d = estimator(cv2.imread('../pic/test_pic.jpg'))
print('\njoints_2d')
for i, j in enumerate(j_2d):
print(i, j)
print('\njoints_3d')
for i, j in enumerate(j_3d):
print(i, j)
|
py | 1a4a56330f9fcb6562c50647b39868c5d258a159 | """An implementation of unbounded stacks."""
class Stack:
"""An ordered collection of items, organised in a pile.
Items are added to the top of the pile.
Only the last item added can be accessed or removed.
"""
# Creator
# -------
def __init__(self):
"""Initialise the stack to be empty."""
self._items = []
# Inspectors
# ----------
def is_empty(self):
"""Return True if the stack is empty, otherwise False."""
return self._items == []
def __len__(self):
"""Implement the `len` function for stacks.
Return the number of items in the stack.
"""
return len(self._items)
def top(self):
"""Return the item on top of the stack.
Assume the stack is not empty.
"""
assert not self.is_empty()
return self._items[-1]
def __contains__(self, item):
"""Implement the `in` operator for stacks.
Return True if the stack has the item, otherwise False.
"""
return item in self._items
# Modifiers
# ---------
def push(self, item):
"""Add the item to the top of the stack. Return nothing."""
self._items.append(item)
def pop(self):
"""Remove and return the item on top of the stack.
Assume the stack is not empty.
"""
assert not self.is_empty()
return self._items.pop()
# Exercises
# ---------
# - What is the run-time complexity of each operation?
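    # Example usage (illustrative, not part of the original module):
    #
    #     s = Stack()
    #     s.push(1)
    #     s.push(2)
    #     s.top()       # -> 2
    #     s.pop()       # -> 2
    #     len(s)        # -> 1
    #     s.is_empty()  # -> False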
|
py | 1a4a56c39fb24f2f6e0eb21ac565c2ce75af2ca6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 24 17:19:18 2022
@author: justoliver
"""
import pymunk, sys
from pymunk.pygame_util import *
from pymunk.vec2d import Vec2d
import pygame
from pygame.locals import *
import numpy as np
from PIL import Image
from pymunk.pygame_util import DrawOptions
size = 800, 800
display = pygame.display.set_mode((size))
options = DrawOptions(display)
clock = pygame.time.Clock()
space = pymunk.Space()
space.gravity = 0, 981
b0 = space.static_body
b1 = space.static_body
FPS = 120
def convert_coordinates(point):
return int(point[0]), int(800-point[1])
def get_theta(x_h, x_1, y_h, y_1):
return np.arctan2(x_1 - x_h, y_1 - y_h)
def get_phi(x1, x2, y1, y2, theta):
return np.arctan2(x2 - x1, y2- y1) - theta
def get_iota(x1, x2, y1, y2, theta, phi):
return np.arctan2(x2 -x1, y2 - y1) - theta - phi
class measurement_body:
def __init__(self):
self.body = pymunk.Body()
self.body.position = (400,40)
self.shape = pymunk.Circle(self.body, 1)
self.shape.color = (255,0,0)
space.add(self.body, self.shape)
class Segment2:
def __init__(self, p0, a, b, radius=10, center_of_gravity = (0,0), density=0.01):
self.body = pymunk.Body()
self.body.position = p0
self.radius = radius
self.a = a
self.b = b
self.body.center_of_gravity = center_of_gravity
self.shape = pymunk.Segment(self.body, self.a, self.b, radius)
self.shape.density = density
self.shape.elasticity = 0
self.shape.filter = pymunk.ShapeFilter(group=1)
self.shape.color = (0, 255, 0, 0)
space.add(self.body, self.shape)
class Leg:
def __init__(self, p0, a, b, c, d, radius=10, center_of_gravity = (0,0), density=0.01):
self.body = pymunk.Body()
self.body.position = p0
self.radius = radius
self.a = a
self.b = b
self.c = c
self.d = d
self.body.center_of_gravity = center_of_gravity
self.leg= pymunk.Segment(self.body, self.a, self.b , radius=radius)
self.leg.filter = pymunk.ShapeFilter(group = 1)
self.leg.density = density
        self.foot = pymunk.Segment(self.body, self.c, self.d, radius)
        self.foot.filter = pymunk.ShapeFilter(group=1)
        self.foot.density = density
        # apply the same physics and appearance settings to both segments
        for shape in (self.leg, self.foot):
            shape.elasticity = 0
            shape.color = (0, 255, 0, 0)
        space.add(self.body, self.leg, self.foot)
class Simplemotor:
def __init__(self, b, b2, rate=5, switch="off"):
self.rate = rate
self.b = b
self.b2 = b2
self.simplemotor = pymunk.SimpleMotor(self.b, self.b2, self.rate)
self.switch = switch
def drive(self, constraints, phi):
if phi >= np.pi/2 and len(constraints) == 5:
space.remove(self.simplemotor)
elif self.switch == "off" and len(constraints) == 5:
space.remove(self.simplemotor)
elif self.switch == "on" and len(constraints) < 5 and phi < np.pi/2:
space.add(self.simplemotor)
class RotaryLimitJoint:
def __init__(self, b, b2, min, max, collide=True):
joint = pymunk.constraints.RotaryLimitJoint(b, b2, min, max)
joint.collide_bodies = collide
space.add(joint)
# class dead_hang_joint:
# def __init__(self, b, b2, min, max, collide=True):
#         joint = pymunk.constraints.RotaryLimitJoint(b, b2, min, max)
# joint.collide_bodies = collide
# def dead_position(self, constraints, phi):
# if phi == 0 and len(constraints) < 6:
class PivotJoint:
def __init__(self, b, b2, a=(0, 0), a2=(0, 0), collide=True):
joint = pymunk.constraints.PinJoint(b, b2, a, a2)
joint.collide_bodies = collide
space.add(joint)
class PinJoint:
def __init__(self, b, b2, a=(0, 0), a2=(0, 0)):
joint = pymunk.constraints.PinJoint(b, b2, a, a2)
space.add(joint)
class Swing_body:
def __init__(self,p0, vx1,vy1,vx2,vy2,vx3,vy3, radius=10, center_of_gravity = (0,0), density=0.05):
self.body = pymunk.Body()
self.body.position = p0
s1 = pymunk.Segment(self.body, vx1, vy1 , radius=radius)
s1.filter = pymunk.ShapeFilter(group = 1)
s1.density = density
s2 = pymunk.Segment(self.body, vx2, vy2, radius=radius)
s2.filter = pymunk.ShapeFilter(group = 1)
s2.density = density
s3 = pymunk.Segment(self.body, vx3,vy3, radius=radius)
s3.filter = pymunk.ShapeFilter(group = 1)
s3.density = density
space.add(self.body, s1,s2,s3)
def angle_reached(theta, high_score):
if len(high_score) == 0:
high_score.append(theta)
elif high_score[0] < abs(theta):
high_score[0] = abs(theta)
highest_score = high_score[0]
return high_score
# b1 = measurement_body()
hinge_point1 = (0, -100) # seg 1
hinge_point2 = (0, 100)
swing_body = (400, 625)
swing_top1 = (30, -25)
swing_top2 = (-30, -25)
swing_mid1 = (0, -25)
swing_mid2 = (0, 25)
swing_bottom1 = (-20, 25)
swing_bottom2 = (20, 25)
hinge_point3 = (0, -30) # seg 2
hinge_point4 = (0, 30)
rate = 3
segment = Segment2((400 , 500), hinge_point1 , hinge_point2)
segment2 = Segment2((420,680), hinge_point3, hinge_point4, density= 0.05)
swing = Swing_body(swing_body, swing_top1,swing_top2, swing_mid1, swing_mid2, swing_bottom1, swing_bottom2)
PinJoint(swing.body, segment2.body, swing_bottom2, hinge_point3)
PinJoint(segment.body, swing.body, hinge_point2, swing_mid1)
PinJoint(b0, segment.body, (400,400), hinge_point1)
simplemotor = Simplemotor(swing.body, segment2.body, rate)
rotlimjoint = RotaryLimitJoint(swing.body, segment2.body, -np.pi/2, np.pi/4)
def game():
pygame.display.set_caption("Double pendulum interactive Simulation")
high_score = []
while True:
xh, yh = (400,400)
x1, y1 = segment.body.position[0], segment.body.position[1]
theta = get_theta(xh, x1, yh, y1)
x2, y2 = segment.body.position[0] + 100*np.sin(theta) , segment.body.position[1] + 100*np.cos(theta)
x3, y3 = swing.body.position[0], swing.body.position[1]
phi = get_phi(x2, x3, y2, y3, theta)
x4, y4 = swing.body.position[0] + 25*np.sin(theta+phi) + 20*np.cos(theta+phi), swing.body.position[1] + 25*np.cos(theta+phi) - 20*np.sin(theta+phi)
x5, y5 = segment2.body.position[0], segment2.body.position[1]
iota = get_iota(x4, x5, y4, y5, theta, phi)
print(f"iota={iota}")
angvel1 = swing.body.angular_velocity
angvel2 = -segment2.body.angular_velocity
# print(f"seg1:{angvel1}\nseg2:{angvel2}")
# print(segment2.body.angular_velocity)
# abs_vel = np.sqrt(segment.body.velocity[0]**2 + segment.body.velocity[1]**2)
# if segment.body.velocity[0]< 1:
# rad_vel = -abs_vel/150
# else:
# rad_vel = abs_vel/150
# print(rad_vel)
for event in pygame.event.get(): # checking for user input
if event.type == pygame.QUIT:
print(f"Highest angle reached was:{np.rad2deg(high_score)}")
pygame.quit()
sys.exit()
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]: # kick input
simplemotor.switch = "on"
if iota >= np.pi/2:
if len(space.constraints) == 5:
space.remove(simplemotor.simplemotor)
segment2.body.angular_velocity = angvel1
else:
simplemotor.drive(space.constraints, phi)
else:
simplemotor.switch = "off"
if iota <= 0:
segment2.body.angular_velocity = angvel1
else:
simplemotor.drive(space.constraints, phi)
high_score = angle_reached(theta, high_score)
display.fill((255, 255, 255))
space.debug_draw(options)
pygame.display.update()
clock.tick(FPS) # limiting frames per second to 120
space.step(1/FPS)
game()
pygame.quit() |
py | 1a4a56cc4f9bfd4f60124f66ef514d751ae9c844 | import struct
from gearman.constants import PRIORITY_NONE, PRIORITY_LOW, PRIORITY_HIGH
from gearman.errors import ProtocolError
from gearman import compat
# Protocol specific constants
NULL_CHAR = '\x00'
MAGIC_RES_STRING = '%sRES' % NULL_CHAR
MAGIC_REQ_STRING = '%sREQ' % NULL_CHAR
COMMAND_HEADER_SIZE = 12
# Gearman commands 1-9
GEARMAN_COMMAND_CAN_DO = 1
GEARMAN_COMMAND_CANT_DO = 2
GEARMAN_COMMAND_RESET_ABILITIES = 3
GEARMAN_COMMAND_PRE_SLEEP = 4
GEARMAN_COMMAND_NOOP = 6
GEARMAN_COMMAND_SUBMIT_JOB = 7
GEARMAN_COMMAND_JOB_CREATED = 8
GEARMAN_COMMAND_GRAB_JOB = 9
# Gearman commands 10-19
GEARMAN_COMMAND_NO_JOB = 10
GEARMAN_COMMAND_JOB_ASSIGN = 11
GEARMAN_COMMAND_WORK_STATUS = 12
GEARMAN_COMMAND_WORK_COMPLETE = 13
GEARMAN_COMMAND_WORK_FAIL = 14
GEARMAN_COMMAND_GET_STATUS = 15
GEARMAN_COMMAND_ECHO_REQ = 16
GEARMAN_COMMAND_ECHO_RES = 17
GEARMAN_COMMAND_SUBMIT_JOB_BG = 18
GEARMAN_COMMAND_ERROR = 19
# Gearman commands 20-29
GEARMAN_COMMAND_STATUS_RES = 20
GEARMAN_COMMAND_SUBMIT_JOB_HIGH = 21
GEARMAN_COMMAND_SET_CLIENT_ID = 22
GEARMAN_COMMAND_CAN_DO_TIMEOUT = 23
GEARMAN_COMMAND_ALL_YOURS = 24
GEARMAN_COMMAND_WORK_EXCEPTION = 25
GEARMAN_COMMAND_OPTION_REQ = 26
GEARMAN_COMMAND_OPTION_RES = 27
GEARMAN_COMMAND_WORK_DATA = 28
GEARMAN_COMMAND_WORK_WARNING = 29
# Gearman commands 30-39
GEARMAN_COMMAND_GRAB_JOB_UNIQ = 30
GEARMAN_COMMAND_JOB_ASSIGN_UNIQ = 31
GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG = 32
GEARMAN_COMMAND_SUBMIT_JOB_LOW = 33
GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG = 34
# Fake command code
GEARMAN_COMMAND_TEXT_COMMAND = 9999
GEARMAN_PARAMS_FOR_COMMAND = {
# Gearman commands 1-9
GEARMAN_COMMAND_CAN_DO: ['task'],
GEARMAN_COMMAND_CANT_DO: ['task'],
GEARMAN_COMMAND_RESET_ABILITIES: [],
GEARMAN_COMMAND_PRE_SLEEP: [],
GEARMAN_COMMAND_NOOP: [],
GEARMAN_COMMAND_SUBMIT_JOB: ['task', 'unique', 'data'],
GEARMAN_COMMAND_JOB_CREATED: ['job_handle'],
GEARMAN_COMMAND_GRAB_JOB: [],
# Gearman commands 10-19
GEARMAN_COMMAND_NO_JOB: [],
GEARMAN_COMMAND_JOB_ASSIGN: ['job_handle', 'task', 'data'],
GEARMAN_COMMAND_WORK_STATUS: ['job_handle', 'numerator', 'denominator'],
GEARMAN_COMMAND_WORK_COMPLETE: ['job_handle', 'data'],
GEARMAN_COMMAND_WORK_FAIL: ['job_handle'],
GEARMAN_COMMAND_GET_STATUS: ['job_handle'],
GEARMAN_COMMAND_ECHO_REQ: ['data'],
GEARMAN_COMMAND_ECHO_RES: ['data'],
GEARMAN_COMMAND_SUBMIT_JOB_BG: ['task', 'unique', 'data'],
GEARMAN_COMMAND_ERROR: ['error_code', 'error_text'],
# Gearman commands 20-29
GEARMAN_COMMAND_STATUS_RES: ['job_handle', 'known', 'running', 'numerator', 'denominator'],
GEARMAN_COMMAND_SUBMIT_JOB_HIGH: ['task', 'unique', 'data'],
GEARMAN_COMMAND_SET_CLIENT_ID: ['client_id'],
GEARMAN_COMMAND_CAN_DO_TIMEOUT: ['task', 'timeout'],
GEARMAN_COMMAND_ALL_YOURS: [],
GEARMAN_COMMAND_WORK_EXCEPTION: ['job_handle', 'data'],
GEARMAN_COMMAND_OPTION_REQ: ['option_name'],
GEARMAN_COMMAND_OPTION_RES: ['option_name'],
GEARMAN_COMMAND_WORK_DATA: ['job_handle', 'data'],
GEARMAN_COMMAND_WORK_WARNING: ['job_handle', 'data'],
# Gearman commands 30-39
GEARMAN_COMMAND_GRAB_JOB_UNIQ: [],
GEARMAN_COMMAND_JOB_ASSIGN_UNIQ: ['job_handle', 'task', 'unique', 'data'],
GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG: ['task', 'unique', 'data'],
GEARMAN_COMMAND_SUBMIT_JOB_LOW: ['task', 'unique', 'data'],
GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG: ['task', 'unique', 'data'],
# Fake gearman command
GEARMAN_COMMAND_TEXT_COMMAND: ['raw_text']
}
GEARMAN_COMMAND_TO_NAME = {
GEARMAN_COMMAND_CAN_DO: 'GEARMAN_COMMAND_CAN_DO',
GEARMAN_COMMAND_CANT_DO: 'GEARMAN_COMMAND_CANT_DO',
GEARMAN_COMMAND_RESET_ABILITIES: 'GEARMAN_COMMAND_RESET_ABILITIES',
GEARMAN_COMMAND_PRE_SLEEP: 'GEARMAN_COMMAND_PRE_SLEEP',
GEARMAN_COMMAND_NOOP: 'GEARMAN_COMMAND_NOOP',
GEARMAN_COMMAND_SUBMIT_JOB: 'GEARMAN_COMMAND_SUBMIT_JOB',
GEARMAN_COMMAND_JOB_CREATED: 'GEARMAN_COMMAND_JOB_CREATED',
GEARMAN_COMMAND_GRAB_JOB: 'GEARMAN_COMMAND_GRAB_JOB',
# Gearman commands 10-19
GEARMAN_COMMAND_NO_JOB: 'GEARMAN_COMMAND_NO_JOB',
GEARMAN_COMMAND_JOB_ASSIGN: 'GEARMAN_COMMAND_JOB_ASSIGN',
GEARMAN_COMMAND_WORK_STATUS: 'GEARMAN_COMMAND_WORK_STATUS',
GEARMAN_COMMAND_WORK_COMPLETE: 'GEARMAN_COMMAND_WORK_COMPLETE',
GEARMAN_COMMAND_WORK_FAIL: 'GEARMAN_COMMAND_WORK_FAIL',
GEARMAN_COMMAND_GET_STATUS: 'GEARMAN_COMMAND_GET_STATUS',
GEARMAN_COMMAND_ECHO_REQ: 'GEARMAN_COMMAND_ECHO_REQ',
GEARMAN_COMMAND_ECHO_RES: 'GEARMAN_COMMAND_ECHO_RES',
GEARMAN_COMMAND_SUBMIT_JOB_BG: 'GEARMAN_COMMAND_SUBMIT_JOB_BG',
GEARMAN_COMMAND_ERROR: 'GEARMAN_COMMAND_ERROR',
# Gearman commands 20-29
GEARMAN_COMMAND_STATUS_RES: 'GEARMAN_COMMAND_STATUS_RES',
GEARMAN_COMMAND_SUBMIT_JOB_HIGH: 'GEARMAN_COMMAND_SUBMIT_JOB_HIGH',
GEARMAN_COMMAND_SET_CLIENT_ID: 'GEARMAN_COMMAND_SET_CLIENT_ID',
GEARMAN_COMMAND_CAN_DO_TIMEOUT: 'GEARMAN_COMMAND_CAN_DO_TIMEOUT',
GEARMAN_COMMAND_ALL_YOURS: 'GEARMAN_COMMAND_ALL_YOURS',
GEARMAN_COMMAND_WORK_EXCEPTION: 'GEARMAN_COMMAND_WORK_EXCEPTION',
GEARMAN_COMMAND_OPTION_REQ: 'GEARMAN_COMMAND_OPTION_REQ',
GEARMAN_COMMAND_OPTION_RES: 'GEARMAN_COMMAND_OPTION_RES',
GEARMAN_COMMAND_WORK_DATA: 'GEARMAN_COMMAND_WORK_DATA',
GEARMAN_COMMAND_WORK_WARNING: 'GEARMAN_COMMAND_WORK_WARNING',
# Gearman commands 30-39
GEARMAN_COMMAND_GRAB_JOB_UNIQ: 'GEARMAN_COMMAND_GRAB_JOB_UNIQ',
GEARMAN_COMMAND_JOB_ASSIGN_UNIQ: 'GEARMAN_COMMAND_JOB_ASSIGN_UNIQ',
GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG: 'GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG',
GEARMAN_COMMAND_SUBMIT_JOB_LOW: 'GEARMAN_COMMAND_SUBMIT_JOB_LOW',
GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG: 'GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG',
GEARMAN_COMMAND_TEXT_COMMAND: 'GEARMAN_COMMAND_TEXT_COMMAND'
}
GEARMAN_SERVER_COMMAND_STATUS = 'status'
GEARMAN_SERVER_COMMAND_VERSION = 'version'
GEARMAN_SERVER_COMMAND_WORKERS = 'workers'
GEARMAN_SERVER_COMMAND_MAXQUEUE = 'maxqueue'
GEARMAN_SERVER_COMMAND_SHUTDOWN = 'shutdown'
def get_command_name(cmd_type):
return GEARMAN_COMMAND_TO_NAME.get(cmd_type, cmd_type)
def submit_cmd_for_background_priority(background, priority):
cmd_type_lookup = {
(True, PRIORITY_NONE): GEARMAN_COMMAND_SUBMIT_JOB_BG,
(True, PRIORITY_LOW): GEARMAN_COMMAND_SUBMIT_JOB_LOW_BG,
(True, PRIORITY_HIGH): GEARMAN_COMMAND_SUBMIT_JOB_HIGH_BG,
(False, PRIORITY_NONE): GEARMAN_COMMAND_SUBMIT_JOB,
(False, PRIORITY_LOW): GEARMAN_COMMAND_SUBMIT_JOB_LOW,
(False, PRIORITY_HIGH): GEARMAN_COMMAND_SUBMIT_JOB_HIGH
}
lookup_tuple = (background, priority)
cmd_type = cmd_type_lookup[lookup_tuple]
return cmd_type
def parse_binary_command(in_buffer, is_response=True):
"""Parse data and return (command type, command arguments dict, command size)
or (None, None, data) if there's not enough data for a complete command.
"""
in_buffer_size = len(in_buffer)
magic = None
cmd_type = None
cmd_args = None
cmd_len = 0
expected_packet_size = None
# If we don't have enough data to parse, error early
if in_buffer_size < COMMAND_HEADER_SIZE:
return cmd_type, cmd_args, cmd_len
# By default, we'll assume we're dealing with a gearman command
magic, cmd_type, cmd_len = struct.unpack('!4sII', in_buffer[:COMMAND_HEADER_SIZE])
magic = magic.decode('utf-8')
received_bad_response = is_response and bool(magic != MAGIC_RES_STRING)
received_bad_request = not is_response and bool(magic != MAGIC_REQ_STRING)
if received_bad_response or received_bad_request:
raise ProtocolError('Malformed Magic')
expected_cmd_params = GEARMAN_PARAMS_FOR_COMMAND.get(cmd_type, None)
# GEARMAN_COMMAND_TEXT_COMMAND is a faked command that we use to support server text-based commands
if expected_cmd_params is None or cmd_type == GEARMAN_COMMAND_TEXT_COMMAND:
raise ProtocolError('Received unknown binary command: %s' % cmd_type)
# If everything indicates this is a valid command, we should check to see if we have enough stuff to read in our buffer
expected_packet_size = COMMAND_HEADER_SIZE + cmd_len
if in_buffer_size < expected_packet_size:
return None, None, 0
binary_payload = in_buffer[COMMAND_HEADER_SIZE:expected_packet_size]
# binary_payload = binary_payload.decode('utf-8') # todo check
binary_payload = "".join(map(chr, binary_payload))
split_arguments = []
# print("".join(map(chr, binary_payload)), binary_payload, expected_cmd_params)
if len(expected_cmd_params) > 0:
split_arguments = binary_payload.split(NULL_CHAR, len(expected_cmd_params) - 1)
elif binary_payload:
raise ProtocolError('Expected no binary payload: %s' % get_command_name(cmd_type))
# This is a sanity check on the binary_payload.split() phase
# We should never be able to get here with any VALID gearman data
if len(split_arguments) != len(expected_cmd_params):
raise ProtocolError('Received %d argument(s), expecting %d argument(s): %s' % (len(split_arguments), len(expected_cmd_params), get_command_name(cmd_type)))
# Iterate through the split arguments and assign them labels based on their order
cmd_args = dict((param_label, param_value) for param_label, param_value in zip(expected_cmd_params, split_arguments))
return cmd_type, cmd_args, expected_packet_size
def pack_binary_command(cmd_type, cmd_args, is_response=False):
"""Packs the given command using the parameter ordering specified in GEARMAN_PARAMS_FOR_COMMAND.
*NOTE* Expects that all arguments in cmd_args are already str's.
"""
expected_cmd_params = GEARMAN_PARAMS_FOR_COMMAND.get(cmd_type, None)
if expected_cmd_params is None or cmd_type == GEARMAN_COMMAND_TEXT_COMMAND:
raise ProtocolError('Received unknown binary command: %s' % get_command_name(cmd_type))
expected_parameter_set = set(expected_cmd_params)
received_parameter_set = set(cmd_args.keys())
if expected_parameter_set != received_parameter_set:
raise ProtocolError('Received arguments did not match expected arguments: %r != %r' % (expected_parameter_set, received_parameter_set))
# Select the right expected magic
if is_response:
magic = MAGIC_RES_STRING
else:
magic = MAGIC_REQ_STRING
# !NOTE! str should be replaced with bytes in Python 3.x
# We will iterate in ORDER and str all our command arguments
if compat.any(type(param_value) != str for param_value in cmd_args.values()):
raise ProtocolError('Received non-binary arguments: %r' % cmd_args)
data_items = [cmd_args[param] for param in expected_cmd_params]
binary_payload = NULL_CHAR.join(data_items)
# Pack the header in the !4sII format then append the binary payload
payload_size = len(binary_payload)
packing_format = '!4sII%ds' % payload_size
return struct.pack(packing_format, magic.encode(), cmd_type, payload_size, binary_payload.encode())
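# Illustrative round trip (not part of the original module): packing a request
# and parsing it back should recover the same command type and arguments.
#
#     packet = pack_binary_command(GEARMAN_COMMAND_ECHO_REQ, {'data': 'ping'})
#     cmd_type, cmd_args, cmd_len = parse_binary_command(packet, is_response=False)
#     # cmd_type == GEARMAN_COMMAND_ECHO_REQ, cmd_args['data'] == 'ping'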
def parse_text_command(in_buffer):
"""Parse a text command and return a single line at a time"""
cmd_type = None
cmd_args = None
cmd_len = 0
in_buffer = "".join(map(chr, in_buffer))
if '\n' not in in_buffer:
return cmd_type, cmd_args, cmd_len
text_command, in_buffer = in_buffer.split('\n', 1)
if NULL_CHAR in text_command:
raise ProtocolError('Received unexpected character: %s' % text_command)
# Fake gearman command "TEXT_COMMAND" used to process server admin client responses
cmd_type = GEARMAN_COMMAND_TEXT_COMMAND
cmd_args = dict(raw_text=text_command)
cmd_len = len(text_command) + 1
return cmd_type, cmd_args, cmd_len
def pack_text_command(cmd_type, cmd_args):
"""Parse a text command and return a single line at a time"""
if cmd_type != GEARMAN_COMMAND_TEXT_COMMAND:
raise ProtocolError('Unknown cmd_type: Received %s, expecting %s' % (get_command_name(cmd_type), get_command_name(GEARMAN_COMMAND_TEXT_COMMAND)))
cmd_line = cmd_args.get('raw_text')
if cmd_line is None:
raise ProtocolError('Did not receive any valid arguments: %s' % cmd_args)
return str(cmd_line)
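# Illustrative sketch (not part of the original module): the text path above
# carries newline-terminated server admin commands rather than binary packets:
#
#   parse_text_command(bytearray(b'status\n'))
#   # -> (GEARMAN_COMMAND_TEXT_COMMAND, {'raw_text': 'status'}, 7)
#   pack_text_command(GEARMAN_COMMAND_TEXT_COMMAND, {'raw_text': 'workers'})
#   # -> 'workers'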
|
py | 1a4a58c716b96e9f0905e5399fc7207122e8f29e | #
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from octodns.record import ARecord, AaaaRecord, AliasRecord, CaaRecord, \
CaaValue, CnameRecord, DnameRecord, Create, Delete, GeoValue, LocRecord, \
LocValue, MxRecord, MxValue, NaptrRecord, NaptrValue, NsRecord, \
PtrRecord, Record, SshfpRecord, SshfpValue, SpfRecord, SrvRecord, \
SrvValue, TxtRecord, Update, UrlfwdRecord, UrlfwdValue, ValidationError, \
_Dynamic, _DynamicPool, _DynamicRule
from octodns.zone import Zone
from helpers import DynamicProvider, GeoProvider, SimpleProvider
class TestRecord(TestCase):
zone = Zone('unit.tests.', [])
def test_lowering(self):
record = ARecord(self.zone, 'MiXeDcAsE', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
self.assertEquals('mixedcase', record.name)
def test_alias_lowering_value(self):
upper_record = AliasRecord(self.zone, 'aliasUpperValue', {
'ttl': 30,
'type': 'ALIAS',
'value': 'GITHUB.COM',
})
lower_record = AliasRecord(self.zone, 'aliasLowerValue', {
'ttl': 30,
'type': 'ALIAS',
'value': 'github.com',
})
self.assertEquals(upper_record.value, lower_record.value)
def test_cname_lowering_value(self):
upper_record = CnameRecord(self.zone, 'CnameUpperValue', {
'ttl': 30,
'type': 'CNAME',
'value': 'GITHUB.COM',
})
lower_record = CnameRecord(self.zone, 'CnameLowerValue', {
'ttl': 30,
'type': 'CNAME',
'value': 'github.com',
})
self.assertEquals(upper_record.value, lower_record.value)
def test_dname_lowering_value(self):
upper_record = DnameRecord(self.zone, 'DnameUpperValue', {
'ttl': 30,
'type': 'DNAME',
'value': 'GITHUB.COM',
})
lower_record = DnameRecord(self.zone, 'DnameLowerValue', {
'ttl': 30,
'type': 'DNAME',
'value': 'github.com',
})
self.assertEquals(upper_record.value, lower_record.value)
def test_ptr_lowering_value(self):
upper_record = PtrRecord(self.zone, 'PtrUpperValue', {
'ttl': 30,
'type': 'PTR',
'value': 'GITHUB.COM',
})
lower_record = PtrRecord(self.zone, 'PtrLowerValue', {
'ttl': 30,
'type': 'PTR',
'value': 'github.com',
})
self.assertEquals(upper_record.value, lower_record.value)
def test_a_and_record(self):
a_values = ['1.2.3.4', '2.2.3.4']
a_data = {'ttl': 30, 'values': a_values}
a = ARecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values, a.values)
self.assertEquals(a_data, a.data)
b_value = '3.2.3.4'
b_data = {'ttl': 30, 'value': b_value}
b = ARecord(self.zone, 'b', b_data)
self.assertEquals([b_value], b.values)
self.assertEquals(b_data, b.data)
# top-level
data = {'ttl': 30, 'value': '4.2.3.4'}
self.assertEquals(self.zone.name, ARecord(self.zone, '', data).fqdn)
self.assertEquals(self.zone.name, ARecord(self.zone, None, data).fqdn)
# ARecord equate with itself
self.assertTrue(a == a)
# Records with differing names and same type don't equate
self.assertFalse(a == b)
# Records with same name & type equate even if ttl is different
self.assertTrue(a == ARecord(self.zone, 'a',
{'ttl': 31, 'values': a_values}))
# Records with same name & type equate even if values are different
self.assertTrue(a == ARecord(self.zone, 'a',
{'ttl': 30, 'value': b_value}))
target = SimpleProvider()
# no changes if self
self.assertFalse(a.changes(a, target))
# no changes if clone
other = ARecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
self.assertFalse(a.changes(other, target))
# changes if ttl modified
other.ttl = 31
update = a.changes(other, target)
self.assertEquals(a, update.existing)
self.assertEquals(other, update.new)
# changes if values modified
other.ttl = a.ttl
other.values = ['4.4.4.4']
update = a.changes(other, target)
self.assertEquals(a, update.existing)
self.assertEquals(other, update.new)
# Hashing
records = set()
records.add(a)
self.assertTrue(a in records)
self.assertFalse(b in records)
records.add(b)
self.assertTrue(b in records)
# __repr__ doesn't blow up
a.__repr__()
# Record.__repr__ does
with self.assertRaises(NotImplementedError):
class DummyRecord(Record):
def __init__(self):
pass
DummyRecord().__repr__()
def test_values_mixin_data(self):
# no values, no value or values in data
a = ARecord(self.zone, '', {
'type': 'A',
'ttl': 600,
'values': []
})
self.assertNotIn('values', a.data)
# empty value, no value or values in data
b = ARecord(self.zone, '', {
'type': 'A',
'ttl': 600,
'values': ['']
})
self.assertNotIn('value', b.data)
# empty/None values, no value or values in data
c = ARecord(self.zone, '', {
'type': 'A',
'ttl': 600,
'values': ['', None]
})
self.assertNotIn('values', c.data)
# empty/None values and valid, value in data
c = ARecord(self.zone, '', {
'type': 'A',
'ttl': 600,
'values': ['', None, '10.10.10.10']
})
self.assertNotIn('values', c.data)
self.assertEqual('10.10.10.10', c.data['value'])
def test_value_mixin_data(self):
# unspecified value, no value in data
a = AliasRecord(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
'value': None
})
self.assertNotIn('value', a.data)
# unspecified value, no value in data
a = AliasRecord(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
'value': ''
})
self.assertNotIn('value', a.data)
def test_geo(self):
geo_data = {'ttl': 42, 'values': ['5.2.3.4', '6.2.3.4'],
'geo': {'AF': ['1.1.1.1'],
'AS-JP': ['2.2.2.2', '3.3.3.3'],
'NA-US': ['4.4.4.4', '5.5.5.5'],
'NA-US-CA': ['6.6.6.6', '7.7.7.7']}}
geo = ARecord(self.zone, 'geo', geo_data)
self.assertEquals(geo_data, geo.data)
other_data = {'ttl': 42, 'values': ['5.2.3.4', '6.2.3.4'],
'geo': {'AF': ['1.1.1.1'],
'AS-JP': ['2.2.2.2', '3.3.3.3'],
'NA-US': ['4.4.4.4', '5.5.5.5'],
'NA-US-CA': ['6.6.6.6', '7.7.7.7']}}
other = ARecord(self.zone, 'geo', other_data)
self.assertEquals(other_data, other.data)
simple_target = SimpleProvider()
geo_target = GeoProvider()
# Geo provider doesn't consider identical geo to be changes
self.assertFalse(geo.changes(geo, geo_target))
# geo values don't impact equality
other.geo['AF'].values = ['9.9.9.9']
self.assertTrue(geo == other)
# Non-geo supporting provider doesn't consider geo diffs to be changes
self.assertFalse(geo.changes(other, simple_target))
# Geo provider does consider geo diffs to be changes
self.assertTrue(geo.changes(other, geo_target))
# Object without geo doesn't impact equality
other.geo = {}
self.assertTrue(geo == other)
# Non-geo supporting provider doesn't consider lack of geo a diff
self.assertFalse(geo.changes(other, simple_target))
# Geo provider does consider lack of geo diffs to be changes
self.assertTrue(geo.changes(other, geo_target))
# __repr__ doesn't blow up
geo.__repr__()
def assertMultipleValues(self, _type, a_values, b_value):
a_data = {'ttl': 30, 'values': a_values}
a = _type(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values, a.values)
self.assertEquals(a_data, a.data)
b_data = {'ttl': 30, 'value': b_value}
b = _type(self.zone, 'b', b_data)
self.assertEquals([b_value], b.values)
self.assertEquals(b_data, b.data)
def test_aaaa(self):
a_values = ['2001:db8:3c4d:15::1a2f:1a2b',
'2001:db8:3c4d:15::1a2f:1a3b']
b_value = '2001:db8:3c4d:15::1a2f:1a4b'
self.assertMultipleValues(AaaaRecord, a_values, b_value)
# Specifically validate that we normalize IPv6 addresses
values = ['2001:db8:3c4d:15:0000:0000:1a2f:1a2b',
'2001:0db8:3c4d:0015::1a2f:1a3b']
data = {
'ttl': 30,
'values': values,
}
record = AaaaRecord(self.zone, 'aaaa', data)
self.assertEquals(a_values, record.values)
def assertSingleValue(self, _type, a_value, b_value):
a_data = {'ttl': 30, 'value': a_value}
a = _type(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_value, a.value)
self.assertEquals(a_data, a.data)
b_data = {'ttl': 30, 'value': b_value}
b = _type(self.zone, 'b', b_data)
self.assertEquals(b_value, b.value)
self.assertEquals(b_data, b.data)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in value causes change
other = _type(self.zone, 'a', {'ttl': 30, 'value': b_value})
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_alias(self):
a_data = {'ttl': 0, 'value': 'www.unit.tests.'}
a = AliasRecord(self.zone, '', a_data)
self.assertEquals('', a.name)
self.assertEquals('unit.tests.', a.fqdn)
self.assertEquals(0, a.ttl)
self.assertEquals(a_data['value'], a.value)
self.assertEquals(a_data, a.data)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in value causes change
other = AliasRecord(self.zone, 'a', a_data)
other.value = 'foo.unit.tests.'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_caa(self):
a_values = [{
'flags': 0,
'tag': 'issue',
'value': 'ca.example.net',
}, {
'flags': 128,
'tag': 'iodef',
'value': 'mailto:[email protected]',
}]
a_data = {'ttl': 30, 'values': a_values}
a = CaaRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values[0]['flags'], a.values[0].flags)
self.assertEquals(a_values[0]['tag'], a.values[0].tag)
self.assertEquals(a_values[0]['value'], a.values[0].value)
self.assertEquals(a_values[1]['flags'], a.values[1].flags)
self.assertEquals(a_values[1]['tag'], a.values[1].tag)
self.assertEquals(a_values[1]['value'], a.values[1].value)
self.assertEquals(a_data, a.data)
b_value = {
'tag': 'iodef',
'value': 'http://iodef.example.com/',
}
b_data = {'ttl': 30, 'value': b_value}
b = CaaRecord(self.zone, 'b', b_data)
self.assertEquals(0, b.values[0].flags)
self.assertEquals(b_value['tag'], b.values[0].tag)
self.assertEquals(b_value['value'], b.values[0].value)
b_data['value']['flags'] = 0
self.assertEquals(b_data, b.data)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in flags causes change
other = CaaRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].flags = 128
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in tag causes change
other.values[0].flags = a.values[0].flags
other.values[0].tag = 'foo'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in value causes change
other.values[0].tag = a.values[0].tag
other.values[0].value = 'bar'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_cname(self):
self.assertSingleValue(CnameRecord, 'target.foo.com.',
'other.foo.com.')
def test_dname(self):
self.assertSingleValue(DnameRecord, 'target.foo.com.',
'other.foo.com.')
def test_loc(self):
a_values = [{
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}]
a_data = {'ttl': 30, 'values': a_values}
a = LocRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values[0]['lat_degrees'], a.values[0].lat_degrees)
self.assertEquals(a_values[0]['lat_minutes'], a.values[0].lat_minutes)
self.assertEquals(a_values[0]['lat_seconds'], a.values[0].lat_seconds)
self.assertEquals(a_values[0]['lat_direction'],
a.values[0].lat_direction)
self.assertEquals(a_values[0]['long_degrees'],
a.values[0].long_degrees)
self.assertEquals(a_values[0]['long_minutes'],
a.values[0].long_minutes)
self.assertEquals(a_values[0]['long_seconds'],
a.values[0].long_seconds)
self.assertEquals(a_values[0]['long_direction'],
a.values[0].long_direction)
self.assertEquals(a_values[0]['altitude'], a.values[0].altitude)
self.assertEquals(a_values[0]['size'], a.values[0].size)
self.assertEquals(a_values[0]['precision_horz'],
a.values[0].precision_horz)
self.assertEquals(a_values[0]['precision_vert'],
a.values[0].precision_vert)
b_value = {
'lat_degrees': 32,
'lat_minutes': 7,
'lat_seconds': 19,
'lat_direction': 'S',
'long_degrees': 116,
'long_minutes': 2,
'long_seconds': 25,
'long_direction': 'E',
'altitude': 10,
'size': 1,
'precision_horz': 10000,
'precision_vert': 10,
}
b_data = {'ttl': 30, 'value': b_value}
b = LocRecord(self.zone, 'b', b_data)
self.assertEquals(b_value['lat_degrees'], b.values[0].lat_degrees)
self.assertEquals(b_value['lat_minutes'], b.values[0].lat_minutes)
self.assertEquals(b_value['lat_seconds'], b.values[0].lat_seconds)
self.assertEquals(b_value['lat_direction'], b.values[0].lat_direction)
self.assertEquals(b_value['long_degrees'], b.values[0].long_degrees)
self.assertEquals(b_value['long_minutes'], b.values[0].long_minutes)
self.assertEquals(b_value['long_seconds'], b.values[0].long_seconds)
self.assertEquals(b_value['long_direction'],
b.values[0].long_direction)
self.assertEquals(b_value['altitude'], b.values[0].altitude)
self.assertEquals(b_value['size'], b.values[0].size)
self.assertEquals(b_value['precision_horz'],
b.values[0].precision_horz)
self.assertEquals(b_value['precision_vert'],
b.values[0].precision_vert)
self.assertEquals(b_data, b.data)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in lat_direction causes change
other = LocRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].lat_direction = 'N'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in altitude causes change
other.values[0].altitude = a.values[0].altitude
other.values[0].altitude = -10
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_mx(self):
a_values = [{
'preference': 10,
'exchange': 'smtp1.'
}, {
'priority': 20,
'value': 'smtp2.'
}]
a_data = {'ttl': 30, 'values': a_values}
a = MxRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values[0]['preference'], a.values[0].preference)
self.assertEquals(a_values[0]['exchange'], a.values[0].exchange)
self.assertEquals(a_values[1]['priority'], a.values[1].preference)
self.assertEquals(a_values[1]['value'], a.values[1].exchange)
a_data['values'][1] = {
'preference': 20,
'exchange': 'smtp2.',
}
self.assertEquals(a_data, a.data)
b_value = {
'preference': 0,
'exchange': 'smtp3.',
}
b_data = {'ttl': 30, 'value': b_value}
b = MxRecord(self.zone, 'b', b_data)
self.assertEquals(b_value['preference'], b.values[0].preference)
self.assertEquals(b_value['exchange'], b.values[0].exchange)
self.assertEquals(b_data, b.data)
a_upper_values = [{
'preference': 10,
'exchange': 'SMTP1.'
}, {
'priority': 20,
'value': 'SMTP2.'
}]
a_upper_data = {'ttl': 30, 'values': a_upper_values}
a_upper = MxRecord(self.zone, 'a', a_upper_data)
self.assertEquals(a_upper.data, a.data)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in preference causes change
other = MxRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].preference = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in value causes change
other.values[0].preference = a.values[0].preference
other.values[0].exchange = 'smtpX'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_naptr(self):
a_values = [{
'order': 10,
'preference': 11,
'flags': 'X',
'service': 'Y',
'regexp': 'Z',
'replacement': '.',
}, {
'order': 20,
'preference': 21,
'flags': 'A',
'service': 'B',
'regexp': 'C',
'replacement': 'foo.com',
}]
a_data = {'ttl': 30, 'values': a_values}
a = NaptrRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
for i in (0, 1):
for k in a_values[0].keys():
self.assertEquals(a_values[i][k], getattr(a.values[i], k))
self.assertEquals(a_data, a.data)
b_value = {
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
}
b_data = {'ttl': 30, 'value': b_value}
b = NaptrRecord(self.zone, 'b', b_data)
for k in a_values[0].keys():
self.assertEquals(b_value[k], getattr(b.values[0], k))
self.assertEquals(b_data, b.data)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in order causes change
other = NaptrRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].order = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in replacement causes change
other.values[0].order = a.values[0].order
other.values[0].replacement = 'smtpX'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# full sorting
# equivalent
b_naptr_value = b.values[0]
self.assertTrue(b_naptr_value == b_naptr_value)
self.assertFalse(b_naptr_value != b_naptr_value)
self.assertTrue(b_naptr_value <= b_naptr_value)
self.assertTrue(b_naptr_value >= b_naptr_value)
# by order
self.assertTrue(b_naptr_value > NaptrValue({
'order': 10,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
}))
self.assertTrue(b_naptr_value < NaptrValue({
'order': 40,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
}))
# by preference
self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 10,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
}))
self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 40,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
}))
# by flags
self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 31,
'flags': 'A',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
}))
self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 31,
'flags': 'Z',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
}))
# by service
self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'A',
'regexp': 'O',
'replacement': 'x',
}))
self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'Z',
'regexp': 'O',
'replacement': 'x',
}))
# by regexp
self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'A',
'replacement': 'x',
}))
self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'Z',
'replacement': 'x',
}))
# by replacement
self.assertTrue(b_naptr_value > NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'a',
}))
self.assertTrue(b_naptr_value < NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'z',
}))
# __repr__ doesn't blow up
a.__repr__()
# Hash
v = NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'z',
})
o = NaptrValue({
'order': 30,
'preference': 32,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'z',
})
values = set()
values.add(v)
self.assertTrue(v in values)
self.assertFalse(o in values)
values.add(o)
self.assertTrue(o in values)
def test_ns(self):
a_values = ['5.6.7.8.', '6.7.8.9.', '7.8.9.0.']
a_data = {'ttl': 30, 'values': a_values}
a = NsRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values, a.values)
self.assertEquals(a_data, a.data)
b_value = '9.8.7.6.'
b_data = {'ttl': 30, 'value': b_value}
b = NsRecord(self.zone, 'b', b_data)
self.assertEquals([b_value], b.values)
self.assertEquals(b_data, b.data)
def test_sshfp(self):
a_values = [{
'algorithm': 10,
'fingerprint_type': 11,
'fingerprint': 'abc123',
}, {
'algorithm': 20,
'fingerprint_type': 21,
'fingerprint': 'def456',
}]
a_data = {'ttl': 30, 'values': a_values}
a = SshfpRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values[0]['algorithm'], a.values[0].algorithm)
self.assertEquals(a_values[0]['fingerprint_type'],
a.values[0].fingerprint_type)
self.assertEquals(a_values[0]['fingerprint'], a.values[0].fingerprint)
self.assertEquals(a_data, a.data)
b_value = {
'algorithm': 30,
'fingerprint_type': 31,
'fingerprint': 'ghi789',
}
b_data = {'ttl': 30, 'value': b_value}
b = SshfpRecord(self.zone, 'b', b_data)
self.assertEquals(b_value['algorithm'], b.values[0].algorithm)
self.assertEquals(b_value['fingerprint_type'],
b.values[0].fingerprint_type)
self.assertEquals(b_value['fingerprint'], b.values[0].fingerprint)
self.assertEquals(b_data, b.data)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in algorithm causes change
other = SshfpRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].algorithm = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in fingerprint_type causes change
other = SshfpRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].algorithm = a.values[0].algorithm
other.values[0].fingerprint_type = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in fingerprint causes change
other = SshfpRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].fingerprint_type = a.values[0].fingerprint_type
other.values[0].fingerprint = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_spf(self):
a_values = ['spf1 -all', 'spf1 -hrm']
b_value = 'spf1 -other'
self.assertMultipleValues(SpfRecord, a_values, b_value)
def test_srv(self):
a_values = [{
'priority': 10,
'weight': 11,
'port': 12,
'target': 'server1',
}, {
'priority': 20,
'weight': 21,
'port': 22,
'target': 'server2',
}]
a_data = {'ttl': 30, 'values': a_values}
a = SrvRecord(self.zone, '_a._tcp', a_data)
self.assertEquals('_a._tcp', a.name)
self.assertEquals('_a._tcp.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values[0]['priority'], a.values[0].priority)
self.assertEquals(a_values[0]['weight'], a.values[0].weight)
self.assertEquals(a_values[0]['port'], a.values[0].port)
self.assertEquals(a_values[0]['target'], a.values[0].target)
self.assertEquals(a_data, a.data)
b_value = {
'priority': 30,
'weight': 31,
'port': 32,
'target': 'server3',
}
b_data = {'ttl': 30, 'value': b_value}
b = SrvRecord(self.zone, '_b._tcp', b_data)
self.assertEquals(b_value['priority'], b.values[0].priority)
self.assertEquals(b_value['weight'], b.values[0].weight)
self.assertEquals(b_value['port'], b.values[0].port)
self.assertEquals(b_value['target'], b.values[0].target)
self.assertEquals(b_data, b.data)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in priority causes change
other = SrvRecord(self.zone, '_a._icmp',
{'ttl': 30, 'values': a_values})
other.values[0].priority = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in weight causes change
other.values[0].priority = a.values[0].priority
other.values[0].weight = 33
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in port causes change
other.values[0].weight = a.values[0].weight
other.values[0].port = 44
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in target causes change
other.values[0].port = a.values[0].port
other.values[0].target = 'serverX'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_txt(self):
a_values = ['a one', 'a two']
b_value = 'b other'
self.assertMultipleValues(TxtRecord, a_values, b_value)
def test_urlfwd(self):
a_values = [{
'path': '/',
'target': 'http://foo',
'code': 301,
'masking': 2,
'query': 0,
}, {
'path': '/target',
'target': 'http://target',
'code': 302,
'masking': 2,
'query': 0,
}]
a_data = {'ttl': 30, 'values': a_values}
a = UrlfwdRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values[0]['path'], a.values[0].path)
self.assertEquals(a_values[0]['target'], a.values[0].target)
self.assertEquals(a_values[0]['code'], a.values[0].code)
self.assertEquals(a_values[0]['masking'], a.values[0].masking)
self.assertEquals(a_values[0]['query'], a.values[0].query)
self.assertEquals(a_values[1]['path'], a.values[1].path)
self.assertEquals(a_values[1]['target'], a.values[1].target)
self.assertEquals(a_values[1]['code'], a.values[1].code)
self.assertEquals(a_values[1]['masking'], a.values[1].masking)
self.assertEquals(a_values[1]['query'], a.values[1].query)
self.assertEquals(a_data, a.data)
b_value = {
'path': '/',
'target': 'http://location',
'code': 301,
'masking': 2,
'query': 0,
}
b_data = {'ttl': 30, 'value': b_value}
b = UrlfwdRecord(self.zone, 'b', b_data)
self.assertEquals(b_value['path'], b.values[0].path)
self.assertEquals(b_value['target'], b.values[0].target)
self.assertEquals(b_value['code'], b.values[0].code)
self.assertEquals(b_value['masking'], b.values[0].masking)
self.assertEquals(b_value['query'], b.values[0].query)
self.assertEquals(b_data, b.data)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in path causes change
other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].path = '/change'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in target causes change
other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].target = 'http://target'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in code causes change
other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].code = 302
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in masking causes change
other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].masking = 0
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in query causes change
other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].query = 1
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# hash
v = UrlfwdValue({
'path': '/',
'target': 'http://place',
'code': 301,
'masking': 2,
'query': 0,
})
o = UrlfwdValue({
'path': '/location',
'target': 'http://redirect',
'code': 302,
'masking': 2,
'query': 0,
})
values = set()
values.add(v)
self.assertTrue(v in values)
self.assertFalse(o in values)
values.add(o)
self.assertTrue(o in values)
# __repr__ doesn't blow up
a.__repr__()
def test_record_new(self):
txt = Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'some text',
})
self.assertIsInstance(txt, TxtRecord)
self.assertEquals('TXT', txt._type)
self.assertEquals(['some text'], txt.values)
# Missing type
with self.assertRaises(Exception) as ctx:
Record.new(self.zone, 'unknown', {})
self.assertTrue('missing type' in str(ctx.exception))
# Unknown type
with self.assertRaises(Exception) as ctx:
Record.new(self.zone, 'unknown', {
'type': 'XXX',
})
self.assertTrue('Unknown record type' in str(ctx.exception))
def test_record_copy(self):
a = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
})
# Identical copy.
b = a.copy()
self.assertIsInstance(b, ARecord)
self.assertEquals('unit.tests.', b.zone.name)
self.assertEquals('a', b.name)
self.assertEquals('A', b._type)
self.assertEquals(['1.2.3.4'], b.values)
# Copy with another zone object.
c_zone = Zone('other.tests.', [])
c = a.copy(c_zone)
self.assertIsInstance(c, ARecord)
self.assertEquals('other.tests.', c.zone.name)
self.assertEquals('a', c.name)
self.assertEquals('A', c._type)
self.assertEquals(['1.2.3.4'], c.values)
# Record with no record type specified in data.
d_data = {
'ttl': 600,
'values': ['just a test']
}
d = TxtRecord(self.zone, 'txt', d_data)
d.copy()
self.assertEquals('TXT', d._type)
def test_dynamic_record_copy(self):
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}],
},
},
'rules': [{
'pool': 'one',
}],
},
'octodns': {
'healthcheck': {
'protocol': 'TCP',
'port': 80,
},
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
record1 = Record.new(self.zone, 'a', a_data)
record2 = record1.copy()
self.assertEqual(record1._octodns, record2._octodns)
def test_change(self):
existing = Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'some text',
})
new = Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'some change',
})
create = Create(new)
self.assertEquals(new.values, create.record.values)
update = Update(existing, new)
self.assertEquals(new.values, update.record.values)
delete = Delete(existing)
self.assertEquals(existing.values, delete.record.values)
def test_geo_value(self):
code = 'NA-US-CA'
values = ['1.2.3.4']
geo = GeoValue(code, values)
self.assertEquals(code, geo.code)
self.assertEquals('NA', geo.continent_code)
self.assertEquals('US', geo.country_code)
self.assertEquals('CA', geo.subdivision_code)
self.assertEquals(values, geo.values)
self.assertEquals(['NA-US', 'NA'], list(geo.parents))
a = GeoValue('NA-US-CA', values)
b = GeoValue('AP-JP', values)
c = GeoValue('NA-US-CA', ['2.3.4.5'])
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertEqual(c, c)
self.assertNotEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(b, a)
self.assertNotEqual(b, c)
self.assertNotEqual(c, a)
self.assertNotEqual(c, b)
self.assertTrue(a > b)
self.assertTrue(a < c)
self.assertTrue(b < a)
self.assertTrue(b < c)
self.assertTrue(c > a)
self.assertTrue(c > b)
self.assertTrue(a >= a)
self.assertTrue(a >= b)
self.assertTrue(a <= c)
self.assertTrue(b <= a)
self.assertTrue(b <= b)
self.assertTrue(b <= c)
self.assertTrue(c > a)
self.assertTrue(c > b)
self.assertTrue(c >= b)
def test_healthcheck(self):
new = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
'octodns': {
'healthcheck': {
'path': '/_ready',
'host': 'bleep.bloop',
'protocol': 'HTTP',
'port': 8080,
}
}
})
self.assertEquals('/_ready', new.healthcheck_path)
self.assertEquals('bleep.bloop', new.healthcheck_host())
self.assertEquals('HTTP', new.healthcheck_protocol)
self.assertEquals(8080, new.healthcheck_port)
# empty host value in healthcheck
new = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
'octodns': {
'healthcheck': {
'path': '/_ready',
'host': None,
'protocol': 'HTTP',
'port': 8080,
}
}
})
self.assertEquals('1.2.3.4', new.healthcheck_host(value="1.2.3.4"))
new = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
})
self.assertEquals('/_dns', new.healthcheck_path)
self.assertEquals('a.unit.tests', new.healthcheck_host())
self.assertEquals('HTTPS', new.healthcheck_protocol)
self.assertEquals(443, new.healthcheck_port)
def test_healthcheck_tcp(self):
new = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
'octodns': {
'healthcheck': {
'path': '/ignored',
'host': 'completely.ignored',
'protocol': 'TCP',
'port': 8080,
}
}
})
self.assertIsNone(new.healthcheck_path)
self.assertIsNone(new.healthcheck_host())
self.assertEquals('TCP', new.healthcheck_protocol)
self.assertEquals(8080, new.healthcheck_port)
new = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
'octodns': {
'healthcheck': {
'protocol': 'TCP',
}
}
})
self.assertIsNone(new.healthcheck_path)
self.assertIsNone(new.healthcheck_host())
self.assertEquals('TCP', new.healthcheck_protocol)
self.assertEquals(443, new.healthcheck_port)
def test_ignored(self):
new = Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'some change',
'octodns': {
'ignored': True,
}
})
self.assertTrue(new.ignored)
new = Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'some change',
'octodns': {
'ignored': False,
}
})
self.assertFalse(new.ignored)
new = Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'some change',
})
self.assertFalse(new.ignored)
def test_ordering_functions(self):
a = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
})
b = Record.new(self.zone, 'b', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
})
c = Record.new(self.zone, 'c', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
})
aaaa = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'AAAA',
'value': '2601:644:500:e210:62f8:1dff:feb8:947a',
})
self.assertEquals(a, a)
self.assertEquals(b, b)
self.assertEquals(c, c)
self.assertEquals(aaaa, aaaa)
self.assertNotEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(a, aaaa)
self.assertNotEqual(b, a)
self.assertNotEqual(b, c)
self.assertNotEqual(b, aaaa)
self.assertNotEqual(c, a)
self.assertNotEqual(c, b)
self.assertNotEqual(c, aaaa)
self.assertNotEqual(aaaa, a)
self.assertNotEqual(aaaa, b)
self.assertNotEqual(aaaa, c)
self.assertTrue(a < b)
self.assertTrue(a < c)
self.assertTrue(a < aaaa)
self.assertTrue(b > a)
self.assertTrue(b < c)
self.assertTrue(b > aaaa)
self.assertTrue(c > a)
self.assertTrue(c > b)
self.assertTrue(c > aaaa)
self.assertTrue(aaaa > a)
self.assertTrue(aaaa < b)
self.assertTrue(aaaa < c)
self.assertTrue(a <= a)
self.assertTrue(a <= b)
self.assertTrue(a <= c)
self.assertTrue(a <= aaaa)
self.assertTrue(b >= a)
self.assertTrue(b >= b)
self.assertTrue(b <= c)
self.assertTrue(b >= aaaa)
self.assertTrue(c >= a)
self.assertTrue(c >= b)
self.assertTrue(c >= c)
self.assertTrue(c >= aaaa)
self.assertTrue(aaaa >= a)
self.assertTrue(aaaa <= b)
self.assertTrue(aaaa <= c)
self.assertTrue(aaaa <= aaaa)
def test_caa_value(self):
a = CaaValue({'flags': 0, 'tag': 'a', 'value': 'v'})
b = CaaValue({'flags': 1, 'tag': 'a', 'value': 'v'})
c = CaaValue({'flags': 0, 'tag': 'c', 'value': 'v'})
d = CaaValue({'flags': 0, 'tag': 'a', 'value': 'z'})
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertEqual(c, c)
self.assertEqual(d, d)
self.assertNotEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(a, d)
self.assertNotEqual(b, a)
self.assertNotEqual(b, c)
self.assertNotEqual(b, d)
self.assertNotEqual(c, a)
self.assertNotEqual(c, b)
self.assertNotEqual(c, d)
self.assertTrue(a < b)
self.assertTrue(a < c)
self.assertTrue(a < d)
self.assertTrue(b > a)
self.assertTrue(b > c)
self.assertTrue(b > d)
self.assertTrue(c > a)
self.assertTrue(c < b)
self.assertTrue(c > d)
self.assertTrue(d > a)
self.assertTrue(d < b)
self.assertTrue(d < c)
self.assertTrue(a <= b)
self.assertTrue(a <= c)
self.assertTrue(a <= d)
self.assertTrue(a <= a)
self.assertTrue(a >= a)
self.assertTrue(b >= a)
self.assertTrue(b >= c)
self.assertTrue(b >= d)
self.assertTrue(b >= b)
self.assertTrue(b <= b)
self.assertTrue(c >= a)
self.assertTrue(c <= b)
self.assertTrue(c >= d)
self.assertTrue(c >= c)
self.assertTrue(c <= c)
self.assertTrue(d >= a)
self.assertTrue(d <= b)
self.assertTrue(d <= c)
self.assertTrue(d >= d)
self.assertTrue(d <= d)
def test_loc_value(self):
a = LocValue({
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
})
b = LocValue({
'lat_degrees': 32,
'lat_minutes': 7,
'lat_seconds': 19,
'lat_direction': 'S',
'long_degrees': 116,
'long_minutes': 2,
'long_seconds': 25,
'long_direction': 'E',
'altitude': 10,
'size': 1,
'precision_horz': 10000,
'precision_vert': 10,
})
c = LocValue({
'lat_degrees': 53,
'lat_minutes': 14,
'lat_seconds': 10,
'lat_direction': 'N',
'long_degrees': 2,
'long_minutes': 18,
'long_seconds': 26,
'long_direction': 'W',
'altitude': 10,
'size': 1,
'precision_horz': 1000,
'precision_vert': 10,
})
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertEqual(c, c)
self.assertNotEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(b, a)
self.assertNotEqual(b, c)
self.assertNotEqual(c, a)
self.assertNotEqual(c, b)
self.assertTrue(a < b)
self.assertTrue(a < c)
self.assertTrue(b > a)
self.assertTrue(b < c)
self.assertTrue(c > a)
self.assertTrue(c > b)
self.assertTrue(a <= b)
self.assertTrue(a <= c)
self.assertTrue(a <= a)
self.assertTrue(a >= a)
self.assertTrue(b >= a)
self.assertTrue(b <= c)
self.assertTrue(b >= b)
self.assertTrue(b <= b)
self.assertTrue(c >= a)
self.assertTrue(c >= b)
self.assertTrue(c >= c)
self.assertTrue(c <= c)
# Hash
values = set()
values.add(a)
self.assertTrue(a in values)
self.assertFalse(b in values)
values.add(b)
self.assertTrue(b in values)
def test_mx_value(self):
a = MxValue({'preference': 0, 'priority': 'a', 'exchange': 'v',
'value': '1'})
b = MxValue({'preference': 10, 'priority': 'a', 'exchange': 'v',
'value': '2'})
c = MxValue({'preference': 0, 'priority': 'b', 'exchange': 'z',
'value': '3'})
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertEqual(c, c)
self.assertNotEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(b, a)
self.assertNotEqual(b, c)
self.assertNotEqual(c, a)
self.assertNotEqual(c, b)
self.assertTrue(a < b)
self.assertTrue(a < c)
self.assertTrue(b > a)
self.assertTrue(b > c)
self.assertTrue(c > a)
self.assertTrue(c < b)
self.assertTrue(a <= b)
self.assertTrue(a <= c)
self.assertTrue(a <= a)
self.assertTrue(a >= a)
self.assertTrue(b >= a)
self.assertTrue(b >= c)
self.assertTrue(b >= b)
self.assertTrue(b <= b)
self.assertTrue(c >= a)
self.assertTrue(c <= b)
self.assertTrue(c >= c)
self.assertTrue(c <= c)
def test_sshfp_value(self):
a = SshfpValue({'algorithm': 0, 'fingerprint_type': 0,
'fingerprint': 'abcd'})
b = SshfpValue({'algorithm': 1, 'fingerprint_type': 0,
'fingerprint': 'abcd'})
c = SshfpValue({'algorithm': 0, 'fingerprint_type': 1,
'fingerprint': 'abcd'})
d = SshfpValue({'algorithm': 0, 'fingerprint_type': 0,
'fingerprint': 'bcde'})
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertEqual(c, c)
self.assertEqual(d, d)
self.assertNotEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(a, d)
self.assertNotEqual(b, a)
self.assertNotEqual(b, c)
self.assertNotEqual(b, d)
self.assertNotEqual(c, a)
self.assertNotEqual(c, b)
self.assertNotEqual(c, d)
self.assertNotEqual(d, a)
self.assertNotEqual(d, b)
self.assertNotEqual(d, c)
self.assertTrue(a < b)
self.assertTrue(a < c)
self.assertTrue(b > a)
self.assertTrue(b > c)
self.assertTrue(c > a)
self.assertTrue(c < b)
self.assertTrue(a <= b)
self.assertTrue(a <= c)
self.assertTrue(a <= a)
self.assertTrue(a >= a)
self.assertTrue(b >= a)
self.assertTrue(b >= c)
self.assertTrue(b >= b)
self.assertTrue(b <= b)
self.assertTrue(c >= a)
self.assertTrue(c <= b)
self.assertTrue(c >= c)
self.assertTrue(c <= c)
# Hash
values = set()
values.add(a)
self.assertTrue(a in values)
self.assertFalse(b in values)
values.add(b)
self.assertTrue(b in values)
def test_srv_value(self):
a = SrvValue({'priority': 0, 'weight': 0, 'port': 0, 'target': 'foo.'})
b = SrvValue({'priority': 1, 'weight': 0, 'port': 0, 'target': 'foo.'})
c = SrvValue({'priority': 0, 'weight': 2, 'port': 0, 'target': 'foo.'})
d = SrvValue({'priority': 0, 'weight': 0, 'port': 3, 'target': 'foo.'})
e = SrvValue({'priority': 0, 'weight': 0, 'port': 0, 'target': 'mmm.'})
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertEqual(c, c)
self.assertEqual(d, d)
self.assertEqual(e, e)
self.assertNotEqual(a, b)
self.assertNotEqual(a, c)
self.assertNotEqual(a, d)
self.assertNotEqual(a, e)
self.assertNotEqual(b, a)
self.assertNotEqual(b, c)
self.assertNotEqual(b, d)
self.assertNotEqual(b, e)
self.assertNotEqual(c, a)
self.assertNotEqual(c, b)
self.assertNotEqual(c, d)
self.assertNotEqual(c, e)
self.assertNotEqual(d, a)
self.assertNotEqual(d, b)
self.assertNotEqual(d, c)
self.assertNotEqual(d, e)
self.assertNotEqual(e, a)
self.assertNotEqual(e, b)
self.assertNotEqual(e, c)
self.assertNotEqual(e, d)
self.assertTrue(a < b)
self.assertTrue(a < c)
self.assertTrue(b > a)
self.assertTrue(b > c)
self.assertTrue(c > a)
self.assertTrue(c < b)
self.assertTrue(a <= b)
self.assertTrue(a <= c)
self.assertTrue(a <= a)
self.assertTrue(a >= a)
self.assertTrue(b >= a)
self.assertTrue(b >= c)
self.assertTrue(b >= b)
self.assertTrue(b <= b)
self.assertTrue(c >= a)
self.assertTrue(c <= b)
self.assertTrue(c >= c)
self.assertTrue(c <= c)
# Hash
values = set()
values.add(a)
self.assertTrue(a in values)
self.assertFalse(b in values)
values.add(b)
self.assertTrue(b in values)
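# Illustrative sketch (assumption, not part of the original tests): the
# pattern exercised above mirrors how a provider consumes these objects --
# build the desired record, diff it against the existing one, and act on the
# resulting change:
#
#   zone = Zone('unit.tests.', [])
#   existing = Record.new(zone, 'www',
#                         {'type': 'A', 'ttl': 300, 'value': '1.2.3.4'})
#   desired = Record.new(zone, 'www',
#                        {'type': 'A', 'ttl': 300, 'value': '5.6.7.8'})
#   change = existing.changes(desired, SimpleProvider())
#   # change is an Update with .existing/.new; None would mean nothing to do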
class TestRecordValidation(TestCase):
zone = Zone('unit.tests.', [])
def test_base(self):
# fqdn length, DNS defines max as 253
with self.assertRaises(ValidationError) as ctx:
# The . will put this over the edge
name = 'x' * (253 - len(self.zone.name))
Record.new(self.zone, name, {
'ttl': 300,
'type': 'A',
'value': '1.2.3.4',
})
reason = ctx.exception.reasons[0]
self.assertTrue(reason.startswith('invalid fqdn, "xxxx'))
self.assertTrue(reason.endswith('.unit.tests." is too long at 254'
' chars, max is 253'))
# label length, DNS defines max as 63
with self.assertRaises(ValidationError) as ctx:
# A 64 character label exceeds the 63 char limit
name = 'x' * 64
Record.new(self.zone, name, {
'ttl': 300,
'type': 'A',
'value': '1.2.3.4',
})
reason = ctx.exception.reasons[0]
self.assertTrue(reason.startswith('invalid label, "xxxx'))
self.assertTrue(reason.endswith('xxx" is too long at 64'
' chars, max is 63'))
with self.assertRaises(ValidationError) as ctx:
name = 'foo.' + 'x' * 64 + '.bar'
Record.new(self.zone, name, {
'ttl': 300,
'type': 'A',
'value': '1.2.3.4',
})
reason = ctx.exception.reasons[0]
self.assertTrue(reason.startswith('invalid label, "xxxx'))
self.assertTrue(reason.endswith('xxx" is too long at 64'
' chars, max is 63'))
# should not raise with dots
name = 'xxxxxxxx.' * 10
Record.new(self.zone, name, {
'ttl': 300,
'type': 'A',
'value': '1.2.3.4',
})
# no ttl
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'A',
'value': '1.2.3.4',
})
self.assertEquals(['missing ttl'], ctx.exception.reasons)
# invalid ttl
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'A',
'ttl': -1,
'value': '1.2.3.4',
})
self.assertEquals('www.unit.tests.', ctx.exception.fqdn)
self.assertEquals(['invalid ttl'], ctx.exception.reasons)
# no exception if we're in lenient mode
Record.new(self.zone, 'www', {
'type': 'A',
'ttl': -1,
'value': '1.2.3.4',
}, lenient=True)
# __init__ may still blow up, even if validation is lenient
with self.assertRaises(KeyError) as ctx:
Record.new(self.zone, 'www', {
'type': 'A',
'ttl': -1,
}, lenient=True)
self.assertEquals(('value',), ctx.exception.args)
# no exception if we're in lenient mode from config
Record.new(self.zone, 'www', {
'octodns': {
'lenient': True
},
'type': 'A',
'ttl': -1,
'value': '1.2.3.4',
}, lenient=True)
def test_A_and_values_mixin(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'A',
'ttl': 600,
'value': '1.2.3.4',
})
Record.new(self.zone, '', {
'type': 'A',
'ttl': 600,
'values': [
'1.2.3.4',
]
})
Record.new(self.zone, '', {
'type': 'A',
'ttl': 600,
'values': [
'1.2.3.4',
'1.2.3.5',
]
})
# missing value(s), no value or value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'A',
'ttl': 600,
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# missing value(s), empty values
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'A',
'ttl': 600,
'values': []
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# missing value(s), None values
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'A',
'ttl': 600,
'values': None
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# missing value(s) and empty value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'A',
'ttl': 600,
'values': [None, '']
})
self.assertEquals(['missing value(s)',
'empty value'], ctx.exception.reasons)
# missing value(s), None value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'A',
'ttl': 600,
'value': None
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# empty value, empty string value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'A',
'ttl': 600,
'value': ''
})
self.assertEquals(['empty value'], ctx.exception.reasons)
# missing value(s) & ttl
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'A',
})
self.assertEquals(['missing ttl', 'missing value(s)'],
ctx.exception.reasons)
# invalid ipv4 address
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'A',
'ttl': 600,
'value': 'hello'
})
self.assertEquals(['invalid IPv4 address "hello"'],
ctx.exception.reasons)
# invalid ipv4 addresses
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'A',
'ttl': 600,
'values': ['hello', 'goodbye']
})
self.assertEquals([
'invalid IPv4 address "hello"',
'invalid IPv4 address "goodbye"'
], ctx.exception.reasons)
# invalid & valid ipv4 addresses, no ttl
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'A',
'values': ['1.2.3.4', 'hello', '5.6.7.8']
})
self.assertEquals([
'missing ttl',
'invalid IPv4 address "hello"',
], ctx.exception.reasons)
def test_AAAA_validation(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'value': '2601:644:500:e210:62f8:1dff:feb8:947a',
})
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'values': [
'2601:644:500:e210:62f8:1dff:feb8:947a',
]
})
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'values': [
'2601:644:500:e210:62f8:1dff:feb8:947a',
'2601:642:500:e210:62f8:1dff:feb8:947a',
]
})
# missing value(s), no value or value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# missing value(s), empty values
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'AAAA',
'ttl': 600,
'values': []
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# missing value(s), None values
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'AAAA',
'ttl': 600,
'values': None
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# missing value(s) and empty value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'AAAA',
'ttl': 600,
'values': [None, '']
})
self.assertEquals(['missing value(s)',
'empty value'], ctx.exception.reasons)
# missing value(s), None value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'AAAA',
'ttl': 600,
'value': None
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# empty value, empty string value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'AAAA',
'ttl': 600,
'value': ''
})
self.assertEquals(['empty value'], ctx.exception.reasons)
# missing value(s) & ttl
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'AAAA',
})
self.assertEquals(['missing ttl', 'missing value(s)'],
ctx.exception.reasons)
# invalid IPv6 address
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'value': 'hello'
})
self.assertEquals(['invalid IPv6 address "hello"'],
ctx.exception.reasons)
# invalid IPv6 addresses
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'values': ['hello', 'goodbye']
})
self.assertEquals([
'invalid IPv6 address "hello"',
'invalid IPv6 address "goodbye"'
], ctx.exception.reasons)
# invalid & valid IPv6 addresses, no ttl
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'AAAA',
'values': [
'2601:644:500:e210:62f8:1dff:feb8:947a',
'hello',
'2601:642:500:e210:62f8:1dff:feb8:947a'
]
})
self.assertEquals([
'missing ttl',
'invalid IPv6 address "hello"',
], ctx.exception.reasons)
def test_geo(self):
Record.new(self.zone, '', {
'geo': {
'NA': ['1.2.3.5'],
'NA-US': ['1.2.3.5', '1.2.3.6']
},
'type': 'A',
'ttl': 600,
'value': '1.2.3.4',
})
# invalid ip address
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'geo': {
'NA': ['hello'],
'NA-US': ['1.2.3.5', '1.2.3.6']
},
'type': 'A',
'ttl': 600,
'value': '1.2.3.4',
})
self.assertEquals(['invalid IPv4 address "hello"'],
ctx.exception.reasons)
# invalid geo code
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'geo': {
'XYZ': ['1.2.3.4'],
},
'type': 'A',
'ttl': 600,
'value': '1.2.3.4',
})
self.assertEquals(['invalid geo "XYZ"'], ctx.exception.reasons)
# invalid ip address
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'geo': {
'NA': ['hello'],
'NA-US': ['1.2.3.5', 'goodbye']
},
'type': 'A',
'ttl': 600,
'value': '1.2.3.4',
})
self.assertEquals([
'invalid IPv4 address "hello"',
'invalid IPv4 address "goodbye"'
], ctx.exception.reasons)
# invalid healthcheck protocol
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'a', {
'geo': {
'NA': ['1.2.3.5'],
'NA-US': ['1.2.3.5', '1.2.3.6']
},
'type': 'A',
'ttl': 600,
'value': '1.2.3.4',
'octodns': {
'healthcheck': {
'protocol': 'FTP',
}
}
})
self.assertEquals(['invalid healthcheck protocol'],
ctx.exception.reasons)
def test_AAAA(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'value': '2601:644:500:e210:62f8:1dff:feb8:947a',
})
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'values': [
'2601:644:500:e210:62f8:1dff:feb8:947a',
'2601:644:500:e210:62f8:1dff:feb8:947b',
]
})
# invalid ip address
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'value': 'hello'
})
self.assertEquals(['invalid IPv6 address "hello"'],
ctx.exception.reasons)
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'values': [
'1.2.3.4',
'2.3.4.5',
],
})
self.assertEquals([
'invalid IPv6 address "1.2.3.4"',
'invalid IPv6 address "2.3.4.5"',
], ctx.exception.reasons)
# invalid ip addresses
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'AAAA',
'ttl': 600,
'values': ['hello', 'goodbye']
})
self.assertEquals([
'invalid IPv6 address "hello"',
'invalid IPv6 address "goodbye"'
], ctx.exception.reasons)
def test_ALIAS_and_value_mixin(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
'value': 'foo.bar.com.',
})
# root only
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'nope', {
'type': 'ALIAS',
'ttl': 600,
'value': 'foo.bar.com.',
})
self.assertEquals(['non-root ALIAS not allowed'],
ctx.exception.reasons)
# missing value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
})
self.assertEquals(['missing value'], ctx.exception.reasons)
# missing value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
'value': None
})
self.assertEquals(['missing value'], ctx.exception.reasons)
# empty value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
'value': ''
})
self.assertEquals(['empty value'], ctx.exception.reasons)
# not a valid FQDN
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
'value': '__.',
})
self.assertEquals(['ALIAS value "__." is not a valid FQDN'],
ctx.exception.reasons)
# missing trailing .
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
'value': 'foo.bar.com',
})
self.assertEquals(['ALIAS value "foo.bar.com" missing trailing .'],
ctx.exception.reasons)
def test_CAA(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'CAA',
'ttl': 600,
'value': {
'flags': 128,
'tag': 'iodef',
'value': 'http://foo.bar.com/'
}
})
# invalid flags
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'CAA',
'ttl': 600,
'value': {
'flags': -42,
'tag': 'iodef',
'value': 'http://foo.bar.com/',
}
})
self.assertEquals(['invalid flags "-42"'], ctx.exception.reasons)
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'CAA',
'ttl': 600,
'value': {
'flags': 442,
'tag': 'iodef',
'value': 'http://foo.bar.com/',
}
})
self.assertEquals(['invalid flags "442"'], ctx.exception.reasons)
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'CAA',
'ttl': 600,
'value': {
'flags': 'nope',
'tag': 'iodef',
'value': 'http://foo.bar.com/',
}
})
self.assertEquals(['invalid flags "nope"'], ctx.exception.reasons)
# missing tag
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'CAA',
'ttl': 600,
'value': {
'value': 'http://foo.bar.com/',
}
})
self.assertEquals(['missing tag'], ctx.exception.reasons)
# missing value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'CAA',
'ttl': 600,
'value': {
'tag': 'iodef',
}
})
self.assertEquals(['missing value'], ctx.exception.reasons)
def test_CNAME(self):
# doesn't blow up
Record.new(self.zone, 'www', {
'type': 'CNAME',
'ttl': 600,
'value': 'foo.bar.com.',
})
# root cname is a no-no
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'CNAME',
'ttl': 600,
'value': 'foo.bar.com.',
})
self.assertEquals(['root CNAME not allowed'], ctx.exception.reasons)
# not a valid FQDN
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'CNAME',
'ttl': 600,
'value': '___.',
})
self.assertEquals(['CNAME value "___." is not a valid FQDN'],
ctx.exception.reasons)
# missing trailing .
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'CNAME',
'ttl': 600,
'value': 'foo.bar.com',
})
self.assertEquals(['CNAME value "foo.bar.com" missing trailing .'],
ctx.exception.reasons)
def test_DNAME(self):
# A valid DNAME record.
Record.new(self.zone, 'sub', {
'type': 'DNAME',
'ttl': 600,
'value': 'foo.bar.com.',
})
# A DNAME record can be present at the zone APEX.
Record.new(self.zone, '', {
'type': 'DNAME',
'ttl': 600,
'value': 'foo.bar.com.',
})
# not a valid FQDN
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'DNAME',
'ttl': 600,
'value': '.',
})
self.assertEquals(['DNAME value "." is not a valid FQDN'],
ctx.exception.reasons)
# missing trailing .
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
'type': 'DNAME',
'ttl': 600,
'value': 'foo.bar.com',
})
self.assertEquals(['DNAME value "foo.bar.com" missing trailing .'],
ctx.exception.reasons)
def test_LOC(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
# missing int key
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['missing lat_degrees'], ctx.exception.reasons)
# missing float key
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 58,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['missing lat_seconds'], ctx.exception.reasons)
# missing text key
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 52.1,
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['missing lat_direction'], ctx.exception.reasons)
# invalid direction
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'U',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['invalid direction for lat_direction "U"'],
ctx.exception.reasons)
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'N',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['invalid direction for long_direction "N"'],
ctx.exception.reasons)
# invalid degrees
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 360,
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['invalid value for lat_degrees "360"'],
ctx.exception.reasons)
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 'nope',
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['invalid lat_degrees "nope"'],
ctx.exception.reasons)
# invalid minutes
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 60,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['invalid value for lat_minutes "60"'],
ctx.exception.reasons)
# invalid seconds
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 60,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['invalid value for lat_seconds "60"'],
ctx.exception.reasons)
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 'nope',
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['invalid lat_seconds "nope"'],
ctx.exception.reasons)
# invalid altitude
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': -666666,
'size': 10,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['invalid value for altitude "-666666"'],
ctx.exception.reasons)
# invalid size
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'LOC',
'ttl': 600,
'value': {
'lat_degrees': 31,
'lat_minutes': 58,
'lat_seconds': 52.1,
'lat_direction': 'S',
'long_degrees': 115,
'long_minutes': 49,
'long_seconds': 11.7,
'long_direction': 'E',
'altitude': 20,
'size': 99999999.99,
'precision_horz': 10,
'precision_vert': 2,
}
})
self.assertEquals(['invalid value for size "99999999.99"'],
ctx.exception.reasons)
def test_MX(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'MX',
'ttl': 600,
'value': {
'preference': 10,
'exchange': 'foo.bar.com.'
}
})
# missing preference
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'MX',
'ttl': 600,
'value': {
'exchange': 'foo.bar.com.'
}
})
self.assertEquals(['missing preference'], ctx.exception.reasons)
# invalid preference
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'MX',
'ttl': 600,
'value': {
'preference': 'nope',
'exchange': 'foo.bar.com.'
}
})
self.assertEquals(['invalid preference "nope"'], ctx.exception.reasons)
# missing exchange
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'MX',
'ttl': 600,
'value': {
'preference': 10,
}
})
self.assertEquals(['missing exchange'], ctx.exception.reasons)
# missing trailing .
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'MX',
'ttl': 600,
'value': {
'preference': 10,
'exchange': 'foo.bar.com'
}
})
self.assertEquals(['MX value "foo.bar.com" missing trailing .'],
ctx.exception.reasons)
    def test_NAPTR(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'NAPTR',
'ttl': 600,
'value': {
'order': 10,
'preference': 20,
'flags': 'S',
'service': 'srv',
'regexp': '.*',
'replacement': '.'
}
})
        # each required field missing in turn
value = {
'order': 10,
'preference': 20,
'flags': 'S',
'service': 'srv',
'regexp': '.*',
'replacement': '.'
}
for k in ('order', 'preference', 'flags', 'service', 'regexp',
'replacement'):
v = dict(value)
del v[k]
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'NAPTR',
'ttl': 600,
'value': v
})
self.assertEquals([f'missing {k}'], ctx.exception.reasons)
# non-int order
v = dict(value)
v['order'] = 'boo'
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'NAPTR',
'ttl': 600,
'value': v
})
self.assertEquals(['invalid order "boo"'], ctx.exception.reasons)
# non-int preference
v = dict(value)
v['preference'] = 'who'
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'NAPTR',
'ttl': 600,
'value': v
})
self.assertEquals(['invalid preference "who"'], ctx.exception.reasons)
# unrecognized flags
v = dict(value)
v['flags'] = 'X'
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'NAPTR',
'ttl': 600,
'value': v
})
self.assertEquals(['unrecognized flags "X"'], ctx.exception.reasons)
def test_NS(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'NS',
'ttl': 600,
'values': [
'foo.bar.com.',
'1.2.3.4.'
]
})
# missing value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'NS',
'ttl': 600,
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# no trailing .
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'NS',
'ttl': 600,
'value': 'foo.bar',
})
self.assertEquals(['NS value "foo.bar" missing trailing .'],
ctx.exception.reasons)
def test_PTR(self):
# doesn't blow up (name & zone here don't make any sense, but not
# important)
Record.new(self.zone, '', {
'type': 'PTR',
'ttl': 600,
'value': 'foo.bar.com.',
})
# missing value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'PTR',
'ttl': 600,
})
self.assertEquals(['missing values'], ctx.exception.reasons)
# not a valid FQDN
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'PTR',
'ttl': 600,
'value': '_.',
})
self.assertEquals(['PTR value "_." is not a valid FQDN'],
ctx.exception.reasons)
# no trailing .
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'PTR',
'ttl': 600,
'value': 'foo.bar',
})
self.assertEquals(['PTR value "foo.bar" missing trailing .'],
ctx.exception.reasons)
def test_SSHFP(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'SSHFP',
'ttl': 600,
'value': {
'algorithm': 1,
'fingerprint_type': 1,
'fingerprint': 'bf6b6825d2977c511a475bbefb88aad54a92ac73'
}
})
# missing algorithm
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'SSHFP',
'ttl': 600,
'value': {
'fingerprint_type': 1,
'fingerprint': 'bf6b6825d2977c511a475bbefb88aad54a92ac73'
}
})
self.assertEquals(['missing algorithm'], ctx.exception.reasons)
# invalid algorithm
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'SSHFP',
'ttl': 600,
'value': {
'algorithm': 'nope',
'fingerprint_type': 2,
'fingerprint': 'bf6b6825d2977c511a475bbefb88aad54a92ac73'
}
})
self.assertEquals(['invalid algorithm "nope"'], ctx.exception.reasons)
# unrecognized algorithm
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'SSHFP',
'ttl': 600,
'value': {
'algorithm': 42,
'fingerprint_type': 1,
'fingerprint': 'bf6b6825d2977c511a475bbefb88aad54a92ac73'
}
})
self.assertEquals(['unrecognized algorithm "42"'],
ctx.exception.reasons)
# missing fingerprint_type
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'SSHFP',
'ttl': 600,
'value': {
'algorithm': 2,
'fingerprint': 'bf6b6825d2977c511a475bbefb88aad54a92ac73'
}
})
self.assertEquals(['missing fingerprint_type'], ctx.exception.reasons)
# invalid fingerprint_type
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'SSHFP',
'ttl': 600,
'value': {
'algorithm': 3,
'fingerprint_type': 'yeeah',
'fingerprint': 'bf6b6825d2977c511a475bbefb88aad54a92ac73'
}
})
self.assertEquals(['invalid fingerprint_type "yeeah"'],
ctx.exception.reasons)
# unrecognized fingerprint_type
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'SSHFP',
'ttl': 600,
'value': {
'algorithm': 1,
'fingerprint_type': 42,
'fingerprint': 'bf6b6825d2977c511a475bbefb88aad54a92ac73'
}
})
self.assertEquals(['unrecognized fingerprint_type "42"'],
ctx.exception.reasons)
# missing fingerprint
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'SSHFP',
'ttl': 600,
'value': {
'algorithm': 1,
'fingerprint_type': 1,
}
})
self.assertEquals(['missing fingerprint'], ctx.exception.reasons)
def test_SPF(self):
# doesn't blow up (name & zone here don't make any sense, but not
# important)
Record.new(self.zone, '', {
'type': 'SPF',
'ttl': 600,
'values': [
'v=spf1 ip4:192.168.0.1/16-all',
'v=spf1 ip4:10.1.2.1/24-all',
'this has some\\; semi-colons\\; in it',
]
})
# missing value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'SPF',
'ttl': 600,
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# missing escapes
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'SPF',
'ttl': 600,
'value': 'this has some; semi-colons\\; in it',
})
self.assertEquals(['unescaped ; in "this has some; '
'semi-colons\\; in it"'], ctx.exception.reasons)
def test_SRV(self):
# doesn't blow up
Record.new(self.zone, '_srv._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 1,
'weight': 2,
'port': 3,
'target': 'foo.bar.baz.'
}
})
# permit wildcard entries
Record.new(self.zone, '*._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 1,
'weight': 2,
'port': 3,
'target': 'food.bar.baz.'
}
})
# invalid name
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'neup', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 1,
'weight': 2,
'port': 3,
'target': 'foo.bar.baz.'
}
})
self.assertEquals(['invalid name for SRV record'],
ctx.exception.reasons)
# missing priority
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '_srv._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'weight': 2,
'port': 3,
'target': 'foo.bar.baz.'
}
})
self.assertEquals(['missing priority'], ctx.exception.reasons)
# invalid priority
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '_srv._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 'foo',
'weight': 2,
'port': 3,
'target': 'foo.bar.baz.'
}
})
self.assertEquals(['invalid priority "foo"'], ctx.exception.reasons)
# missing weight
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '_srv._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 1,
'port': 3,
'target': 'foo.bar.baz.'
}
})
self.assertEquals(['missing weight'], ctx.exception.reasons)
# invalid weight
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '_srv._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 1,
'weight': 'foo',
'port': 3,
'target': 'foo.bar.baz.'
}
})
self.assertEquals(['invalid weight "foo"'], ctx.exception.reasons)
# missing port
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '_srv._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 1,
'weight': 2,
'target': 'foo.bar.baz.'
}
})
self.assertEquals(['missing port'], ctx.exception.reasons)
# invalid port
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '_srv._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 1,
'weight': 2,
'port': 'foo',
'target': 'foo.bar.baz.'
}
})
self.assertEquals(['invalid port "foo"'], ctx.exception.reasons)
# missing target
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '_srv._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 1,
'weight': 2,
'port': 3,
}
})
self.assertEquals(['missing target'], ctx.exception.reasons)
# invalid target
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '_srv._tcp', {
'type': 'SRV',
'ttl': 600,
'value': {
'priority': 1,
'weight': 2,
'port': 3,
'target': 'foo.bar.baz'
}
})
self.assertEquals(['SRV value "foo.bar.baz" missing trailing .'],
ctx.exception.reasons)
def test_TXT(self):
# doesn't blow up (name & zone here don't make any sense, but not
# important)
Record.new(self.zone, '', {
'type': 'TXT',
'ttl': 600,
'values': [
'hello world',
'this has some\\; semi-colons\\; in it',
]
})
# missing value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'TXT',
'ttl': 600,
})
self.assertEquals(['missing value(s)'], ctx.exception.reasons)
# missing escapes
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'TXT',
'ttl': 600,
'value': 'this has some; semi-colons\\; in it',
})
self.assertEquals(['unescaped ; in "this has some; semi-colons\\; '
'in it"'], ctx.exception.reasons)
def test_TXT_long_value_chunking(self):
expected = '"Lorem ipsum dolor sit amet, consectetur adipiscing ' \
'elit, sed do eiusmod tempor incididunt ut labore et dolore ' \
'magna aliqua. Ut enim ad minim veniam, quis nostrud ' \
'exercitation ullamco laboris nisi ut aliquip ex ea commodo ' \
'consequat. Duis aute irure dolor i" "n reprehenderit in ' \
'voluptate velit esse cillum dolore eu fugiat nulla pariatur. ' \
'Excepteur sint occaecat cupidatat non proident, sunt in culpa ' \
'qui officia deserunt mollit anim id est laborum."'
long_value = 'Lorem ipsum dolor sit amet, consectetur adipiscing ' \
'elit, sed do eiusmod tempor incididunt ut labore et dolore ' \
'magna aliqua. Ut enim ad minim veniam, quis nostrud ' \
'exercitation ullamco laboris nisi ut aliquip ex ea commodo ' \
'consequat. Duis aute irure dolor in reprehenderit in ' \
'voluptate velit esse cillum dolore eu fugiat nulla ' \
'pariatur. Excepteur sint occaecat cupidatat non proident, ' \
'sunt in culpa qui officia deserunt mollit anim id est ' \
'laborum.'
# Single string
single = Record.new(self.zone, '', {
'type': 'TXT',
'ttl': 600,
'values': [
'hello world',
long_value,
'this has some\\; semi-colons\\; in it',
]
})
self.assertEquals(3, len(single.values))
self.assertEquals(3, len(single.chunked_values))
# Note we are checking that this normalizes the chunking, not that we
# get out what we put in.
self.assertEquals(expected, single.chunked_values[0])
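        # A rough sketch of the chunking rule exercised here (inferred from
        # these expectations, not from a spec): a value longer than 255
        # characters is split into consecutive 255-character pieces, each
        # wrapped in double quotes and joined with single spaces, roughly:
        #   'a' * 300  ->  '"' + 'a' * 255 + '" "' + 'a' * 45 + '"'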
long_split_value = '"Lorem ipsum dolor sit amet, consectetur ' \
'adipiscing elit, sed do eiusmod tempor incididunt ut ' \
'labore et dolore magna aliqua. Ut enim ad minim veniam, ' \
'quis nostrud exercitation ullamco laboris nisi ut aliquip ' \
'ex" " ea commodo consequat. Duis aute irure dolor in ' \
'reprehenderit in voluptate velit esse cillum dolore eu ' \
'fugiat nulla pariatur. Excepteur sint occaecat cupidatat ' \
'non proident, sunt in culpa qui officia deserunt mollit ' \
'anim id est laborum."'
# Chunked
chunked = Record.new(self.zone, '', {
'type': 'TXT',
'ttl': 600,
'values': [
'"hello world"',
long_split_value,
'"this has some\\; semi-colons\\; in it"',
]
})
self.assertEquals(expected, chunked.chunked_values[0])
# should be single values, no quoting
self.assertEquals(single.values, chunked.values)
# should be chunked values, with quoting
self.assertEquals(single.chunked_values, chunked.chunked_values)
def test_URLFWD(self):
# doesn't blow up
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'code': 301,
'masking': 2,
'query': 0,
}
})
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'values': [{
'path': '/',
'target': 'http://foo',
'code': 301,
'masking': 2,
'query': 0,
}, {
'path': '/target',
'target': 'http://target',
'code': 302,
'masking': 2,
'query': 0,
}]
})
# missing path
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'target': 'http://foo',
'code': 301,
'masking': 2,
'query': 0,
}
})
self.assertEquals(['missing path'], ctx.exception.reasons)
# missing target
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'code': 301,
'masking': 2,
'query': 0,
}
})
self.assertEquals(['missing target'], ctx.exception.reasons)
# missing code
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'masking': 2,
'query': 0,
}
})
self.assertEquals(['missing code'], ctx.exception.reasons)
# invalid code
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'code': 'nope',
'masking': 2,
'query': 0,
}
})
self.assertEquals(['invalid return code "nope"'],
ctx.exception.reasons)
# unrecognized code
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'code': 3,
'masking': 2,
'query': 0,
}
})
self.assertEquals(['unrecognized return code "3"'],
ctx.exception.reasons)
# missing masking
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'code': 301,
'query': 0,
}
})
self.assertEquals(['missing masking'], ctx.exception.reasons)
# invalid masking
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'code': 301,
'masking': 'nope',
'query': 0,
}
})
self.assertEquals(['invalid masking setting "nope"'],
ctx.exception.reasons)
# unrecognized masking
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'code': 301,
'masking': 3,
'query': 0,
}
})
self.assertEquals(['unrecognized masking setting "3"'],
ctx.exception.reasons)
# missing query
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'code': 301,
'masking': 2,
}
})
self.assertEquals(['missing query'], ctx.exception.reasons)
# invalid query
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'code': 301,
'masking': 2,
'query': 'nope',
}
})
self.assertEquals(['invalid query setting "nope"'],
ctx.exception.reasons)
# unrecognized query
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
'type': 'URLFWD',
'ttl': 600,
'value': {
'path': '/',
'target': 'http://foo',
'code': 301,
'masking': 2,
'query': 3,
}
})
self.assertEquals(['unrecognized query setting "3"'],
ctx.exception.reasons)
class TestDynamicRecords(TestCase):
zone = Zone('unit.tests.', [])
def test_simple_a_weighted(self):
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'weight': 10,
'value': '3.3.3.3',
}],
},
'two': {
# Testing out of order value sorting here
'values': [{
'value': '5.5.5.5',
}, {
'value': '4.4.4.4',
}],
},
'three': {
'values': [{
'weight': 10,
'value': '4.4.4.4',
}, {
'weight': 12,
'value': '5.5.5.5',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
a = ARecord(self.zone, 'weighted', a_data)
self.assertEquals('A', a._type)
self.assertEquals(a_data['ttl'], a.ttl)
self.assertEquals(a_data['values'], a.values)
dynamic = a.dynamic
self.assertTrue(dynamic)
pools = dynamic.pools
self.assertTrue(pools)
self.assertEquals({
'value': '3.3.3.3',
'weight': 1,
'status': 'obey',
}, pools['one'].data['values'][0])
self.assertEquals([{
'value': '4.4.4.4',
'weight': 1,
'status': 'obey',
}, {
'value': '5.5.5.5',
'weight': 1,
'status': 'obey',
}], pools['two'].data['values'])
self.assertEquals([{
'weight': 10,
'value': '4.4.4.4',
'status': 'obey',
}, {
'weight': 12,
'value': '5.5.5.5',
'status': 'obey',
}], pools['three'].data['values'])
rules = dynamic.rules
self.assertTrue(rules)
self.assertEquals(a_data['dynamic']['rules'][0], rules[0].data)
def test_simple_aaaa_weighted(self):
aaaa_data = {
'dynamic': {
'pools': {
'one': '2601:642:500:e210:62f8:1dff:feb8:9473',
'two': [
'2601:642:500:e210:62f8:1dff:feb8:9474',
'2601:642:500:e210:62f8:1dff:feb8:9475',
],
'three': {
1: '2601:642:500:e210:62f8:1dff:feb8:9476',
2: '2601:642:500:e210:62f8:1dff:feb8:9477',
},
},
'rules': [{
'pools': [
'three',
'two',
'one',
],
}],
},
'ttl': 60,
'values': [
'2601:642:500:e210:62f8:1dff:feb8:9471',
'2601:642:500:e210:62f8:1dff:feb8:9472',
],
}
aaaa_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '2601:642:500:e210:62f8:1dff:feb8:9473',
}],
},
'two': {
# Testing out of order value sorting here
'values': [{
'value': '2601:642:500:e210:62f8:1dff:feb8:9475',
}, {
'value': '2601:642:500:e210:62f8:1dff:feb8:9474',
}],
},
'three': {
'values': [{
'weight': 10,
'value': '2601:642:500:e210:62f8:1dff:feb8:9476',
}, {
'weight': 12,
'value': '2601:642:500:e210:62f8:1dff:feb8:9477',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'values': [
'2601:642:500:e210:62f8:1dff:feb8:9471',
'2601:642:500:e210:62f8:1dff:feb8:9472',
],
}
aaaa = AaaaRecord(self.zone, 'weighted', aaaa_data)
self.assertEquals('AAAA', aaaa._type)
self.assertEquals(aaaa_data['ttl'], aaaa.ttl)
self.assertEquals(aaaa_data['values'], aaaa.values)
dynamic = aaaa.dynamic
self.assertTrue(dynamic)
pools = dynamic.pools
self.assertTrue(pools)
self.assertEquals({
'value': '2601:642:500:e210:62f8:1dff:feb8:9473',
'weight': 1,
'status': 'obey',
}, pools['one'].data['values'][0])
self.assertEquals([{
'value': '2601:642:500:e210:62f8:1dff:feb8:9474',
'weight': 1,
'status': 'obey',
}, {
'value': '2601:642:500:e210:62f8:1dff:feb8:9475',
'weight': 1,
'status': 'obey',
}], pools['two'].data['values'])
self.assertEquals([{
'weight': 10,
'value': '2601:642:500:e210:62f8:1dff:feb8:9476',
'status': 'obey',
}, {
'weight': 12,
'value': '2601:642:500:e210:62f8:1dff:feb8:9477',
'status': 'obey',
}], pools['three'].data['values'])
rules = dynamic.rules
self.assertTrue(rules)
self.assertEquals(aaaa_data['dynamic']['rules'][0], rules[0].data)
def test_simple_cname_weighted(self):
cname_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': 'one.cname.target.',
}],
},
'two': {
'values': [{
'value': 'two.cname.target.',
}],
},
'three': {
'values': [{
'weight': 12,
'value': 'three-1.cname.target.',
}, {
'weight': 32,
'value': 'three-2.cname.target.',
}]
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'value': 'cname.target.',
}
cname = CnameRecord(self.zone, 'weighted', cname_data)
self.assertEquals('CNAME', cname._type)
self.assertEquals(cname_data['ttl'], cname.ttl)
self.assertEquals(cname_data['value'], cname.value)
dynamic = cname.dynamic
self.assertTrue(dynamic)
pools = dynamic.pools
self.assertTrue(pools)
self.assertEquals({
'value': 'one.cname.target.',
'weight': 1,
'status': 'obey',
}, pools['one'].data['values'][0])
self.assertEquals({
'value': 'two.cname.target.',
'weight': 1,
'status': 'obey',
}, pools['two'].data['values'][0])
self.assertEquals([{
'value': 'three-1.cname.target.',
'weight': 12,
'status': 'obey',
}, {
'value': 'three-2.cname.target.',
'weight': 32,
'status': 'obey',
}], pools['three'].data['values'])
rules = dynamic.rules
self.assertTrue(rules)
self.assertEquals(cname_data['dynamic']['rules'][0], rules[0].data)
def test_dynamic_validation(self):
# Missing pools
a_data = {
'dynamic': {
'rules': [{
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['missing pools', 'rule 1 undefined pool "one"'],
ctx.exception.reasons)
# Empty pools
a_data = {
'dynamic': {
'pools': {
},
'rules': [{
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['missing pools', 'rule 1 undefined pool "one"'],
ctx.exception.reasons)
# pools not a dict
a_data = {
'dynamic': {
'pools': [],
'rules': [{
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['pools must be a dict',
'rule 1 undefined pool "one"'],
ctx.exception.reasons)
# Invalid addresses
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': 'this-aint-right',
}],
},
'two': {
'fallback': 'one',
'values': [{
'value': '4.4.4.4',
}, {
'value': 'nor-is-this',
}]
},
'three': {
'fallback': 'two',
'values': [{
'weight': 1,
'value': '5.5.5.5',
}, {
'weight': 2,
'value': 'yet-another-bad-one',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals([
'invalid IPv4 address "this-aint-right"',
'invalid IPv4 address "yet-another-bad-one"',
'invalid IPv4 address "nor-is-this"',
], ctx.exception.reasons)
# missing value(s)
a_data = {
'dynamic': {
'pools': {
'one': {},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
'three': {
'values': [{
'weight': 1,
'value': '6.6.6.6',
}, {
'weight': 2,
'value': '7.7.7.7',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['pool "one" is missing values'],
ctx.exception.reasons)
# pool value not a dict
a_data = {
'dynamic': {
'pools': {
'one': '',
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
'three': {
'values': [{
'weight': 1,
'value': '6.6.6.6',
}, {
'weight': 2,
'value': '7.7.7.7',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['pool "one" must be a dict'],
ctx.exception.reasons)
# empty pool value
a_data = {
'dynamic': {
'pools': {
'one': {},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
'three': {
'values': [{
'weight': 1,
'value': '6.6.6.6',
}, {
'weight': 2,
'value': '7.7.7.7',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['pool "one" is missing values'],
ctx.exception.reasons)
# invalid int weight
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
'three': {
'values': [{
'weight': 1,
'value': '6.6.6.6',
}, {
'weight': 101,
'value': '7.7.7.7',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['invalid weight "101" in pool "three" value 2'],
ctx.exception.reasons)
# invalid non-int weight
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
'three': {
'values': [{
'weight': 1,
'value': '6.6.6.6',
}, {
'weight': 'foo',
'value': '7.7.7.7',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['invalid weight "foo" in pool "three" value 2'],
ctx.exception.reasons)
# single value with weight!=1
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'weight': 12,
'value': '6.6.6.6',
}],
},
},
'rules': [{
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['pool "one" has single value with weight!=1'],
ctx.exception.reasons)
# invalid fallback
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}],
},
'two': {
'fallback': 'invalid',
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
'three': {
'fallback': 'two',
'values': [{
'weight': 1,
'value': '6.6.6.6',
}, {
'weight': 5,
'value': '7.7.7.7',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['undefined fallback "invalid" for pool "two"'],
ctx.exception.reasons)
# fallback loop
a_data = {
'dynamic': {
'pools': {
'one': {
'fallback': 'three',
'values': [{
'value': '3.3.3.3',
}],
},
'two': {
'fallback': 'one',
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
'three': {
'fallback': 'two',
'values': [{
'weight': 1,
'value': '6.6.6.6',
}, {
'weight': 5,
'value': '7.7.7.7',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals([
'loop in pool fallbacks: one -> three -> two',
'loop in pool fallbacks: three -> two -> one',
'loop in pool fallbacks: two -> one -> three'
], ctx.exception.reasons)
# multiple pool problems
a_data = {
'dynamic': {
'pools': {
'one': '',
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': 'blip',
}]
},
'three': {
'values': [{
'weight': 1,
}, {
'weight': 5000,
'value': '7.7.7.7',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals([
'pool "one" must be a dict',
'missing value in pool "three" value 1',
'invalid weight "5000" in pool "three" value 2',
'invalid IPv4 address "blip"',
], ctx.exception.reasons)
# missing rules, and unused pools
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals([
'missing rules',
'unused pools: "one", "two"',
], ctx.exception.reasons)
# empty rules
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals([
'missing rules',
'unused pools: "one", "two"',
], ctx.exception.reasons)
# rules not a list/tuple
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': {},
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals([
'rules must be a list',
'unused pools: "one", "two"',
], ctx.exception.reasons)
# rule without pool
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}],
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': ['NA-US-CA'],
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals([
'rule 1 missing pool',
'unused pools: "two"',
], ctx.exception.reasons)
# rule with non-string pools
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': ['NA-US-CA'],
'pool': [],
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals([
'rule 1 invalid pool "[]"',
'unused pools: "two"',
], ctx.exception.reasons)
# rule references non-existent pool
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': ['NA-US-CA'],
'pool': 'non-existent',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals([
"rule 1 undefined pool \"non-existent\"",
'unused pools: "two"',
], ctx.exception.reasons)
# rule with invalid geos
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': 'NA-US-CA',
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['rule 1 geos must be a list'],
ctx.exception.reasons)
# rule with invalid geo
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': ['invalid'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['rule 1 unknown continent code "invalid"'],
ctx.exception.reasons)
# multiple default rules
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['rule 2 duplicate default'],
ctx.exception.reasons)
# repeated pool in rules
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': ['EU'],
'pool': 'two',
}, {
'geos': ['AF'],
'pool': 'one',
}, {
'geos': ['OC'],
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['rule 3 invalid, target pool "one" reused'],
ctx.exception.reasons)
# Repeated pool is OK if later one is a default
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': ['EU-GB'],
'pool': 'one',
}, {
'geos': ['EU'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
# This should be valid, no exception
Record.new(self.zone, 'bad', a_data)
# invalid status
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '2.2.2.2',
'status': 'none',
}],
},
},
'rules': [{
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': ['1.1.1.1'],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertIn('invalid status', ctx.exception.reasons[0])
def test_dynamic_lenient(self):
# Missing pools
a_data = {
'dynamic': {
'rules': [{
'geos': ['EU'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
a = Record.new(self.zone, 'bad', a_data, lenient=True)
self.assertEquals({
'pools': {},
'rules': a_data['dynamic']['rules'],
}, a._data()['dynamic'])
# Missing rule
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
'weight': 2,
}]
},
},
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
a = Record.new(self.zone, 'bad', a_data, lenient=True)
self.assertEquals({
'pools': {
'one': {
'fallback': None,
'values': [{
'value': '3.3.3.3',
'weight': 1,
'status': 'obey',
}]
},
'two': {
'fallback': None,
'values': [{
'value': '4.4.4.4',
'weight': 1,
'status': 'obey',
}, {
'value': '5.5.5.5',
'weight': 2,
'status': 'obey',
}]
},
},
'rules': [],
}, a._data()['dynamic'])
# rule without pool
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
'weight': 2,
}]
},
},
'rules': [{
'geos': ['EU'],
'pool': 'two',
}, {
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
a = Record.new(self.zone, 'bad', a_data, lenient=True)
self.assertEquals({
'pools': {
'one': {
'fallback': None,
'values': [{
'value': '3.3.3.3',
'weight': 1,
'status': 'obey',
}]
},
'two': {
'fallback': None,
'values': [{
'value': '4.4.4.4',
'weight': 1,
'status': 'obey',
}, {
'value': '5.5.5.5',
'weight': 2,
'status': 'obey',
}]
},
},
'rules': a_data['dynamic']['rules'],
}, a._data()['dynamic'])
def test_dynamic_changes(self):
simple = SimpleProvider()
dynamic = DynamicProvider()
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': ['EU'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
a = ARecord(self.zone, 'weighted', a_data)
dup = ARecord(self.zone, 'weighted', a_data)
b_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
'weight': 2,
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': ['EU'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
b = ARecord(self.zone, 'weighted', b_data)
c_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'geos': ['NA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
c = ARecord(self.zone, 'weighted', c_data)
# a changes a (identical dup) is never true
self.assertFalse(a.changes(dup, simple))
self.assertFalse(a.changes(dup, dynamic))
# a changes b is not true for simple
self.assertFalse(a.changes(b, simple))
# but is true for dynamic
update = a.changes(b, dynamic)
self.assertEquals(a, update.existing)
self.assertEquals(b, update.new)
# transitive
self.assertFalse(b.changes(a, simple))
update = b.changes(a, dynamic)
self.assertEquals(a, update.existing)
self.assertEquals(b, update.new)
# same for a change c
self.assertFalse(a.changes(c, simple))
self.assertTrue(a.changes(c, dynamic))
self.assertFalse(c.changes(a, simple))
self.assertTrue(c.changes(a, dynamic))
        # smoke test some of the equality bits
self.assertEquals(a.dynamic.pools, a.dynamic.pools)
self.assertEquals(a.dynamic.pools['one'], a.dynamic.pools['one'])
self.assertNotEquals(a.dynamic.pools['one'], a.dynamic.pools['two'])
self.assertEquals(a.dynamic.rules, a.dynamic.rules)
self.assertEquals(a.dynamic.rules[0], a.dynamic.rules[0])
self.assertNotEquals(a.dynamic.rules[0], c.dynamic.rules[0])
def test_dynamic_and_geo_validation(self):
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}],
},
'two': {
# Testing out of order value sorting here
'values': [{
'value': '5.5.5.5',
}, {
'value': '4.4.4.4',
}],
},
'three': {
'values': [{
'weight': 10,
'value': '4.4.4.4',
}, {
'weight': 12,
'value': '5.5.5.5',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'geo': {
'NA': ['1.2.3.5'],
'NA-US': ['1.2.3.5', '1.2.3.6']
},
'type': 'A',
'ttl': 60,
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['"dynamic" record with "geo" content'],
ctx.exception.reasons)
def test_dynamic_eqs(self):
pool_one = _DynamicPool('one', {
'values': [{
'value': '1.2.3.4',
}],
})
pool_two = _DynamicPool('two', {
'values': [{
'value': '1.2.3.5',
}],
})
self.assertEquals(pool_one, pool_one)
self.assertNotEquals(pool_one, pool_two)
self.assertNotEquals(pool_one, 42)
pools = {
'one': pool_one,
'two': pool_two,
}
rule_one = _DynamicRule(0, {
'pool': 'one',
})
rule_two = _DynamicRule(1, {
'pool': 'two',
})
self.assertEquals(rule_one, rule_one)
self.assertNotEquals(rule_one, rule_two)
self.assertNotEquals(rule_one, 42)
rules = [
rule_one,
rule_two,
]
dynamic = _Dynamic(pools, rules)
other = _Dynamic({}, [])
self.assertEquals(dynamic, dynamic)
self.assertNotEquals(dynamic, other)
self.assertNotEquals(dynamic, 42)
|
py | 1a4a59414145c63dcee83503d8b4ab3558513e03 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# You can find misc modules, which dont fit in anything xD
""" Userbot module for other small commands. """
from random import randint
from asyncio import sleep
from os import execl
import sys
import os
import io
import json
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, bot
from userbot.events import register
@register(outgoing=True, pattern="^.random")
async def randomise(items):
""" For .random command, get a random item from the list of items. """
itemo = (items.text[8:]).split()
if len(itemo) < 2:
await items.edit(
"`2 or more items are required! Check .help random for more info.`"
)
return
    index = randint(0, len(itemo) - 1)
await items.edit("**Query: **\n`" + items.text[8:] + "`\n**Output: **\n`" +
itemo[index] + "`")
@register(outgoing=True, pattern="^.sleep( [0-9]+)?$")
async def sleepybot(time):
""" For .sleep command, let the userbot snooze for a few second. """
message = time.text
if " " not in time.pattern_match.group(1):
await time.reply("Syntax: `.sleep [seconds]`")
else:
counter = int(time.pattern_match.group(1))
await time.edit("`I am sulking and snoozing....`")
await sleep(2)
if BOTLOG:
await time.client.send_message(
BOTLOG_CHATID,
"You put the bot to sleep for " + str(counter) + " seconds",
)
await sleep(counter)
await time.edit("`OK, I'm awake now.`")
@register(outgoing=True, pattern="^.shutdown$")
async def killdabot(event):
""" For .shutdown command, shut the bot down."""
await event.edit("`Goodbye *Windows XP shutdown sound*....`")
if BOTLOG:
await event.client.send_message(BOTLOG_CHATID, "#SHUTDOWN \n"
"Bot shut down")
await bot.disconnect()
@register(outgoing=True, pattern="^.restart$")
async def restartbot(event):
    """ For .restart command, restart the bot. """
    await event.edit("`*I'll be back in a moment*`")
if BOTLOG:
await event.client.send_message(BOTLOG_CHATID, "#RESTART \n"
"Bot Restarted")
await bot.disconnect()
# Spin a new instance of bot
execl(sys.executable, sys.executable, *sys.argv)
# Shut the existing one down
exit()
@register(outgoing=True, pattern="^.community$")
async def bot_community(community):
""" For .community command, just returns OG Paperplane's group link. """
await community.edit(
"Join RaphielGang's awesome userbot community: @userbot_support"
"\nDo note that Paperplane Extended is an unoficial fork of their "
"Paperplane project and it may get limited or no support for bugs.")
@register(outgoing=True, pattern="^.support$")
async def bot_support(wannahelp):
""" For .support command, just returns the group link. """
await wannahelp.edit(
"Join the OpenUserBot Channel: @PaperPlaneExtended_news \
\nJoin the OpenUserBot Chat: @PPE_Support")
@register(outgoing=True, pattern="^.creator$")
async def creator(e):
await e.edit("[TeKnoways](https://t.me/Three_Cube_TeKnoways)")
@register(outgoing=True, pattern="^.readme$")
async def reedme(e):
await e.edit(
"Here's something for you to read:\n"
"\n[OpenUserBot's README.md file](https://github.com/mkaraniya/OpenUserBot/blob/sql-extended/README.md)"
"\n[Setup Guide - Basic](https://telegra.ph/How-to-host-a-Telegram-Userbot-11-02)"
"\n[Setup Guide - Google Drive](https://telegra.ph/How-To-Setup-GDrive-11-02)"
"\n[Setup Guide - LastFM Module](https://telegra.ph/How-to-set-up-LastFM-module-for-Paperplane-userbot-11-02)"
"\n[Video Tutorial - 576p](https://mega.nz/#!ErwCESbJ!1ZvYAKdTEfb6y1FnqqiLhHH9vZg4UB2QZNYL9fbQ9vs)"
"\n[Video Tutorial - 1080p](https://mega.nz/#!x3JVhYwR!u7Uj0nvD8_CyyARrdKrFqlZEBFTnSVEiqts36HBMr-o)"
"\n[Special - Note](https://telegra.ph/Special-Note-11-02)")
# Copyright (c) Gegham Zakaryan | 2019
@register(outgoing=True, pattern="^.repeat (.*)")
async def repeat(rep):
cnt, txt = rep.pattern_match.group(1).split(' ', 1)
replyCount = int(cnt)
toBeRepeated = txt
replyText = toBeRepeated + "\n"
for i in range(0, replyCount - 1):
replyText += toBeRepeated + "\n"
await rep.edit(replyText)
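# A hypothetical usage sketch (illustrative only, not part of the module):
# sending ".repeat 3 hello" builds replyText == "hello\n" * 3 and edits the
# message into three lines of "hello".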
@register(outgoing=True, pattern="^.repo$")
async def repo_is_here(wannasee):
""" For .repo command, just returns the repo URL. """
await wannasee.edit(
"Click [here](https://github.com/ayixx619/ppek.git) to open my kang userbot page."
)
@register(outgoing=True, pattern="^.raw$")
async def raw(event):
the_real_message = None
reply_to_id = None
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
the_real_message = previous_message.stringify()
reply_to_id = event.reply_to_msg_id
else:
the_real_message = event.stringify()
reply_to_id = event.message.id
with io.BytesIO(str.encode(the_real_message)) as out_file:
out_file.name = "raw_message_data.txt"
await event.edit(
"`Check the userbot log for the decoded message data !!`")
await event.client.send_file(
BOTLOG_CHATID,
out_file,
force_document=True,
allow_cache=False,
reply_to=reply_to_id,
caption="`Here's the decoded message data !!`")
CMD_HELP.update({
'random':
'.random <item1> <item2> ... <itemN>\
\nUsage: Get a random item from the list of items.'
})
CMD_HELP.update({
'sleep':
'.sleep <seconds>\
\nUsage: Userbots get tired too. Let yours snooze for a few seconds.'
})
CMD_HELP.update({
"shutdown":
".shutdown\
\nUsage: Sometimes you need to shut down your bot. Sometimes you just hope to\
hear Windows XP shutdown sound... but you don't."
})
CMD_HELP.update(
{'support': ".support\
\nUsage: If you need help, use this command."})
CMD_HELP.update({
'community':
".community\
\nUsage: Join the awesome Paperplane userbot community !!"
})
CMD_HELP.update({
'repo':
'.repo\
\nUsage: If you are curious what makes the userbot work, this is what you need.'
})
CMD_HELP.update({
"readme":
".readme\
\nUsage: Provide links to setup the userbot and it's modules."
})
CMD_HELP.update(
{"creator": ".creator\
\nUsage: Know who created this awesome userbot !!"})
CMD_HELP.update({
"repeat":
".repeat <no.> <text>\
\nUsage: Repeats the text for a number of times. Don't confuse this with spam tho."
})
CMD_HELP.update({"restart": ".restart\
\nUsage: Restarts the bot !!"})
CMD_HELP.update({
"raw":
".raw\
\nUsage: Get detailed JSON-like formatted data about replied message."
})
|
py | 1a4a5a3dbc54b0c6419e7b57ad849eaf83f10459 | # coding: utf-8
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the collections editor."""
from core.controllers import base
from core.domain import collection_services
from core.domain import config_domain
from core.domain import rights_manager
from core.domain import summary_services
from core.platform import models
import feconf
import utils
current_user_services = models.Registry.import_current_user_services()
def _require_valid_version(version_from_payload, collection_version):
"""Check that the payload version matches the given collection version."""
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != collection_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of collection from version %s, '
'which is too old. Please reload the page and try again.'
% (collection_version, version_from_payload))
def require_editor(handler):
"""Decorator that checks if the user can edit the given collection."""
def test_collection_editor(self, collection_id, **kwargs):
"""Gets the user and collection id if the user can edit it.
Args:
self: the handler instance
collection_id: the collection id
**kwargs: any other arguments passed to the handler
Returns:
The relevant handler, if the user is authorized to edit this
collection.
Raises:
self.PageNotFoundException: if no such collection exists.
self.UnauthorizedUserException: if the user exists but does not
have the right credentials.
"""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
if (self.username in config_domain.BANNED_USERNAMES.value
or self.username not in
config_domain.WHITELISTED_COLLECTION_EDITOR_USERNAMES.value):
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
try:
collection_services.get_collection_by_id(collection_id)
        except Exception:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_edit(
feconf.ACTIVITY_TYPE_COLLECTION, collection_id):
raise self.UnauthorizedUserException(
'You do not have the credentials to edit this collection.',
self.user_id)
return handler(self, collection_id, **kwargs)
return test_collection_editor
class CollectionEditorHandler(base.BaseHandler):
"""Base class for all handlers for the collection editor page."""
pass
class CollectionEditorPage(CollectionEditorHandler):
"""The editor page for a single collection."""
@require_editor
def get(self, collection_id):
"""Handles GET requests."""
collection = collection_services.get_collection_by_id(
collection_id, strict=False)
self.values.update({
'can_edit': True,
'can_unpublish': rights_manager.Actor(
self.user_id).can_unpublish(
feconf.ACTIVITY_TYPE_COLLECTION, collection_id),
'collection_id': collection.id,
'is_private': rights_manager.is_collection_private(collection_id),
'nav_mode': feconf.NAV_MODE_CREATE,
'title': collection.title,
'SHOW_COLLECTION_NAVIGATION_TAB_HISTORY': (
feconf.SHOW_COLLECTION_NAVIGATION_TAB_HISTORY),
'SHOW_COLLECTION_NAVIGATION_TAB_STATS': (
feconf.SHOW_COLLECTION_NAVIGATION_TAB_STATS),
'TAG_REGEX': feconf.TAG_REGEX,
})
self.render_template('pages/collection_editor/collection_editor.html')
class EditableCollectionDataHandler(CollectionEditorHandler):
"""A data handler for collections which supports writing."""
def _require_valid_version(self, version_from_payload, collection_version):
"""Check that the payload version matches the given collection version.
"""
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != collection_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of collection from version %s, '
'which is too old. Please reload the page and try again.'
% (collection_version, version_from_payload))
@require_editor
def get(self, collection_id):
"""Populates the data on the individual collection page."""
try:
# Try to retrieve collection
collection_dict = (
summary_services.get_learner_collection_dict_by_id(
collection_id, self.user_id,
allow_invalid_explorations=True))
except Exception as e:
raise self.PageNotFoundException(e)
self.values.update({
'collection': collection_dict
})
self.render_json(self.values)
@require_editor
def put(self, collection_id):
"""Updates properties of the given collection."""
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
self._require_valid_version(version, collection.version)
commit_message = self.payload.get('commit_message')
change_list = self.payload.get('change_list')
try:
collection_services.update_collection(
self.user_id, collection_id, change_list, commit_message)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
collection_dict = (
summary_services.get_learner_collection_dict_by_id(
collection_id, self.user_id, allow_invalid_explorations=True))
# Send the updated collection back to the frontend.
self.values.update({
'collection': collection_dict
})
self.render_json(self.values)
class CollectionRightsHandler(CollectionEditorHandler):
"""Handles management of collection editing rights."""
@require_editor
def put(self, collection_id):
"""Updates the editing rights for the given collection."""
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
_require_valid_version(version, collection.version)
# TODO(bhenning): Implement other rights changes here.
is_public = self.payload.get('is_public')
if is_public is not None:
if is_public:
try:
collection.validate(strict=True)
collection_services.validate_exps_in_collection_are_public(
collection)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
collection_services.publish_collection_and_update_user_profiles(
self.user_id, collection_id)
collection_services.index_collections_given_ids([
collection_id])
elif rights_manager.Actor(self.user_id).can_unpublish(
feconf.ACTIVITY_TYPE_COLLECTION, collection_id):
rights_manager.unpublish_collection(self.user_id, collection_id)
collection_services.delete_documents_from_search_index([
collection_id])
else:
raise self.InvalidInputException(
'Cannot unpublish a collection.')
self.render_json({
'rights': rights_manager.get_collection_rights(
collection_id).to_dict()
})
class ExplorationMetadataSearchHandler(base.BaseHandler):
"""Provides data for exploration search."""
def get(self):
"""Handles GET requests."""
query_string = self.request.get('q')
search_cursor = self.request.get('cursor', None)
collection_node_metadata_list, new_search_cursor = (
summary_services.get_exp_metadata_dicts_matching_query(
query_string, search_cursor, self.user_id))
self.values.update({
'collection_node_metadata_list': collection_node_metadata_list,
'search_cursor': new_search_cursor,
})
self.render_json(self.values)
|
py | 1a4a5a54e9ae54ce3e8f29458a6727424ec17666 | import json
class CustomAction:
"""
Class CustomAction.
Emulates a custom action: a preconfigured set of changes that are applied to a work package.
"""
def __init__(self, json_obj):
"""Constructor for class CustomAction
:param json_obj: The dict with the object data
"""
self.__dict__ = json_obj
def __str__(self):
"""
Returns the object as a JSON string
:return: JSON as a string
"""
return json.dumps(self.__dict__)
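# Editor's note: a minimal usage sketch appended for illustration; it is not
# part of the original module. The "name" and "status" keys below are made-up
# examples -- any JSON-decoded dict works, because the constructor adopts the
# dict directly as the instance's __dict__.
if __name__ == "__main__":
    payload = json.loads('{"name": "Escalate", "status": "urgent"}')
    action = CustomAction(payload)
    print(action.name)   # attributes come straight from the dict keys
    print(str(action))   # __str__ serialises the object back to JSON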
|
py | 1a4a5b02ba186d25ada1a0a42c7f1bf3cb6b57ea | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.agent import agent
from lib.core.common import arrayizeValue
from lib.core.common import Backend
from lib.core.common import filterPairValues
from lib.core.common import getLimitRange
from lib.core.common import isInferenceAvailable
from lib.core.common import isNoneValue
from lib.core.common import isNumPosStrValue
from lib.core.common import isTechniqueAvailable
from lib.core.common import readInput
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import safeStringFormat
from lib.core.common import unArrayizeValue
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.data import queries
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import DBMS
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import CURRENT_DB
from lib.core.settings import METADB_SUFFIX
from lib.request import inject
from lib.techniques.brute.use import columnExists
from lib.techniques.brute.use import tableExists
class Search:
"""
This class defines search functionalities for plugins.
"""
def __init__(self):
pass
def searchDb(self):
foundDbs = []
rootQuery = queries[Backend.getIdentifiedDbms()].search_db
dbList = conf.db.split(",")
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
dbCond = rootQuery.inband.condition2
else:
dbCond = rootQuery.inband.condition
dbConsider, dbCondParam = self.likeOrExact("database")
for db in dbList:
values = []
db = safeSQLIdentificatorNaming(db)
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
db = db.upper()
infoMsg = "searching database"
if dbConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(db)
logger.info(infoMsg)
if conf.excludeSysDbs:
exclDbsQuery = "".join(" AND '%s' != %s" % (unsafeSQLIdentificatorNaming(db), dbCond) for db in self.excludeDbsList)
infoMsg = "skipping system database%s '%s'" % ("s" if len(self.excludeDbsList) > 1 else "", ", ".join(db for db in self.excludeDbsList))
logger.info(infoMsg)
else:
exclDbsQuery = ""
dbQuery = "%s%s" % (dbCond, dbCondParam)
dbQuery = dbQuery % unsafeSQLIdentificatorNaming(db)
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
query = rootQuery.inband.query2
else:
query = rootQuery.inband.query
query = query % (dbQuery + exclDbsQuery)
values = inject.getValue(query, blind=False, time=False)
if not isNoneValue(values):
values = arrayizeValue(values)
for value in values:
value = safeSQLIdentificatorNaming(value)
foundDbs.append(value)
if not values and isInferenceAvailable() and not conf.direct:
infoMsg = "fetching number of database"
if dbConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(db)
logger.info(infoMsg)
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
query = rootQuery.blind.count2
else:
query = rootQuery.blind.count
query = query % (dbQuery + exclDbsQuery)
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no database"
if dbConsider == "1":
warnMsg += "s like"
warnMsg += " '%s' found" % unsafeSQLIdentificatorNaming(db)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
query = rootQuery.blind.query2
else:
query = rootQuery.blind.query
query = query % (dbQuery + exclDbsQuery)
query = agent.limitQuery(index, query, dbCond)
value = unArrayizeValue(inject.getValue(query, union=False, error=False))
value = safeSQLIdentificatorNaming(value)
foundDbs.append(value)
conf.dumper.lister("found databases", foundDbs)
def searchTable(self):
bruteForce = False
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
errMsg = "information_schema not available, "
errMsg += "back-end DBMS is MySQL < 5.0"
bruteForce = True
if bruteForce:
message = "do you want to use common table existence check? %s" % ("[Y/n/q]" if Backend.getIdentifiedDbms() in (DBMS.ACCESS,) else "[y/N/q]")
test = readInput(message, default="Y" if "Y" in message else "N")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
regex = "|".join(conf.tbl.split(","))
return tableExists(paths.COMMON_TABLES, regex)
foundTbls = {}
tblList = conf.tbl.split(",")
rootQuery = queries[Backend.getIdentifiedDbms()].search_table
tblCond = rootQuery.inband.condition
dbCond = rootQuery.inband.condition2
tblConsider, tblCondParam = self.likeOrExact("table")
for tbl in tblList:
values = []
tbl = safeSQLIdentificatorNaming(tbl, True)
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2, DBMS.FIREBIRD):
tbl = tbl.upper()
infoMsg = "searching table"
if tblConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(tbl)
if dbCond and conf.db and conf.db != CURRENT_DB:
_ = conf.db.split(",")
whereDbsQuery = " AND (" + " OR ".join("%s = '%s'" % (dbCond, unsafeSQLIdentificatorNaming(db)) for db in _) + ")"
infoMsg += " for database%s '%s'" % ("s" if len(_) > 1 else "", ", ".join(db for db in _))
elif conf.excludeSysDbs:
whereDbsQuery = "".join(" AND '%s' != %s" % (unsafeSQLIdentificatorNaming(db), dbCond) for db in self.excludeDbsList)
infoMsg2 = "skipping system database%s '%s'" % ("s" if len(self.excludeDbsList) > 1 else "", ", ".join(db for db in self.excludeDbsList))
logger.info(infoMsg2)
else:
whereDbsQuery = ""
logger.info(infoMsg)
tblQuery = "%s%s" % (tblCond, tblCondParam)
tblQuery = tblQuery % unsafeSQLIdentificatorNaming(tbl)
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
query = rootQuery.inband.query
query = query % (tblQuery + whereDbsQuery)
values = inject.getValue(query, blind=False, time=False)
if values and Backend.getIdentifiedDbms() in (DBMS.SQLITE, DBMS.FIREBIRD):
newValues = []
if isinstance(values, basestring):
values = [values]
for value in values:
dbName = "SQLite" if Backend.isDbms(DBMS.SQLITE) else "Firebird"
newValues.append(["%s%s" % (dbName, METADB_SUFFIX), value])
values = newValues
for foundDb, foundTbl in filterPairValues(values):
foundDb = safeSQLIdentificatorNaming(foundDb)
foundTbl = safeSQLIdentificatorNaming(foundTbl, True)
if foundDb is None or foundTbl is None:
continue
if foundDb in foundTbls:
foundTbls[foundDb].append(foundTbl)
else:
foundTbls[foundDb] = [foundTbl]
if not values and isInferenceAvailable() and not conf.direct:
if Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.FIREBIRD):
if len(whereDbsQuery) == 0:
infoMsg = "fetching number of databases with table"
if tblConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(tbl)
logger.info(infoMsg)
query = rootQuery.blind.count
query = query % (tblQuery + whereDbsQuery)
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no databases have table"
if tblConsider == "1":
warnMsg += "s like"
warnMsg += " '%s'" % unsafeSQLIdentificatorNaming(tbl)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query
query = query % (tblQuery + whereDbsQuery)
query = agent.limitQuery(index, query)
foundDb = unArrayizeValue(inject.getValue(query, union=False, error=False))
foundDb = safeSQLIdentificatorNaming(foundDb)
if foundDb not in foundTbls:
foundTbls[foundDb] = []
if tblConsider == "2":
foundTbls[foundDb].append(tbl)
if tblConsider == "2":
continue
else:
for db in conf.db.split(",") if conf.db else (self.getCurrentDb(),):
db = safeSQLIdentificatorNaming(db)
if db not in foundTbls:
foundTbls[db] = []
else:
dbName = "SQLite" if Backend.isDbms(DBMS.SQLITE) else "Firebird"
foundTbls["%s%s" % (dbName, METADB_SUFFIX)] = []
for db in foundTbls.keys():
db = safeSQLIdentificatorNaming(db)
infoMsg = "fetching number of table"
if tblConsider == "1":
infoMsg += "s like"
infoMsg += " '%s' in database '%s'" % (unsafeSQLIdentificatorNaming(tbl), unsafeSQLIdentificatorNaming(db))
logger.info(infoMsg)
query = rootQuery.blind.count2
if Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.FIREBIRD):
query = query % unsafeSQLIdentificatorNaming(db)
query += " AND %s" % tblQuery
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no table"
if tblConsider == "1":
warnMsg += "s like"
warnMsg += " '%s' " % unsafeSQLIdentificatorNaming(tbl)
warnMsg += "in database '%s'" % unsafeSQLIdentificatorNaming(db)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query2
if query.endswith("'%s')"):
query = query[:-1] + " AND %s)" % tblQuery
else:
query += " AND %s" % tblQuery
if Backend.isDbms(DBMS.FIREBIRD):
query = safeStringFormat(query, index)
if Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.FIREBIRD):
query = safeStringFormat(query, unsafeSQLIdentificatorNaming(db))
if not Backend.isDbms(DBMS.FIREBIRD):
query = agent.limitQuery(index, query)
foundTbl = unArrayizeValue(inject.getValue(query, union=False, error=False))
if not isNoneValue(foundTbl):
kb.hintValue = foundTbl
foundTbl = safeSQLIdentificatorNaming(foundTbl, True)
foundTbls[db].append(foundTbl)
for db in foundTbls.keys():
if isNoneValue(foundTbls[db]):
del foundTbls[db]
if not foundTbls:
warnMsg = "no databases contain any of the provided tables"
logger.warn(warnMsg)
return
conf.dumper.dbTables(foundTbls)
self.dumpFoundTables(foundTbls)
def searchColumn(self):
bruteForce = False
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
errMsg = "information_schema not available, "
errMsg += "back-end DBMS is MySQL < 5.0"
bruteForce = True
if bruteForce:
message = "do you want to use common column existence check? %s" % ("[Y/n/q]" if Backend.getIdentifiedDbms() in (DBMS.ACCESS,) else "[y/N/q]")
test = readInput(message, default="Y" if "Y" in message else "N")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
regex = '|'.join(conf.col.split(','))
conf.dumper.dbTableColumns(columnExists(paths.COMMON_COLUMNS, regex))
message = "do you want to dump entries? [Y/n] "
output = readInput(message, default="Y")
if output and output[0] not in ("n", "N"):
self.dumpAll()
return
rootQuery = queries[Backend.getIdentifiedDbms()].search_column
foundCols = {}
dbs = {}
whereDbsQuery = ""
whereTblsQuery = ""
infoMsgTbl = ""
infoMsgDb = ""
colList = conf.col.split(",")
if conf.excludeCol:
colList = [_ for _ in colList if _ not in conf.excludeCol.split(',')]
origTbl = conf.tbl
origDb = conf.db
colCond = rootQuery.inband.condition
dbCond = rootQuery.inband.condition2
tblCond = rootQuery.inband.condition3
colConsider, colCondParam = self.likeOrExact("column")
for column in colList:
values = []
column = safeSQLIdentificatorNaming(column)
conf.db = origDb
conf.tbl = origTbl
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
column = column.upper()
infoMsg = "searching column"
if colConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(column)
foundCols[column] = {}
if conf.tbl:
_ = conf.tbl.split(",")
whereTblsQuery = " AND (" + " OR ".join("%s = '%s'" % (tblCond, unsafeSQLIdentificatorNaming(tbl)) for tbl in _) + ")"
infoMsgTbl = " for table%s '%s'" % ("s" if len(_) > 1 else "", ", ".join(unsafeSQLIdentificatorNaming(tbl) for tbl in _))
if conf.db and conf.db != CURRENT_DB:
_ = conf.db.split(",")
whereDbsQuery = " AND (" + " OR ".join("%s = '%s'" % (dbCond, unsafeSQLIdentificatorNaming(db)) for db in _) + ")"
infoMsgDb = " in database%s '%s'" % ("s" if len(_) > 1 else "", ", ".join(unsafeSQLIdentificatorNaming(db) for db in _))
elif conf.excludeSysDbs:
whereDbsQuery = "".join(" AND %s != '%s'" % (dbCond, unsafeSQLIdentificatorNaming(db)) for db in self.excludeDbsList)
infoMsg2 = "skipping system database%s '%s'" % ("s" if len(self.excludeDbsList) > 1 else "", ", ".join(unsafeSQLIdentificatorNaming(db) for db in self.excludeDbsList))
logger.info(infoMsg2)
else:
infoMsgDb = " across all databases"
logger.info("%s%s%s" % (infoMsg, infoMsgTbl, infoMsgDb))
colQuery = "%s%s" % (colCond, colCondParam)
colQuery = colQuery % unsafeSQLIdentificatorNaming(column)
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
if not all((conf.db, conf.tbl)):
# Enumerate tables containing the column provided if
# either of database(s) or table(s) is not provided
query = rootQuery.inband.query
query = query % (colQuery + whereDbsQuery + whereTblsQuery)
values = inject.getValue(query, blind=False, time=False)
else:
# Assume provided databases' tables contain the
# column(s) provided
values = []
for db in conf.db.split(","):
for tbl in conf.tbl.split(","):
values.append([safeSQLIdentificatorNaming(db), safeSQLIdentificatorNaming(tbl, True)])
for db, tbl in filterPairValues(values):
db = safeSQLIdentificatorNaming(db)
tbls = tbl.split(",") if not isNoneValue(tbl) else []
for tbl in tbls:
tbl = safeSQLIdentificatorNaming(tbl, True)
if db is None or tbl is None:
continue
conf.db = db
conf.tbl = tbl
conf.col = column
self.getColumns(onlyColNames=True, colTuple=(colConsider, colCondParam), bruteForce=False)
if db in kb.data.cachedColumns and tbl in kb.data.cachedColumns[db]:
if db not in dbs:
dbs[db] = {}
if tbl not in dbs[db]:
dbs[db][tbl] = {}
dbs[db][tbl].update(kb.data.cachedColumns[db][tbl])
if db in foundCols[column]:
foundCols[column][db].append(tbl)
else:
foundCols[column][db] = [tbl]
kb.data.cachedColumns = {}
if not values and isInferenceAvailable() and not conf.direct:
if not conf.db:
infoMsg = "fetching number of databases with tables containing column"
if colConsider == "1":
infoMsg += "s like"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(column)
logger.info("%s%s%s" % (infoMsg, infoMsgTbl, infoMsgDb))
query = rootQuery.blind.count
query = query % (colQuery + whereDbsQuery + whereTblsQuery)
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no databases have tables containing column"
if colConsider == "1":
warnMsg += "s like"
warnMsg += " '%s'" % unsafeSQLIdentificatorNaming(column)
logger.warn("%s%s" % (warnMsg, infoMsgTbl))
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query
query = query % (colQuery + whereDbsQuery + whereTblsQuery)
query = agent.limitQuery(index, query)
db = unArrayizeValue(inject.getValue(query, union=False, error=False))
db = safeSQLIdentificatorNaming(db)
if db not in dbs:
dbs[db] = {}
if db not in foundCols[column]:
foundCols[column][db] = []
else:
for db in conf.db.split(",") if conf.db else (self.getCurrentDb(),):
db = safeSQLIdentificatorNaming(db)
if db not in foundCols[column]:
foundCols[column][db] = []
origDb = conf.db
origTbl = conf.tbl
for column, dbData in foundCols.items():
colQuery = "%s%s" % (colCond, colCondParam)
colQuery = colQuery % unsafeSQLIdentificatorNaming(column)
for db in dbData:
conf.db = origDb
conf.tbl = origTbl
infoMsg = "fetching number of tables containing column"
if colConsider == "1":
infoMsg += "s like"
infoMsg += " '%s' in database '%s'" % (unsafeSQLIdentificatorNaming(column), unsafeSQLIdentificatorNaming(db))
logger.info(infoMsg)
query = rootQuery.blind.count2
query = query % unsafeSQLIdentificatorNaming(db)
query += " AND %s" % colQuery
query += whereTblsQuery
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no tables contain column"
if colConsider == "1":
warnMsg += "s like"
warnMsg += " '%s' " % unsafeSQLIdentificatorNaming(column)
warnMsg += "in database '%s'" % unsafeSQLIdentificatorNaming(db)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query2
if query.endswith("'%s')"):
query = query[:-1] + " AND %s)" % (colQuery + whereTblsQuery)
else:
query += " AND %s" % (colQuery + whereTblsQuery)
query = safeStringFormat(query, unsafeSQLIdentificatorNaming(db))
query = agent.limitQuery(index, query)
tbl = unArrayizeValue(inject.getValue(query, union=False, error=False))
kb.hintValue = tbl
tbl = safeSQLIdentificatorNaming(tbl, True)
conf.db = db
conf.tbl = tbl
conf.col = column
self.getColumns(onlyColNames=True, colTuple=(colConsider, colCondParam), bruteForce=False)
if db in kb.data.cachedColumns and tbl in kb.data.cachedColumns[db]:
if db not in dbs:
dbs[db] = {}
if tbl not in dbs[db]:
dbs[db][tbl] = {}
dbs[db][tbl].update(kb.data.cachedColumns[db][tbl])
kb.data.cachedColumns = {}
if db in foundCols[column]:
foundCols[column][db].append(tbl)
else:
foundCols[column][db] = [tbl]
if dbs:
conf.dumper.dbColumns(foundCols, colConsider, dbs)
self.dumpFoundColumn(dbs, foundCols, colConsider)
else:
warnMsg = "no databases have tables containing any of the "
warnMsg += "provided columns"
logger.warn(warnMsg)
def search(self):
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
for item in ('db', 'tbl', 'col'):
if getattr(conf, item, None):
setattr(conf, item, getattr(conf, item).upper())
if conf.col:
self.searchColumn()
elif conf.tbl:
self.searchTable()
elif conf.db:
self.searchDb()
else:
errMsg = "missing parameter, provide -D, -T or -C along "
errMsg += "with --search"
raise SqlmapMissingMandatoryOptionException(errMsg)
|
py | 1a4a5b295a38549eb8d82bf8bfe8ea901d2b72ec | import disarray
import pandas as pd
import unittest
from disarray.metrics import __all_metrics__
class TestDisarray(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.df_binary = pd.DataFrame([[50, 10], [10, 30]], dtype=int)
cls.classes = ["setosa", "versicolor", "virginica"]
cls.df_multi = pd.DataFrame(
[[13, 0, 0], [0, 10, 6], [0, 0, 9]],
index=cls.classes,
columns=cls.classes,
dtype=int,
)
def test_all_metrics(self):
with self.assertRaises(AttributeError):
getattr(self.df_binary.da, "unused-metric")
detected_metrics = []
for metric in __all_metrics__:
if isinstance(getattr(self.df_binary.da, metric), pd.Series):
detected_metrics.append(metric)
self.assertCountEqual(__all_metrics__, detected_metrics)
def test_accuracy(self):
self.assertAlmostEqual(self.df_binary.da.accuracy.loc[1], 0.80, 2)
self.assertAlmostEqual(self.df_binary.da.micro_accuracy, 0.80, 2)
self.assertAlmostEqual(self.df_multi.da.accuracy.loc["setosa"], 1.0, 2)
self.assertAlmostEqual(self.df_multi.da.micro_accuracy, 0.89, 2)
def test_f1(self):
self.assertAlmostEqual(self.df_binary.da.f1.loc[1], 0.75, 2)
self.assertAlmostEqual(self.df_binary.da.micro_f1, 0.80, 2)
self.assertAlmostEqual(self.df_multi.da.f1.loc["setosa"], 1.0, 2)
self.assertAlmostEqual(self.df_multi.da.micro_f1, 0.84, 2)
def test_false_discovery_rate(self):
self.assertAlmostEqual(
self.df_binary.da.false_discovery_rate.loc[0], 0.17, 2
)
self.assertAlmostEqual(
self.df_binary.da.micro_false_discovery_rate, 0.20, 2
)
self.assertAlmostEqual(
self.df_multi.da.false_discovery_rate.loc["setosa"], 0.0, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_false_discovery_rate, 0.16, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_false_discovery_rate,
1 - self.df_multi.da.micro_precision,
2,
)
def test_false_negative_rate(self):
self.assertAlmostEqual(
self.df_binary.da.false_negative_rate.loc[0], 0.166, 2
)
self.assertAlmostEqual(
self.df_binary.da.micro_false_negative_rate, 0.20, 2
)
self.assertAlmostEqual(
self.df_multi.da.false_negative_rate.loc["setosa"], 0.0, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_false_negative_rate, 0.16, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_false_negative_rate,
1 - self.df_multi.da.micro_true_positive_rate,
2,
)
def test_false_positive_rate(self):
self.assertAlmostEqual(
self.df_binary.da.false_positive_rate.loc[0], 0.25, 2
)
self.assertAlmostEqual(
self.df_binary.da.micro_false_positive_rate, 0.20, 2
)
self.assertAlmostEqual(
self.df_multi.da.false_positive_rate.loc["setosa"], 0.0, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_false_positive_rate, 0.08, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_false_positive_rate,
1 - self.df_multi.da.micro_true_negative_rate,
2,
)
def test_negative_predictive_value(self):
self.assertAlmostEqual(
self.df_binary.da.negative_predictive_value.loc[0], 0.75, 2
)
self.assertAlmostEqual(
self.df_binary.da.micro_negative_predictive_value, 0.80, 2
)
self.assertAlmostEqual(
self.df_multi.da.negative_predictive_value.loc["setosa"], 1.0, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_negative_predictive_value, 0.92, 2
)
def test_positive_predictive_value(self):
self.assertAlmostEqual(
self.df_binary.da.positive_predictive_value.loc[0], 0.83, 2
)
self.assertAlmostEqual(
self.df_binary.da.micro_positive_predictive_value, 0.80, 2
)
self.assertAlmostEqual(
self.df_multi.da.positive_predictive_value.loc["setosa"], 1.0, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_positive_predictive_value, 0.84, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_positive_predictive_value,
1 - self.df_multi.da.micro_false_discovery_rate,
2,
)
def test_precision(self):
self.assertAlmostEqual(self.df_binary.da.precision.loc[1], 0.75, 2)
self.assertAlmostEqual(self.df_binary.da.micro_precision, 0.80, 2)
self.assertAlmostEqual(self.df_multi.da.precision.loc["setosa"], 1.0, 2)
self.assertAlmostEqual(self.df_multi.da.micro_precision, 0.84, 2)
self.assertAlmostEqual(
self.df_multi.da.micro_precision,
1 - self.df_multi.da.micro_false_discovery_rate,
2,
)
def test_recall(self):
self.assertAlmostEqual(self.df_binary.da.recall.loc[1], 0.75, 2)
self.assertAlmostEqual(self.df_binary.da.micro_recall, 0.80, 2)
self.assertAlmostEqual(self.df_multi.da.recall.loc["setosa"], 1.0, 2)
self.assertAlmostEqual(self.df_multi.da.micro_recall, 0.84, 2)
self.assertAlmostEqual(
self.df_multi.da.micro_recall,
1 - self.df_multi.da.micro_false_discovery_rate,
2,
)
def test_specificity(self):
self.assertAlmostEqual(self.df_binary.da.specificity.loc[0], 0.75, 2)
self.assertAlmostEqual(self.df_binary.da.micro_specificity, 0.80, 2)
self.assertAlmostEqual(
self.df_multi.da.specificity.loc["setosa"], 1.0, 2
)
self.assertAlmostEqual(self.df_multi.da.micro_specificity, 0.92, 2)
self.assertAlmostEqual(
self.df_multi.da.micro_specificity,
1 - self.df_multi.da.micro_false_positive_rate,
2,
)
def test_true_negative_rate(self):
self.assertAlmostEqual(
self.df_binary.da.true_negative_rate.loc[0], 0.75, 2
)
self.assertAlmostEqual(
self.df_binary.da.micro_true_negative_rate, 0.80, 2
)
self.assertAlmostEqual(
self.df_multi.da.true_negative_rate.loc["setosa"], 1.0, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_true_negative_rate, 0.92, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_true_negative_rate,
1 - self.df_multi.da.micro_false_positive_rate,
2,
)
def test_true_positive_rate(self):
self.assertAlmostEqual(
self.df_binary.da.true_positive_rate.loc[0], 0.83, 2
)
self.assertAlmostEqual(
self.df_binary.da.micro_true_positive_rate, 0.80, 2
)
self.assertAlmostEqual(
self.df_multi.da.true_positive_rate.loc["setosa"], 1.0, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_true_positive_rate, 0.84, 2
)
self.assertAlmostEqual(
self.df_multi.da.micro_true_positive_rate,
1 - self.df_multi.da.micro_false_negative_rate,
2,
)
def test_export_metrics(self):
cm = self.df_binary.da.export_metrics(metrics_to_include=None)
self.assertListEqual(cm.index.tolist(), __all_metrics__)
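# Editor's note: an illustrative derivation of the binary expectations above;
# it is not part of the original test suite and is never called by it. Only
# the same 2x2 confusion matrix and plain pandas arithmetic are used, and the
# matrix is symmetric, so row/column orientation does not change the result.
def _sketch_binary_metrics():
    cm = pd.DataFrame([[50, 10], [10, 30]], dtype=int)
    tp = cm.loc[1, 1]                    # 30 samples correctly predicted as class 1
    fp = cm[1].sum() - tp                # 10 samples wrongly predicted as class 1
    fn = cm.loc[1].sum() - tp            # 10 class-1 samples predicted as class 0
    precision = tp / (tp + fp)           # 30 / 40 = 0.75, as asserted in test_precision
    recall = tp / (tp + fn)              # 30 / 40 = 0.75, as asserted in test_recall
    accuracy = (cm.loc[0, 0] + tp) / cm.values.sum()   # 80 / 100 = 0.80
    return precision, recall, accuracy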
|
py | 1a4a5bb9817688933aa86395ba61d029807ec640 | """
Tests for actors implementation.
"""
from __future__ import absolute_import
from builtins import object
from unittest import TestCase
from mdk_runtime.actors import MessageDispatcher, _ManualLaterCaller as CallLater
from mdk_runtime.promise import PromiseResolver
class RecordingActor(object):
"""
An actor that records operations.
Messages can be forwarded to another actor, but we ensure no infinite
recursion.
"""
def __init__(self, name, record, destination):
self.name = name
self.record = record
self.destination = destination
self.received = set()
def onStart(self, dispatcher):
self.dispatcher = dispatcher
def onStop(self):
pass
def onMessage(self, origin, message):
isNew = message not in self.received
self.received.add(message)
prefix = "{} received {} from {}: ".format(self.name, message, origin.name)
self.record.append(prefix + "start")
# Make sure we don't get into infinite recursion:
if self.destination is not None and isNew:
self.record.append("{} sent {} to {}".format(self.name, message, self.destination.name))
self.dispatcher.tell(self, message, self.destination)
self.record.append(prefix + "end")
class StartingActor(object):
"""
An actor that sends a message on start.
"""
def __init__(self):
self.record = []
def onStart(self, dispatcher):
self.record.append("start started")
dispatcher.tell(self, "hello", self)
self.record.append("start finished")
def onStop(self):
pass
def onMessage(self, origin, message):
self.record.append(message)
class StoppingActor(object):
"""
An actor that sends a message on stop.
"""
def __init__(self):
self.record = []
def onStart(self, dispatcher):
self.record.append("start started")
dispatcher.stopActor(self)
self.record.append("start finished")
def onStop(self):
self.record.append("stopped")
def onMessage(self, origin, message):
pass
class Callback(object):
def __init__(self, record):
self.record = record
def call(self, arg):
self.record.append("callback: " + arg)
class PromiseActor(object):
"""
An actor that resolves a Promise on message receipt.
"""
def __init__(self):
self.record = []
def onStart(self, dispatcher):
self.dispatcher = dispatcher
def onStop(self):
pass
def onMessage(self, origin, message):
self.record.append("start")
resolver = PromiseResolver(self.dispatcher)
resolver.promise.andThen(Callback(self.record))
resolver.resolve("hello")
self.record.append("end")
class BrokenActor(object):
"""
An actor that raises an exception after receiving its first message.
"""
def __init__(self):
self.record = []
def onStart(self, dispatcher):
pass
def onStop(self):
pass
def onMessage(self, origin, message):
self.record.append(message)
if len(self.record) == 1:
raise Exception("oh no!")
class MessageDispatcherTests(TestCase):
"""
Tests for MessageDispatcher.
"""
def setup_method(self, method):
self.dispatcher = MessageDispatcher(CallLater())
def test_no_start_reentrancy(self):
"""
MessageDispatcher does not allow re-entrancy of actor starts.
"""
actor = StartingActor()
self.dispatcher.startActor(actor)
self.dispatcher.pump()
self.assertEqual(actor.record,
["start started",
"start finished",
"hello"])
def test_no_stop_reentrancy(self):
"""
MessageDispatcher does not allow re-entrancy of actor stop.
"""
actor = StoppingActor()
self.dispatcher.startActor(actor)
self.dispatcher.pump()
self.assertEqual(actor.record,
["start started",
"start finished",
"stopped"])
def test_multiple_tell(self):
"""
Calling tell() multiple times still delivers messages.
"""
actor = StartingActor()
self.dispatcher.startActor(actor)
self.dispatcher.pump()
actor.record = [] # Clear startup stuff from the recording
self.dispatcher.tell(actor, "what's", actor)
self.dispatcher.tell(actor, "up", actor)
self.dispatcher.pump()
self.assertEqual(actor.record, ["what's", "up"])
def test_no_message_reentrancy(self):
"""
MessageDispatcher does not allow re-entrancy of message delivery.
"""
record = []
Origin = RecordingActor("Origin", record, None)
A = RecordingActor("A", record, None)
B = RecordingActor("B", record, A)
A.destination = B
self.dispatcher.startActor(A)
self.dispatcher.startActor(B)
self.dispatcher.tell(Origin, 123, A)
self.dispatcher.pump()
self.assertEqual(
record, [
# A receives first message, sends on to B
'A received 123 from Origin: start',
'A sent 123 to B',
'A received 123 from Origin: end',
# *After* it is done, B receives its message:
'B received 123 from A: start',
'B sent 123 to A',
'B received 123 from A: end',
# And only after *that* is done, A receives its message:
'A received 123 from B: start',
'A received 123 from B: end'])
def test_no_promise_reentrancy(self):
"""
MessageDispatcher does not allow re-entrancy of Promise callbacks.
"""
actor = PromiseActor()
self.dispatcher.startActor(actor)
self.dispatcher.tell(actor, "hello", actor)
# Promise callback should only happen *after* message delivery is done:
self.dispatcher.pump()
self.assertEqual(actor.record, ["start", "end", "callback: hello"])
def test_handle_delivery_errors(self):
"""
An exception from delivering a message does not prevent future message
delivery; instead it is caught by the MessageDispatcher.
"""
actor = BrokenActor()
self.dispatcher.startActor(actor)
self.dispatcher.tell(None, "hello", actor)
self.dispatcher.tell(None, "world", actor)
self.dispatcher.pump()
self.assertEqual(actor.record, ["hello", "world"])
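# Editor's note: a minimal usage sketch of the dispatch pattern exercised above;
# it is not part of the original suite and is never invoked by it. It only
# reuses names already imported in this module (MessageDispatcher and the
# manual CallLater) and calls already exercised by the tests (startActor,
# tell, pump).
class _EchoActor(object):
    def __init__(self):
        self.seen = []
    def onStart(self, dispatcher):
        self.dispatcher = dispatcher
    def onStop(self):
        pass
    def onMessage(self, origin, message):
        self.seen.append(message)
def _sketch_echo_roundtrip():
    dispatcher = MessageDispatcher(CallLater())
    actor = _EchoActor()
    dispatcher.startActor(actor)
    dispatcher.tell(None, "ping", actor)
    dispatcher.pump()   # messages are only delivered once the dispatcher is pumped
    return actor.seen   # expected to be ["ping"]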
|
py | 1a4a5cf4e81bfdd6eef12fbd3425e1abe6913585 | import logging
logging.basicConfig(level=logging.DEBUG)
from aiomailserver.core.controller import MailServerController
import asyncio
def exception_handler(*args, **kwargs):
logging.exception(args)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.set_debug(True)
loop.set_exception_handler(exception_handler)
server = MailServerController(loop=loop)
server.loop.run_until_complete(server.start())
try:
server.loop.run_forever()
finally:
server.loop.run_until_complete(server.close())
|
py | 1a4a5d176777f7beae4d7ee27eb8be54fd2fbd3a | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from bokeh import mpl
from bokeh.plotting import show
# generate some random data
data = 1 + np.random.randn(20, 6)
# Use Seaborn and Matplotlib normally
sns.violinplot(data, color="Set3")
plt.title("Seaborn violin plot in Bokeh")
# Convert to interactive Bokeh plot with one command
show(mpl.to_bokeh(name="violin"))
|
py | 1a4a5ea2ac07b792c17bef9f63fb7309d8f85e69 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from functools import partial
from pathlib import Path
from typing import Optional
from pants.base.build_root import BuildRoot
from pants.fs.archive import TGZ
from pants.init.repro import Repro, Reproducer
from pants.testutil.subsystem.util import global_subsystem_instance
from pants.util.contextutil import pushd, temporary_dir
from pants.util.dirutil import safe_file_dump
class ReproTest(unittest.TestCase):
@staticmethod
def add_file(root: Path, relpath: str, *, content: str = '') -> None:
full_path = Path(root, relpath)
safe_file_dump(str(full_path), payload=content)
def assert_file(
self, root: Path, relpath: str, *, expected_content: Optional[str] = None
) -> None:
full_path = Path(root, relpath)
self.assertTrue(full_path.exists())
if expected_content is not None:
self.assertEqual(expected_content, full_path.read_text())
def assert_not_exists(self, root: Path, relpath: str) -> None:
self.assertFalse(Path(root, relpath).exists())
def test_repro(self) -> None:
"""Verify that Repro object creates expected tar.gz file"""
with temporary_dir() as tmpdir:
fake_buildroot = Path(tmpdir, 'buildroot')
add_file = partial(self.add_file, fake_buildroot)
add_file('.git/foo', content='foo')
add_file('dist/bar', content='bar')
add_file('baz.txt', content='baz')
add_file('qux/quux.txt', content='quux')
repro_file = Path(tmpdir, 'repro.tar.gz')
repro = Repro(str(repro_file), str(fake_buildroot), ignore=['.git', 'dist'])
repro.capture(run_info_dict={'foo': 'bar', 'baz': 'qux'})
extract_dir = Path(tmpdir, 'extract')
TGZ.extract(str(repro_file), str(extract_dir))
assert_file = partial(self.assert_file, extract_dir)
assert_file('baz.txt', expected_content='baz')
assert_file('qux/quux.txt', expected_content='quux')
assert_file('repro.sh')
assert_not_exists = partial(self.assert_not_exists, extract_dir)
assert_not_exists('.git')
assert_not_exists('dist')
def test_ignore_dir(self) -> None:
"""Verify that passing --repro-ignore option ignores the directory"""
# Buildroot is based on your cwd, so we need to step into a fresh
# directory for repro to look at.
root_instance = BuildRoot()
with temporary_dir() as build_root, \
root_instance.temporary(build_root), \
pushd(build_root), \
temporary_dir() as capture_dir:
add_file = partial(self.add_file, build_root)
add_file('pants.ini')
add_file('.git/foo', content='foo')
add_file('dist/bar', content='bar')
add_file('foo/bar', content='baz')
add_file('src/test1', content='test1')
add_file('src/test2', content='test1')
repro_file = Path(capture_dir, 'repro.tar.gz')
options = {
Reproducer.options_scope: dict(
capture=str(repro_file),
ignore=['src'],
)}
repro_sub = global_subsystem_instance(Reproducer, options=options)
repro = repro_sub.create_repro() # This is normally called in pants_exe.
repro.capture(run_info_dict={})
extract_loc = Path(capture_dir, 'extract')
TGZ.extract(str(repro_file), str(extract_loc))
self.assert_file(extract_loc, 'foo/bar', expected_content='baz')
assert_not_exists = partial(self.assert_not_exists, extract_loc)
assert_not_exists('.git')
assert_not_exists('src')
|
py | 1a4a5f27a0bdb02acfd156437b9750e472eb5aae | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Adding New Instrument
=====================
Any new instrument should be a subclass of Instrument and it must have a name.
When a new instrument is added to Workload Automation, the methods of the new
instrument will be found automatically and hooked up to the supported signals.
Once a signal is broadcast, the corresponding registered method is invoked.
Each method in Instrument must take two arguments, which are self and context.
Supported signals can be found in [... link to signals ...]. To make
implementations easier and more consistent, the basic steps to add a new instrument are
similar to the steps to add a new workload.
Hence, implementing the following methods is sufficient to add a new instrument:
- setup: This method is invoked after the workload is set up. All the
necessary setup should go inside this method. Setup includes operations
like pushing files to the target device, installing them, clearing logs,
etc.
- start: It is invoked just before the workload starts execution. This is
where the instrument's measurements start being registered/taken.
- stop: It is invoked just after the workload execution stops. The measurements
should stop being taken/registered.
- update_result: It is invoked after the workload has updated its result.
update_result is where the taken measurements are added to the result so they
can be processed by Workload Automation.
- teardown: It is invoked after the workload is torn down. It is a good place
to clean up any logs generated by the instrument.
For example, to add an instrument which will trace device errors, we subclass
Instrument and overwrite the variable name.::
#BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
class TraceErrorsInstrument(Instrument):
name = 'trace-errors'
def __init__(self, device):
super(TraceErrorsInstrument, self).__init__(device)
self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
We then declare and implement the aforementioned methods. For the setup method,
we want to push the file to the target device and then change the file mode to
755 ::
def setup(self, context):
self.device.push_file(BINARY_FILE, self.device.working_directory)
self.device.execute('chmod 755 {}'.format(self.trace_on_device))
Then we implement the start method, which will simply run the file to start
tracing. ::
def start(self, context):
self.device.execute('{} start'.format(self.trace_on_device))
Lastly, we need to stop tracing once the workload stops and this happens in the
stop method::
def stop(self, context):
self.device.execute('{} stop'.format(self.trace_on_device))
The generated result can be updated inside update_result, or, if it is a trace, we
just pull the file to the host device. context has a result variable which
has an add_metric method. It can be used to add the instrumentation result metrics
to the final result for the workload. The method can be passed 4 params, which
are the metric key, value, unit and lower_is_better, which is a boolean. ::
def update_result(self, context):
# pull the trace file from the device to the host
result = os.path.join(self.device.working_directory, 'trace.txt')
self.device.pull_file(result, context.working_directory)
# parse the file if needs to be parsed, or add result to
# context.result
At the end, we might want to delete any files generated by the instrumentation,
and the code to clear these files goes in the teardown method. ::
def teardown(self, context):
self.device.delete_file(os.path.join(self.device.working_directory, 'trace.txt'))
"""
import logging
import inspect
from collections import OrderedDict
import wlauto.core.signal as signal
from wlauto.core.extension import Extension
from wlauto.exceptions import WAError, DeviceNotRespondingError, TimeoutError
from wlauto.utils.misc import get_traceback, isiterable
from wlauto.utils.types import identifier
logger = logging.getLogger('instrumentation')
# Maps method names onto signals the should be registered to.
# Note: the begin/end signals are paired -- if a begin_ signal is sent,
# then the corresponding end_ signal is guaranteed to also be sent.
# Note: using OrderedDict to preserve logical ordering for the table generated
# in the documentation
SIGNAL_MAP = OrderedDict([
# Below are "aliases" for some of the more common signals to allow
# instrumentation to have similar structure to workloads
('initialize', signal.RUN_INIT),
('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('start', signal.BEFORE_WORKLOAD_EXECUTION),
('stop', signal.AFTER_WORKLOAD_EXECUTION),
('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('finalize', signal.RUN_FIN),
('on_run_start', signal.RUN_START),
('on_run_end', signal.RUN_END),
('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
('on_iteration_start', signal.ITERATION_START),
('on_iteration_end', signal.ITERATION_END),
('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
('after_initial_boot', signal.AFTER_INITIAL_BOOT),
('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
('before_boot', signal.BEFORE_BOOT),
('on_successful_boot', signal.SUCCESSFUL_BOOT),
('after_boot', signal.AFTER_BOOT),
('on_spec_init', signal.SPEC_INIT),
('on_run_init', signal.RUN_INIT),
('on_iteration_init', signal.ITERATION_INIT),
('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
('on_error', signal.ERROR_LOGGED),
('on_warning', signal.WARNING_LOGGED),
])
PRIORITY_MAP = OrderedDict([
('very_fast_', 20),
('fast_', 10),
('normal_', 0),
('slow_', -10),
('very_slow_', -20),
])
installed = []
def is_installed(instrument):
if isinstance(instrument, Instrument):
if instrument in installed:
return True
if instrument.name in [i.name for i in installed]:
return True
elif isinstance(instrument, type):
if instrument in [i.__class__ for i in installed]:
return True
else: # assume string
if identifier(instrument) in [identifier(i.name) for i in installed]:
return True
return False
def is_enabled(instrument):
if isinstance(instrument, Instrument) or isinstance(instrument, type):
name = instrument.name
else: # assume string
name = instrument
try:
installed_instrument = get_instrument(name)
return installed_instrument.is_enabled
except ValueError:
return False
failures_detected = False
def reset_failures():
global failures_detected # pylint: disable=W0603
failures_detected = False
def check_failures():
result = failures_detected
reset_failures()
return result
class ManagedCallback(object):
"""
This wraps instruments' callbacks to ensure that errors do not interfere
with run execution.
"""
def __init__(self, instrument, callback):
self.instrument = instrument
self.callback = callback
def __call__(self, context):
if self.instrument.is_enabled:
try:
self.callback(context)
except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception as e: # pylint: disable=W0703
logger.error('Error in instrument {}'.format(self.instrument.name))
global failures_detected # pylint: disable=W0603
failures_detected = True
if isinstance(e, WAError):
logger.error(e)
else:
tb = get_traceback()
logger.error(tb)
logger.error('{}({})'.format(e.__class__.__name__, e))
if not context.current_iteration:
# Error occurred outside of an iteration (most likely
# during initial setup or teardown). Since this would affect
# the rest of the run, mark the instrument as broken so that
# it doesn't get re-enabled for subsequent iterations.
self.instrument.is_broken = True
disable(self.instrument)
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
_callbacks = []
def install(instrument):
"""
This will look for methods (or any callable members) with specific names
in the instrument and hook them up to the corresponding signals.
:param instrument: Instrument instance to install.
"""
logger.debug('Installing instrument %s.', instrument)
if is_installed(instrument):
raise ValueError('Instrument {} is already installed.'.format(instrument.name))
for attr_name in dir(instrument):
priority = 0
stripped_attr_name = attr_name
for key, value in PRIORITY_MAP.iteritems():
if attr_name.startswith(key):
stripped_attr_name = attr_name[len(key):]
priority = value
break
if stripped_attr_name in SIGNAL_MAP:
attr = getattr(instrument, attr_name)
if not callable(attr):
raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
argspec = inspect.getargspec(attr)
arg_num = len(argspec.args)
# Instrument callbacks will be passed exactly two arguments: self
# (the instrument instance to which the callback is bound) and
# context. However, we also allow callbacks to capture the context
# in variable arguments (declared as "*args" in the definition).
if arg_num > 2 or (arg_num < 2 and argspec.varargs is None):
message = '{} must take exactly 2 positional arguments; {} given.'
raise ValueError(message.format(attr_name, arg_num))
logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
mc = ManagedCallback(instrument, attr)
_callbacks.append(mc)
signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
installed.append(instrument)
def uninstall(instrument):
instrument = get_instrument(instrument)
installed.remove(instrument)
def validate():
for instrument in installed:
instrument.validate()
def get_instrument(inst):
if isinstance(inst, Instrument):
return inst
for installed_inst in installed:
if identifier(installed_inst.name) == identifier(inst):
return installed_inst
raise ValueError('Instrument {} is not installed'.format(inst))
def disable_all():
for instrument in installed:
_disable_instrument(instrument)
def enable_all():
for instrument in installed:
_enable_instrument(instrument)
def enable(to_enable):
if isiterable(to_enable):
for inst in to_enable:
_enable_instrument(inst)
else:
_enable_instrument(to_enable)
def disable(to_disable):
if isiterable(to_disable):
for inst in to_disable:
_disable_instrument(inst)
else:
_disable_instrument(to_disable)
def _enable_instrument(inst):
inst = get_instrument(inst)
if not inst.is_broken:
logger.debug('Enabling instrument {}'.format(inst.name))
inst.is_enabled = True
else:
logger.debug('Not enabling broken instrument {}'.format(inst.name))
def _disable_instrument(inst):
inst = get_instrument(inst)
if inst.is_enabled:
logger.debug('Disabling instrument {}'.format(inst.name))
inst.is_enabled = False
def get_enabled():
return [i for i in installed if i.is_enabled]
def get_disabled():
return [i for i in installed if not i.is_enabled]
class Instrument(Extension):
"""
Base class for instrumentation implementations.
"""
def __init__(self, device, **kwargs):
super(Instrument, self).__init__(**kwargs)
self.device = device
self.is_enabled = True
self.is_broken = False
def initialize(self, context):
pass
def finalize(self, context):
pass
def __str__(self):
return self.name
def __repr__(self):
return 'Instrument({})'.format(self.name)
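# Editor's note: an illustrative sketch of the pattern described in the module
# docstring, not part of the original file. The method names deliberately match
# keys of SIGNAL_MAP, and the 'slow_' prefix shows how PRIORITY_MAP lowers a
# callback's priority. Real instruments are discovered and instantiated by WA's
# extension loader, so treat this as a shape illustration rather than a
# drop-in plugin.
class SketchLoggerInstrument(Instrument):
    name = 'sketch-logger'
    def setup(self, context):
        logger.debug('sketch-logger: workload has been set up')
    def start(self, context):
        logger.debug('sketch-logger: workload execution is about to start')
    def stop(self, context):
        logger.debug('sketch-logger: workload execution has stopped')
    def slow_update_result(self, context):
        # the 'slow_' prefix maps to priority -10, so this runs after normal-priority hooks
        logger.debug('sketch-logger: workload result has been updated')
    def teardown(self, context):
        logger.debug('sketch-logger: workload has been torn down')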
|
py | 1a4a5fd03a4c84838c6e178c10fe81569cb62004 | previous = 0
match = 0
with open('input', 'r') as file:
data = file.read().split('\n')[1:-1]
for a in data:
if int(a) > previous:
match += 1
previous = int(a)
print('Higher {}'.format(match))
|
py | 1a4a60579a7f1eea84bcf18099f60c264ca07c63 | import collections
from . import HydrusConstants as HC
import os
import random
import time
import unittest
from . import HydrusData
from . import HydrusGlobals as HG
from . import HydrusNetworking
from mock import patch
now = HydrusData.GetNow()
now_10 = now + 10
now_20 = now + 20
with patch.object( HydrusData, 'GetNow', return_value = now ):
HIGH_USAGE = HydrusNetworking.BandwidthTracker()
for i in range( 100 ):
HIGH_USAGE.ReportRequestUsed()
HIGH_USAGE.ReportDataUsed( random.randint( 512, 1024 ) )
LOW_USAGE = HydrusNetworking.BandwidthTracker()
LOW_USAGE.ReportRequestUsed()
LOW_USAGE.ReportDataUsed( 1024 )
ZERO_USAGE = HydrusNetworking.BandwidthTracker()
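# Editor's note: a small illustrative helper, not part of the original tests
# and never called by them. It spells out the arithmetic behind the trackers
# above: at the frozen `now`, HIGH_USAGE has reported 100 requests of 512-1024
# bytes each (roughly 51 KB - 100 KB within one second), so any
# 10,240-bytes-per-second data rule must reject it, while LOW_USAGE's single
# 1 KB request and ZERO_USAGE stay within such a limit.
def _sketch_usage_bounds():
    high_min, high_max = 100 * 512, 100 * 1024   # 51,200 .. 102,400 bytes
    low_total = 1024
    per_second_data_allowance = 10240
    return high_min > per_second_data_allowance >= low_total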
class TestBandwidthRules( unittest.TestCase ):
def test_no_rules( self ):
rules = HydrusNetworking.BandwidthRules()
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertTrue( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
def test_per_sec( self ):
# at short time deltas, we can always start based on data alone
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 1, 10240 )
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertTrue( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertFalse( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_10 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertTrue( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_20 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertTrue( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, 1, 1 )
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertFalse( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_10 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertTrue( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_20 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertTrue( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 1, 10240 )
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, 1, 1 )
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertFalse( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertFalse( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_10 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertTrue( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_20 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertTrue( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
def test_per_min( self ):
# cutoff is 15s for continue
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 60, 10240 )
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_10 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_20 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, 60, 10 )
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_10 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_20 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 60, 10240 )
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, 60, 10 )
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_10 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_20 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
def test_per_month( self ):
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, None, 10240 )
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_10 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_20 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, None, 10 )
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_10 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_20 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, None, 10240 )
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, None, 10 )
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_10 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
with patch.object( HydrusData, 'GetNow', return_value = now_20 ):
self.assertTrue( rules.CanStartRequest( ZERO_USAGE ) )
self.assertTrue( rules.CanStartRequest( LOW_USAGE ) )
self.assertFalse( rules.CanStartRequest( HIGH_USAGE ) )
self.assertTrue( rules.CanContinueDownload( ZERO_USAGE ) )
self.assertTrue( rules.CanContinueDownload( LOW_USAGE ) )
self.assertTrue( rules.CanContinueDownload( HIGH_USAGE ) )
class TestBandwidthTracker( unittest.TestCase ):
def test_bandwidth_tracker( self ):
bandwidth_tracker = HydrusNetworking.BandwidthTracker()
self.assertEqual( bandwidth_tracker.GetCurrentMonthSummary(), 'used 0B in 0 requests this month' )
now = HydrusData.GetNow()
with patch.object( HydrusData, 'GetNow', return_value = now ):
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 1 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 2 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 6 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 6 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 3600 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 3600 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, None ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, None ), 0 )
#
bandwidth_tracker.ReportDataUsed( 1024 )
bandwidth_tracker.ReportRequestUsed()
self.assertEqual( bandwidth_tracker.GetCurrentMonthSummary(), 'used 1.0KB in 1 requests this month' )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 1 ), 1 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 2 ), 1 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 6 ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 6 ), 1 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 3600 ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 3600 ), 1 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, None ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, None ), 1 )
#
five_secs_from_now = now + 5
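# Five seconds later the 1s and 2s usage windows should have emptied, while the
# 6s and larger windows (and the monthly total) should still report the earlier
# 1024B / 1 request, as asserted below.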
with patch.object( HydrusData, 'GetNow', return_value = five_secs_from_now ):
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 1 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 2 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 6 ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 6 ), 1 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 3600 ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 3600 ), 1 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, None ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, None ), 1 )
#
bandwidth_tracker.ReportDataUsed( 32 )
bandwidth_tracker.ReportRequestUsed()
bandwidth_tracker.ReportDataUsed( 32 )
bandwidth_tracker.ReportRequestUsed()
self.assertEqual( bandwidth_tracker.GetCurrentMonthSummary(), 'used 1.1KB in 3 requests this month' )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 64 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 1 ), 2 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 64 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 2 ), 2 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 6 ), 1088 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 6 ), 3 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 3600 ), 1088 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 3600 ), 3 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, None ), 1088 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, None ), 3 )
|
py | 1a4a6255d894c9519858cd8541d36c3a5cb6b8a9 | from email.mime.text import MIMEText
import random
import smtplib
import jinja2
import datetime
from pebbles.client import PBClient
from pebbles.models import Instance
from pebbles.tasks.celery_app import logger, get_token, local_config, get_dynamic_config
from pebbles.tasks.provisioning_tasks import run_update
from pebbles.tasks.celery_app import celery_app
@celery_app.task(name="pebbles.tasks.periodic_update")
def periodic_update():
""" Runs periodic updates.
In particular, it sets old instances up for deprovisioning once they are past
their maximum_lifetime, and sets the remaining instances up for updates.
Neither deletion nor update events are guaranteed to take place
immediately. If there are more than 10 instances, a random sample of 10
updates and deletions will take place, to ensure the task is safe to run and
won't slow down other tasks.
"""
token = get_token()
pbclient = PBClient(token, local_config['INTERNAL_API_BASE_URL'], ssl_verify=False)
instances = pbclient.get_instances()
deprovision_list = []
update_list = []
for instance in instances:
logger.debug('checking instance for actions %s' % instance['name'])
deprovision_required = False
if instance.get('state') in [Instance.STATE_RUNNING]:
if not instance.get('lifetime_left') and instance.get('maximum_lifetime'):
deprovision_required = True
if deprovision_required:
deprovision_list.append(instance)
elif instance.get('state') not in [Instance.STATE_FAILED]:
update_list.append(instance)
# ToDo: refactor magic number to variable
if len(deprovision_list) > 10:
deprovision_list = random.sample(deprovision_list, 10)
for instance in deprovision_list:
logger.info('deprovisioning triggered for %s (reason: maximum lifetime exceeded)' % instance.get('id'))
pbclient.do_instance_patch(instance['id'], {'to_be_deleted': True})
run_update.delay(instance.get('id'))
if len(update_list) > 10:
update_list = random.sample(update_list, 10)
for instance in update_list:
run_update.delay(instance.get('id'))
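# Illustrative sketch (added; these names are assumptions, not existing pebbles
# APIs): the magic number 10 flagged in the ToDo above could be factored into a
# named constant shared by both the deprovision and update paths, e.g.
#
#     MAX_PERIODIC_ACTIONS = 10
#
#     def cap_to_random_sample(items, limit=MAX_PERIODIC_ACTIONS):
#         return random.sample(items, limit) if len(items) > limit else items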
@celery_app.task(name="pebbles.tasks.user_blueprint_cleanup")
def user_blueprint_cleanup():
token = get_token()
pbclient = PBClient(token, local_config['INTERNAL_API_BASE_URL'], ssl_verify=False)
users = pbclient.get_users()
for user in users:
if not user.get('is_deleted') and user.get('expiry_date') and datetime.datetime.strptime(user.get('expiry_date'), '%a, %d %b %Y %H:%M:%S -0000') <= datetime.datetime.utcnow():
pbclient.user_delete(user['id'])
blueprints = pbclient.get_blueprints()
for blueprint in blueprints:
if blueprint.get('expiry_time') and datetime.datetime.strptime(blueprint.get('expiry_time'), '%a, %d %b %Y %H:%M:%S -0000') <= datetime.datetime.utcnow():
pbclient.blueprint_delete(blueprint['id'])
@celery_app.task(name="pebbles.tasks.send_mails")
def send_mails(users, text=None):
""" ToDo: document. apparently sends activation emails.
"""
dynamic_config = get_dynamic_config()
j2_env = jinja2.Environment(loader=jinja2.PackageLoader('pebbles', 'templates'))
base_url = dynamic_config['BASE_URL'].strip('/')
# email_id is used as the recipient address because sending email via the eppn might not work in some cases
for email_id, token, user_active in users:
if text is None:
activation_url = '%s/#/activate/%s' % (base_url, token)
msg = MIMEText(j2_env.get_template('invitation.txt').render(activation_link=activation_url, instance_name=dynamic_config['INSTALLATION_NAME'], instance_description=dynamic_config['INSTALLATION_DESCRIPTION'], user_active=user_active))
subject = '%s account activation' if not user_active else '%s password reset'
msg['Subject'] = subject % dynamic_config['INSTALLATION_NAME']
else:
msg = MIMEText(text['message'])
subject = text['subject'] + " - %s"
msg['Subject'] = subject % dynamic_config['INSTALLATION_NAME']
msg['To'] = email_id
msg['From'] = dynamic_config['SENDER_EMAIL']
logger.info(msg)
if not dynamic_config['MAIL_SUPPRESS_SEND']:
s = smtplib.SMTP(dynamic_config['MAIL_SERVER'])
if dynamic_config['MAIL_USE_TLS']:
s.starttls()
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
else:
logger.info('Mail sending suppressed in config')
|
py | 1a4a631d6b2a052c531ff2a2a879cfa90aacdb85 | import os
import re
from datetime import timedelta
from typing import Any, Dict, List, Optional
from unittest import mock, skipUnless
from unittest.mock import MagicMock, call, patch
from django.apps import apps
from django.conf import settings
from django.core.management import call_command, find_commands
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from confirmation.models import RealmCreationKey, generate_realm_creation_url
from zerver.lib.actions import do_add_reaction, do_create_user
from zerver.lib.management import CommandError, ZulipBaseCommand, check_config
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import most_recent_message, stdout_suppressed
from zerver.models import (
Message,
Reaction,
Realm,
Recipient,
UserProfile,
get_realm,
get_stream,
get_user_profile_by_email,
)
class TestCheckConfig(ZulipTestCase):
def test_check_config(self) -> None:
check_config()
with self.settings(REQUIRED_SETTINGS=[('asdf', 'not asdf')]):
with self.assertRaisesRegex(CommandError, "Error: You must set asdf in /etc/zulip/settings.py."):
check_config()
@override_settings(WARN_NO_EMAIL=True)
def test_check_send_email(self) -> None:
with self.assertRaisesRegex(CommandError, "Outgoing email not yet configured, see"):
call_command("send_test_email", '[email protected]')
class TestZulipBaseCommand(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.zulip_realm = get_realm("zulip")
self.command = ZulipBaseCommand()
def test_get_client(self) -> None:
self.assertEqual(self.command.get_client().name, "ZulipServer")
def test_get_realm(self) -> None:
self.assertEqual(self.command.get_realm(dict(realm_id='zulip')), self.zulip_realm)
self.assertEqual(self.command.get_realm(dict(realm_id=None)), None)
self.assertEqual(self.command.get_realm(dict(realm_id=str(self.zulip_realm.id))),
self.zulip_realm)
with self.assertRaisesRegex(CommandError, "There is no realm with id"):
self.command.get_realm(dict(realm_id='17'))
with self.assertRaisesRegex(CommandError, "There is no realm with id"):
self.command.get_realm(dict(realm_id='mit'))
def test_get_user(self) -> None:
mit_realm = get_realm("zephyr")
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
self.assertEqual(self.command.get_user(email, self.zulip_realm), user_profile)
self.assertEqual(self.command.get_user(email, None), user_profile)
error_message = f"The realm '{mit_realm}' does not contain a user with email"
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_user(email, mit_realm)
with self.assertRaisesRegex(CommandError, "server does not contain a user with email"):
self.command.get_user('[email protected]', None)
do_create_user(email, 'password', mit_realm, 'full_name')
with self.assertRaisesRegex(CommandError, "server contains multiple users with that email"):
self.command.get_user(email, None)
def test_get_user_profile_by_email(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.delivery_email
self.assertEqual(get_user_profile_by_email(email), user_profile)
def get_users_sorted(self, options: Dict[str, Any], realm: Optional[Realm],
**kwargs: Any) -> List[UserProfile]:
user_profiles = self.command.get_users(options, realm, **kwargs)
return sorted(user_profiles, key = lambda x: x.email)
def sorted_users(self, users: List[UserProfile]) -> List[UserProfile]:
return sorted(users, key = lambda x: x.email)
def test_get_users(self) -> None:
expected_user_profiles = self.sorted_users([
self.example_user('hamlet'),
self.example_user('iago'),
])
user_emails = ','.join(u.delivery_email for u in expected_user_profiles)
user_profiles = self.get_users_sorted(dict(users=user_emails), self.zulip_realm)
self.assertEqual(user_profiles, expected_user_profiles)
user_profiles = self.get_users_sorted(dict(users=user_emails), None)
self.assertEqual(user_profiles, expected_user_profiles)
expected_user_profiles = self.sorted_users([
self.mit_user('sipbtest'),
self.example_user('iago'),
])
user_emails = ','.join(u.delivery_email for u in expected_user_profiles)
user_profiles = self.get_users_sorted(dict(users=user_emails), None)
self.assertEqual(user_profiles, expected_user_profiles)
error_message = f"The realm '{self.zulip_realm}' does not contain a user with email"
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=user_emails), self.zulip_realm)
self.assertEqual(self.command.get_users(dict(users=self.example_email("iago")), self.zulip_realm),
[self.example_user("iago")])
self.assertEqual(self.command.get_users(dict(users=None), None), [])
def test_get_users_with_all_users_argument_enabled(self) -> None:
expected_user_profiles = self.sorted_users([
self.example_user('hamlet'),
self.example_user('iago'),
])
user_emails = ','.join(u.delivery_email for u in expected_user_profiles)
user_profiles = self.get_users_sorted(dict(users=user_emails, all_users=False), self.zulip_realm)
self.assertEqual(user_profiles, expected_user_profiles)
error_message = "You can't use both -u/--users and -a/--all-users."
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=user_emails, all_users=True), None)
# Test the default mode excluding bots and deactivated users
expected_user_profiles = sorted(UserProfile.objects.filter(realm=self.zulip_realm,
is_active=True, is_bot=False),
key = lambda x: x.email)
user_profiles = self.get_users_sorted(dict(users=None, all_users=True),
self.zulip_realm,
is_bot=False)
self.assertEqual(user_profiles, expected_user_profiles)
# Test the default mode excluding only deactivated users (bots are included)
expected_user_profiles = sorted(UserProfile.objects.filter(realm=self.zulip_realm,
is_active=True),
key = lambda x: x.email)
user_profiles = self.get_users_sorted(dict(users=None, all_users=True),
self.zulip_realm)
self.assertEqual(user_profiles, expected_user_profiles)
# Test include_deactivated
expected_user_profiles = sorted(UserProfile.objects.filter(realm=self.zulip_realm,
is_bot=False),
key = lambda x: x.email)
user_profiles = self.get_users_sorted(dict(users=None, all_users=True),
self.zulip_realm,
is_bot=False, include_deactivated=True)
self.assertEqual(user_profiles, expected_user_profiles)
error_message = "You have to pass either -u/--users or -a/--all-users."
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=None, all_users=False), None)
error_message = "The --all-users option requires a realm; please pass --realm."
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=None, all_users=True), None)
def test_get_non_bot_users(self) -> None:
expected_user_profiles = sorted(UserProfile.objects.filter(realm=self.zulip_realm,
is_bot=False),
key = lambda x: x.email)
user_profiles = self.get_users_sorted(dict(users=None, all_users=True),
self.zulip_realm,
is_bot=False)
self.assertEqual(user_profiles, expected_user_profiles)
class TestCommandsCanStart(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.commands = [
command
for app_config in apps.get_app_configs()
if os.path.dirname(os.path.realpath(app_config.path)) == settings.DEPLOY_ROOT
for command in find_commands(os.path.join(app_config.path, "management"))
]
assert self.commands
def test_management_commands_show_help(self) -> None:
with stdout_suppressed():
for command in self.commands:
with self.subTest(management_command=command):
with self.assertRaises(SystemExit):
call_command(command, '--help')
# zerver/management/commands/runtornado.py sets this to True;
# we need to reset it here. See #3685 for details.
settings.RUNNING_INSIDE_TORNADO = False
class TestSendWebhookFixtureMessage(ZulipTestCase):
COMMAND_NAME = 'send_webhook_fixture_message'
def setUp(self) -> None:
super().setUp()
self.fixture_path = os.path.join('some', 'fake', 'path.json')
self.url = '/some/url/with/hook'
@patch('zerver.management.commands.send_webhook_fixture_message.Command.print_help')
def test_check_if_command_exits_when_fixture_param_is_empty(self, print_help_mock: MagicMock) -> None:
with self.assertRaises(CommandError):
call_command(self.COMMAND_NAME, url=self.url)
print_help_mock.assert_any_call('./manage.py', self.COMMAND_NAME)
@patch('zerver.management.commands.send_webhook_fixture_message.Command.print_help')
def test_check_if_command_exits_when_url_param_is_empty(self, print_help_mock: MagicMock) -> None:
with self.assertRaises(CommandError):
call_command(self.COMMAND_NAME, fixture=self.fixture_path)
print_help_mock.assert_any_call('./manage.py', self.COMMAND_NAME)
@patch('zerver.management.commands.send_webhook_fixture_message.os.path.exists')
def test_check_if_command_exits_when_fixture_path_does_not_exist(
self, os_path_exists_mock: MagicMock) -> None:
os_path_exists_mock.return_value = False
with self.assertRaises(CommandError):
call_command(self.COMMAND_NAME, fixture=self.fixture_path, url=self.url)
os_path_exists_mock.assert_any_call(os.path.join(settings.DEPLOY_ROOT, self.fixture_path))
@patch('zerver.management.commands.send_webhook_fixture_message.os.path.exists')
@patch('zerver.management.commands.send_webhook_fixture_message.Client')
@patch('zerver.management.commands.send_webhook_fixture_message.orjson')
@patch("zerver.management.commands.send_webhook_fixture_message.open", create=True)
def test_check_if_command_post_request_to_url_with_fixture(self,
open_mock: MagicMock,
orjson_mock: MagicMock,
client_mock: MagicMock,
os_path_exists_mock: MagicMock) -> None:
orjson_mock.loads.return_value = {}
orjson_mock.dumps.return_value = b"{}"
os_path_exists_mock.return_value = True
client = client_mock()
with self.assertRaises(CommandError):
call_command(self.COMMAND_NAME, fixture=self.fixture_path, url=self.url)
self.assertTrue(orjson_mock.dumps.called)
self.assertTrue(orjson_mock.loads.called)
self.assertTrue(open_mock.called)
client.post.assert_called_once_with(self.url, b"{}", content_type="application/json",
HTTP_HOST="zulip.testserver")
class TestGenerateRealmCreationLink(ZulipTestCase):
COMMAND_NAME = "generate_realm_creation_link"
@override_settings(OPEN_REALM_CREATION=False)
def test_generate_link_and_create_realm(self) -> None:
email = "[email protected]"
generated_link = generate_realm_creation_url(by_admin=True)
# Get realm creation page
result = self.client_get(generated_link)
self.assert_in_success_response(["Create a new Zulip organization"], result)
# Enter email
with self.assertRaises(Realm.DoesNotExist):
get_realm('test')
result = self.client_post(generated_link, {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(re.search(r'/accounts/do_confirm/\w+$', result["Location"]))
# Bypass sending mail for confirmation, go straight to creation form
result = self.client_get(result["Location"])
self.assert_in_response('action="/accounts/register/"', result)
# Original link is now dead
result = self.client_get(generated_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@override_settings(OPEN_REALM_CREATION=False)
def test_generate_link_confirm_email(self) -> None:
email = "[email protected]"
generated_link = generate_realm_creation_url(by_admin=False)
result = self.client_post(generated_link, {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(re.search(f'/accounts/new/send_confirm/{email}$',
result["Location"]))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started", result)
# Original link is now dead
result = self.client_get(generated_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@override_settings(OPEN_REALM_CREATION=False)
def test_realm_creation_with_random_link(self) -> None:
# Realm creation attempt with an invalid link should fail
random_link = "/new/5e89081eb13984e0f3b130bf7a4121d153f1614b"
result = self.client_get(random_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@override_settings(OPEN_REALM_CREATION=False)
def test_realm_creation_with_expired_link(self) -> None:
generated_link = generate_realm_creation_url(by_admin=True)
key = generated_link[-24:]
# Manually expire the link by changing the date of creation
obj = RealmCreationKey.objects.get(creation_key=key)
obj.date_created = obj.date_created - timedelta(days=settings.REALM_CREATION_LINK_VALIDITY_DAYS + 1)
obj.save()
result = self.client_get(generated_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@skipUnless(settings.ZILENCER_ENABLED, "requires zilencer")
class TestCalculateFirstVisibleMessageID(ZulipTestCase):
COMMAND_NAME = 'calculate_first_visible_message_id'
def test_check_if_command_calls_maybe_update_first_visible_message_id(self) -> None:
func_name = "zilencer.management.commands.calculate_first_visible_message_id.maybe_update_first_visible_message_id"
with patch(func_name) as m:
call_command(self.COMMAND_NAME, "--realm=zulip", "--lookback-hours=30")
m.assert_called_with(get_realm("zulip"), 30)
with patch(func_name) as m:
call_command(self.COMMAND_NAME, "--lookback-hours=35")
calls = [call(realm, 35) for realm in Realm.objects.all()]
m.assert_has_calls(calls, any_order=True)
class TestPasswordRestEmail(ZulipTestCase):
COMMAND_NAME = "send_password_reset_email"
def test_if_command_sends_password_reset_email(self) -> None:
call_command(self.COMMAND_NAME, users=self.example_email("iago"))
from django.core.mail import outbox
self.assertRegex(
outbox[0].from_email,
fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
)
self.assertIn("reset your password", outbox[0].body)
class TestRealmReactivationEmail(ZulipTestCase):
COMMAND_NAME = "send_realm_reactivation_email"
def test_if_realm_not_deactivated(self) -> None:
realm = get_realm('zulip')
with self.assertRaisesRegex(CommandError, f"The realm {realm.name} is already active."):
call_command(self.COMMAND_NAME, "--realm=zulip")
class TestSendToEmailMirror(ZulipTestCase):
COMMAND_NAME = "send_to_email_mirror"
def test_sending_a_fixture(self) -> None:
fixture_path = "zerver/tests/fixtures/email/1.txt"
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
with self.assertLogs('zerver.lib.email_mirror', level='INFO') as info_log:
call_command(self.COMMAND_NAME, f"--fixture={fixture_path}")
self.assertEqual(info_log.output, [
'INFO:zerver.lib.email_mirror:Successfully processed email to Denmark (zulip)'
])
message = most_recent_message(user_profile)
# last message should be equal to the body of the email in 1.txt
self.assertEqual(message.content, "Email fixture 1.txt body")
def test_sending_a_json_fixture(self) -> None:
fixture_path = "zerver/tests/fixtures/email/1.json"
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark")
with self.assertLogs('zerver.lib.email_mirror', level='INFO') as info_log:
call_command(self.COMMAND_NAME, f"--fixture={fixture_path}")
self.assertEqual(info_log.output, [
'INFO:zerver.lib.email_mirror:Successfully processed email to Denmark (zulip)'
])
message = most_recent_message(user_profile)
# last message should be equal to the body of the email in 1.json
self.assertEqual(message.content, "Email fixture 1.json body")
def test_stream_option(self) -> None:
fixture_path = "zerver/tests/fixtures/email/1.txt"
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, "Denmark2")
with self.assertLogs('zerver.lib.email_mirror', level='INFO') as info_log:
call_command(self.COMMAND_NAME, f"--fixture={fixture_path}", "--stream=Denmark2")
self.assertEqual(info_log.output, [
'INFO:zerver.lib.email_mirror:Successfully processed email to Denmark2 (zulip)'
])
message = most_recent_message(user_profile)
# last message should be equal to the body of the email in 1.txt
self.assertEqual(message.content, "Email fixture 1.txt body")
stream_id = get_stream("Denmark2", get_realm("zulip")).id
self.assertEqual(message.recipient.type, Recipient.STREAM)
self.assertEqual(message.recipient.type_id, stream_id)
class TestConvertMattermostData(ZulipTestCase):
COMMAND_NAME = 'convert_mattermost_data'
def test_if_command_calls_do_convert_data(self) -> None:
with patch('zerver.management.commands.convert_mattermost_data.do_convert_data') as m, \
patch('builtins.print') as mock_print:
mm_fixtures = self.fixture_file_name("", "mattermost_fixtures")
output_dir = self.make_import_output_dir("mattermost")
call_command(self.COMMAND_NAME, mm_fixtures, f"--output={output_dir}")
m.assert_called_with(
masking_content=False,
mattermost_data_dir=os.path.realpath(mm_fixtures),
output_dir=os.path.realpath(output_dir),
)
self.assertEqual(mock_print.mock_calls, [
call('Converting data ...')
])
@skipUnless(settings.ZILENCER_ENABLED, "requires zilencer")
class TestInvoicePlans(ZulipTestCase):
COMMAND_NAME = 'invoice_plans'
def test_if_command_calls_invoice_plans_as_needed(self) -> None:
with patch('zilencer.management.commands.invoice_plans.invoice_plans_as_needed') as m:
call_command(self.COMMAND_NAME)
m.assert_called_once()
class TestExport(ZulipTestCase):
COMMAND_NAME = 'export'
def test_command_with_consented_message_id(self) -> None:
realm = get_realm("zulip")
self.send_stream_message(self.example_user("othello"), "Verona",
topic_name="Export",
content="Outbox emoji for export")
message = Message.objects.last()
do_add_reaction(self.example_user("iago"), message, "outbox", "1f4e4", Reaction.UNICODE_EMOJI)
do_add_reaction(self.example_user("hamlet"), message, "outbox", "1f4e4", Reaction.UNICODE_EMOJI)
with patch("zerver.management.commands.export.export_realm_wrapper") as m, \
patch('builtins.print') as mock_print, \
patch('builtins.input', return_value='y') as mock_input:
call_command(self.COMMAND_NAME, "-r=zulip", f"--consent-message-id={message.id}")
m.assert_called_once_with(realm=realm, public_only=False, consent_message_id=message.id,
delete_after_upload=False, threads=mock.ANY, output_dir=mock.ANY,
percent_callback=mock.ANY,
upload=False)
mock_input.assert_called_once_with('Continue? [y/N] ')
self.assertEqual(mock_print.mock_calls, [
call('\033[94mExporting realm\033[0m: zulip'),
call('\n\033[94mMessage content:\033[0m\nOutbox emoji for export\n'),
call('\033[94mNumber of users that reacted outbox:\033[0m 2 / 8 total non-guest users\n'),
])
with self.assertRaisesRegex(CommandError, "Message with given ID does not"), \
patch('builtins.print') as mock_print:
call_command(self.COMMAND_NAME, "-r=zulip", "--consent-message-id=123456")
self.assertEqual(mock_print.mock_calls, [
call('\033[94mExporting realm\033[0m: zulip'),
])
message.last_edit_time = timezone_now()
message.save()
with self.assertRaisesRegex(CommandError, "Message was edited. Aborting..."), \
patch('builtins.print') as mock_print:
call_command(self.COMMAND_NAME, "-r=zulip", f"--consent-message-id={message.id}")
self.assertEqual(mock_print.mock_calls, [
call('\033[94mExporting realm\033[0m: zulip'),
])
message.last_edit_time = None
message.save()
do_add_reaction(self.mit_user("sipbtest"), message, "outbox", "1f4e4", Reaction.UNICODE_EMOJI)
with self.assertRaisesRegex(CommandError, "Users from a different realm reacted to message. Aborting..."), \
patch('builtins.print') as mock_print:
call_command(self.COMMAND_NAME, "-r=zulip", f"--consent-message-id={message.id}")
self.assertEqual(mock_print.mock_calls, [
call('\033[94mExporting realm\033[0m: zulip'),
])
|
py | 1a4a638824c57e80f57e5b1f8162f00660f80ae9 | from django.contrib import admin
from .models import ShopUser
# Register your models here.
admin.site.register(ShopUser)
|
py | 1a4a6396bce761cd4354357f08a4c9a935109db1 | import random
class Page:
def __init__(self, proto, domain, path, status=200, size=None, links=None):
self.proto = proto
self.domain = domain
self.path = path
self.status = status
self.links = links if links is not None else []  # avoid sharing a mutable default list across instances
self.size = size
self.external_links = []
self.images = []
self.external_images = []
self.css = []
self.external_css = []
self.scripts = []
self.external_scripts = []
self.email_addrs = []
def _add_info(self, destination, value):
if value not in destination:
destination.append(value)
def random_link(self):
if not self.links:
return None
index = random.randint(1, len(self.links))
return self.links[index - 1]
def full_uri(self):
return '%s://%s%s' % (self.proto, self.domain, self.path)
def full_address(self):
return '%s%s' % (self.domain, self.path)
def add_link(self, link):
self._add_info(self.links, link)
def add_external_link(self, link):
self._add_info(self.external_links, link)
def add_image(self, image_path):
self._add_info(self.images, image_path)
def add_external_image(self, image_path):
self._add_info(self.external_images, image_path)
def add_script(self, script_path):
self._add_info(self.scripts, script_path)
def add_external_script(self, script_path):
self._add_info(self.external_scripts, script_path)
def add_css(self, css_path):
self._add_info(self.css, css_path)
def add_external_css(self, css_path):
self._add_info(self.external_css, css_path)
def add_email(self, email_address):
self._add_info(self.email_addrs, email_address) |
py | 1a4a642a9e244b2fce42d7c5b6b25abf1bdba177 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess
logging.basicConfig(level=logging.DEBUG)
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library()
s.copy(templates)
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'fix'])
|
py | 1a4a650d6fe11c022908cbd619858626b8185ac0 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''The base class for task transformer objects'''
import numpy as np
from librosa import time_to_frames, times_like
from librosa.sequence import viterbi_binary, viterbi_discriminative
import jams
from ..base import Scope
__all__ = ['BaseTaskTransformer']
def fill_value(dtype):
'''Get a fill-value for a given dtype
Parameters
----------
dtype : type
Returns
-------
`np.nan` if `dtype` is real or complex
0 otherwise
'''
if np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.complexfloating):
return dtype(np.nan)
return dtype(0)
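# Example (added comment): fill_value(np.float32) returns nan, while
# fill_value(np.int32) and fill_value(np.bool_) return 0 / False, so encoded
# targets default to "missing" for real dtypes and "off" for the rest.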
class BaseTaskTransformer(Scope):
'''Base class for task transformer objects
Attributes
----------
name : str
The name prefix for this transformer object
namespace : str
The JAMS namespace for annotations in this task
sr : number > 0
The sampling rate for audio
hop_length : int > 0
The number of samples between frames
'''
def __init__(self, name, namespace, sr, hop_length):
super(BaseTaskTransformer, self).__init__(name)
# This will trigger an exception if the namespace is not found
jams.schema.is_dense(namespace)
self.namespace = namespace
self.sr = sr
self.hop_length = hop_length
def empty(self, duration):
'''Create an empty jams.Annotation for this task.
This method should be overridden by derived classes.
Parameters
----------
duration : int >= 0
Duration of the annotation
'''
return jams.Annotation(namespace=self.namespace, time=0, duration=0)
def transform(self, jam, query=None):
'''Transform jam object to make data for this task
Parameters
----------
jam : jams.JAMS
The jams container object
query : string, dict, or callable [optional]
An optional query to narrow the elements of `jam.annotations`
to be considered.
If not provided, all annotations are considered.
Returns
-------
data : dict
A dictionary of transformed annotations.
All annotations which can be converted to the target namespace
will be converted.
'''
anns = []
if query:
results = jam.search(**query)
else:
results = jam.annotations
# Find annotations that can be coerced to our target namespace
for ann in results:
try:
anns.append(jams.nsconvert.convert(ann, self.namespace))
except jams.NamespaceError:
pass
duration = jam.file_metadata.duration
# If none, make a fake one
if not anns:
anns = [self.empty(duration)]
# Apply transformations
results = []
for ann in anns:
results.append(self.transform_annotation(ann, duration))
# If the annotation range is None, it spans the entire track
if ann.time is None or ann.duration is None:
valid = [0, duration]
else:
valid = [ann.time, ann.time + ann.duration]
results[-1]['_valid'] = time_to_frames(valid, sr=self.sr,
hop_length=self.hop_length)
# Prefix and collect
return self.merge(results)
def transform_annotation(self, ann, duration):
'''Transform jams.Annotation to make data for a given task.
Parameters
----------
ann : jams.Annotation
The jams annotation containing the data
duration : number > 0
Duration of the output, in seconds
Returns
-------
data : dict
A dictionary of transformed annotation.
'''
raise NotImplementedError
def encode_events(self, duration, events, values, dtype=np.bool):
'''Encode labeled events as a time-series matrix.
Parameters
----------
duration : number
The duration of the track
events : ndarray, shape=(n,)
Time index of the events
values : ndarray, shape=(n, m)
Values array. Must have the same first index as `events`.
dtype : numpy data type
Returns
-------
target : ndarray, shape=(n_frames, n_values)
'''
frames = time_to_frames(events, sr=self.sr,
hop_length=self.hop_length)
n_total = int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length))
n_alloc = n_total
if np.any(frames):
n_alloc = max(n_total, 1 + int(frames.max()))
target = np.empty((n_alloc, values.shape[1]),
dtype=dtype)
target.fill(fill_value(dtype))
values = values.astype(dtype)
for column, event in zip(values, frames):
target[event] += column
return target[:n_total]
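# Hedged usage sketch (added comment): with sr=22050 and hop_length=512,
#   self.encode_events(2.0, np.array([0.5, 1.5]), np.array([[1, 0], [0, 1]]))
# returns a boolean matrix of shape (86, 2) that is True only at the frames
# nearest 0.5 s (column 0) and 1.5 s (column 1).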
def encode_intervals(self, duration, intervals, values, dtype=np.bool,
multi=True, fill=None):
'''Encode labeled intervals as a time-series matrix.
Parameters
----------
duration : number
The duration (in seconds) of the track
intervals : np.ndarray, shape=(n, 2)
The list of intervals
values : np.ndarray, shape=(n, m)
The (encoded) values corresponding to each interval
dtype : np.dtype
The desired output type
multi : bool
If `True`, allow multiple labels per interval.
fill : dtype (optional)
Optional default fill value for missing data.
If not provided, the default is inferred from `dtype`.
Returns
-------
target : np.ndarray, shape=(duration * sr / hop_length, m)
The labeled interval encoding, sampled at the desired frame rate
'''
if fill is None:
fill = fill_value(dtype)
frames = time_to_frames(intervals, sr=self.sr,
hop_length=self.hop_length)
n_total = int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length))
values = values.astype(dtype)
n_alloc = n_total
if np.any(frames):
n_alloc = max(n_total, 1 + int(frames.max()))
target = np.empty((n_alloc, values.shape[1]),
dtype=dtype)
target.fill(fill)
for column, interval in zip(values, frames):
if multi:
target[interval[0]:interval[1]] += column
else:
target[interval[0]:interval[1]] = column
return target[:n_total]
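# Hedged usage sketch (added comment): with sr=22050 and hop_length=512,
#   self.encode_intervals(2.0, np.array([[0.0, 1.0], [1.0, 2.0]]),
#                         np.array([[1, 0], [0, 1]]))
# yields a boolean matrix of shape (86, 2) whose first column is True for the
# frames covering 0-1 s and whose second column is True for 1-2 s.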
def decode_events(self, encoded, transition=None, p_state=None, p_init=None):
'''Decode labeled events into (time, value) pairs
Real-valued inputs are thresholded at 0.5.
Optionally, viterbi decoding can be applied to each event class.
Parameters
----------
encoded : np.ndarray, shape=(n_frames, m)
Frame-level annotation encodings as produced by ``encode_events``.
transition : None or np.ndarray [shape=(2, 2) or (m, 2, 2)]
Optional transition matrix for each event, used for Viterbi
p_state : None or np.ndarray [shape=(m,)]
Optional marginal probability for each event
p_init : None or np.ndarray [shape=(m,)]
Optional initial probability for each event
Returns
-------
[(time, value)] : iterable of tuples
where `time` is the event time and `value` is an
np.ndarray, shape=(m,) of the encoded value at that time
See Also
--------
librosa.sequence.viterbi_binary
'''
if np.isrealobj(encoded):
if transition is None:
encoded = (encoded >= 0.5)
else:
encoded = viterbi_binary(encoded.T, transition,
p_state=p_state,
p_init=p_init).T
times = times_like(encoded,
sr=self.sr,
hop_length=self.hop_length,
axis=0)
return zip(times, encoded)
def decode_intervals(self, encoded, duration=None, multi=True, sparse=False,
transition=None, p_state=None, p_init=None):
'''Decode labeled intervals into (start, end, value) triples
Parameters
----------
encoded : np.ndarray, shape=(n_frames, m)
Frame-level annotation encodings as produced by
``encode_intervals``
duration : None or float > 0
The max duration of the annotation (in seconds)
Must be greater than the length of the encoded array.
multi : bool
If true, allow multiple labels per input frame.
If false, take the most likely label per input frame.
sparse : bool
If true, values are returned as indices, not one-hot.
If false, values are returned as one-hot encodings.
Only applies when `multi=False`.
transition : None or np.ndarray [shape=(m, m) or (2, 2) or (m, 2, 2)]
Optional transition matrix for each interval, used for Viterbi
decoding. If `multi=True`, then transition should be `(2, 2)` or
`(m, 2, 2)`-shaped. If `multi=False`, then transition should be
`(m, m)`-shaped.
p_state : None or np.ndarray [shape=(m,)]
Optional marginal probability for each label.
p_init : None or np.ndarray [shape=(m,)]
Optional initial probability for each label.
Returns
-------
[(start, end, value)] : iterable of tuples
where `start` and `end` are the interval boundaries (in seconds)
and `value` is an np.ndarray, shape=(m,) of the encoded value
for this interval.
'''
if np.isrealobj(encoded):
if multi:
if transition is None:
encoded = encoded >= 0.5
else:
encoded = viterbi_binary(encoded.T, transition,
p_init=p_init, p_state=p_state).T
elif sparse and encoded.shape[1] > 1:
# map to argmax if it's densely encoded (logits)
if transition is None:
encoded = np.argmax(encoded, axis=1)[:, np.newaxis]
else:
encoded = viterbi_discriminative(encoded.T, transition,
p_init=p_init,
p_state=p_state)[:, np.newaxis]
elif not sparse:
# if dense and multi, map to one-hot encoding
if transition is None:
encoded = (encoded == np.max(encoded, axis=1, keepdims=True))
else:
encoded_ = viterbi_discriminative(encoded.T, transition,
p_init=p_init,
p_state=p_state)
# Map to one-hot encoding
encoded = np.zeros(encoded.shape, dtype=bool)
encoded[np.arange(len(encoded_)), encoded_] = True
if duration is None:
# 1+ is fair here, because encode_intervals already pads
duration = 1 + encoded.shape[0]
else:
duration = 1 + time_to_frames(duration,
sr=self.sr,
hop_length=self.hop_length)
# [0, duration] inclusive
times = times_like(duration + 1,
sr=self.sr, hop_length=self.hop_length)
# Find the change-points of the rows
if sparse:
idx = np.where(encoded[1:] != encoded[:-1])[0]
else:
idx = np.where(np.max(encoded[1:] != encoded[:-1], axis=-1))[0]
idx = np.unique(np.append(idx, encoded.shape[0]))
delta = np.diff(np.append(-1, idx))
# Starting positions can be integrated from changes
position = np.cumsum(np.append(0, delta))
return [(times[p], times[p + d], encoded[p])
for (p, d) in zip(position, delta)]
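# Hedged usage sketch (added; not part of the library). It assumes a jams
# install that provides the 'chord' namespace and a numpy version that still
# accepts the np.bool alias used above; sr/hop_length values are arbitrary.
if __name__ == '__main__':
    demo = BaseTaskTransformer(name='demo', namespace='chord', sr=22050, hop_length=512)
    intervals = np.array([[0.0, 1.0], [1.0, 2.0]])
    values = np.array([[1, 0], [0, 1]])
    # Encode two labeled intervals at the frame rate, then decode them back.
    encoded = demo.encode_intervals(2.0, intervals, values)
    for start, end, value in demo.decode_intervals(encoded, duration=2.0):
        print(start, end, value)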
|
py | 1a4a6534eb5d67832d2decf4214f9391e8518cb5 | #!/usr/bin/env python
import sys
import os
import fvm
import fvm.fvmbaseExt as fvmbaseExt
import fvm.importers as importers
import fvm.exporters_atyped_double as exporters
import time
from numpy import *
import fvm.phonon_atyped_double as pa
import fvm.phononbaseExt as pext
from FluentCase import FluentCase
fvm.set_atype('double')
#print " "
fileBase="./"
filename="FabMesh_8.61e-6mX6.74e-6m"
extension=".cas"
Kn=1000.
KnName='Kn_'+str(Kn)
initialScale=1.e-4
SiBZfile="SiIsoEDIP_"
GeBZfile="GeIsoHarrison_"
BZdisc="2x8x10"
BZfine="original"
levels=4
T1=300.
T2=301.
Tinit=(T1+T2)/2.+0
dimension=3
eVtoJoule=1.60217646e-19
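# 1 eV = 1.60217646e-19 J; used below to convert the eV-based kspace quantities
# (conductivity, specific heat, heat rates) to SI units.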
def usage():
print "Usage: %s filebase [outfilename]" % sys.argv[0]
print "Where filebase.cas is a Fluent case file."
print "Output will be in filebase-prism.dat if it is not specified."
sys.exit(1)
reader = FluentCase(fileBase+filename+extension)
reader.read();
meshes = reader.getMeshList()
geomFields = fvm.models.GeomFields('geom')
metricsCalculator = fvm.models.MeshMetricsCalculatorA(geomFields,meshes)
metricsCalculator.init()
SiKspace=pa.KspaceA(fileBase+SiBZfile+BZdisc+".txt",dimension,1)
GeKspace=pa.KspaceA(fileBase+GeBZfile+BZdisc+".txt",dimension,1)
#SiKspaceFine=pa.KspaceA(fileBase+SiBZfile+BZfine+".txt",dimension,1)
#GeKspaceFine=pa.KspaceA(fileBase+GeBZfile+BZfine+".txt",dimension,1)
SikArray=(SiKspace.getHollandConductivity((T1+T2)/2)).asNumPyArray()
SikArray=SikArray*eVtoJoule
print "Silicon Thermal Conductivity Tensor:"
print SikArray[0],' ',SikArray[1],' ',SikArray[2]
print SikArray[3],' ',SikArray[4],' ',SikArray[5]
print SikArray[6],' ',SikArray[7],' ',SikArray[8]
print 'Silicon Specific Heat:',SiKspace.calcSpecificHeat(300)*eVtoJoule
kn0=float(SiKspace.findKnStats(initialScale))
avemfp=kn0*initialScale
#print "Silicon Average mean free path", avemfp
scale=1#avemfp/Kn
kn0=float(SiKspace.findKnStats(scale))
for mesh in meshes:
xNodes = mesh.getNodeCoordinates().asNumPyArray()
xNodes[:,0] *= scale
xNodes[:,1] *= scale
xNodes[:,2] *= scale
metricsCalculator.init()
pmacro=pext.PhononMacro(filename)
Klist=SiKspace.MakeList()
GeKspace.AddToList(Klist)
cmodel=pa.COMETModelA(meshes,0,geomFields,Klist,pmacro)
copts=cmodel.getOptions()
cBCs=cmodel.getBCs()
mkMap=cmodel.getMKMap();
mkMap[0]=0
mkMap[1]=1
FaceArea=cmodel.getWallAreaVector(meshes[0],7)
FaceAreaMag=math.sqrt(FaceArea[0]*FaceArea[0]+FaceArea[1]*FaceArea[1])
#print 'Face Area Magnitude:', FaceAreaMag
print 'Scale:', scale
SiBallisticRate=SiKspace.FindBallisticHeatRate(FaceArea,T1,T2)*eVtoJoule
#print "Silicon Ballistic Heat Rate: ",SiBallisticRate
SiDOS=pa.DOS(SiKspace)
SiDOS.binMode(0,2,0,2.74e13)
SiDOS.binMode(1,2,0,7.43e13)
SiDOS.binMode(2,2,9.7e13,1.035e14)
SiDOS.binMode(3,2,7.42e13,1.035e14)
SiDOS.setDensity()
#SiDOSfine=pa.DOS(SiKspaceFine)
#SiDOSfine.binMode(0,2,0,2.74e13)
#SiDOSfine.binMode(1,2,0,7.43e13)
#SiDOSfine.binMode(2,2,9.7e13,1.035e14)
#SiDOSfine.binMode(3,2,7.42e13,1.035e14)
#SiDOSfine.setDensity()
#print " "
GekArray=(GeKspace.getHollandConductivity((T1+T2)/2)).asNumPyArray()
GekArray=GekArray*eVtoJoule
print "Germanium Thermal Conductivity Tensor:"
print GekArray[0],' ',GekArray[1],' ',GekArray[2]
print GekArray[3],' ',GekArray[4],' ',GekArray[5]
print GekArray[6],' ',GekArray[7],' ',GekArray[8]
print 'Germanium Specific Heat:',GeKspace.calcSpecificHeat(300)*eVtoJoule
kn0=float(GeKspace.findKnStats(scale))
avemfp=kn0*scale
#print "Germanium Average mean free path", avemfp
GeBallisticRate=GeKspace.FindBallisticHeatRate(FaceArea,T1,T2)*eVtoJoule
#print "Germanium Ballistic Heat Rate: ", GeBallisticRate
GeDOS=pa.DOS(GeKspace)
GeDOS.binMode(1,2,0,4.044e13)
GeDOS.binMode(0,2,0,1.5005e13)
GeDOS.binMode(2,2,5.31e13,5.66e13)
GeDOS.binMode(3,2,4.074e13,5.66e13)
GeDOS.setDensity()
#GeDOSfine=pa.DOS(GeKspaceFine)
#GeDOSfine.binMode(1,1,0,4.044e13)
#GeDOSfine.binMode(0,1,0,1.5005e13)
#GeDOSfine.binMode(2,1,5.31e13,5.66e13)
#GeDOSfine.binMode(3,1,4.074e13,5.66e13)
#GeDOSfine.setDensity()
#TtransSiGe=SiDOSfine.makeDMMtransmission(GeDOSfine,Tinit,1)
TtransSiGeDummy=SiDOS.makeDMMtransmission(GeDOS,T1,1)
transSiGe=TtransSiGeDummy.asNumPyArray()
TSiBins=SiDOS.getFreqBins()
SiKspace.setTransmission(GeKspace,TSiBins,TtransSiGeDummy)
SiMids=SiDOS.getFreqMids().asNumPyArray()
#transGeSi=GeDOSfine.makeDMMtransmission(SiDOSfine,Tinit,0)
TtransGeSiDummy=GeDOS.makeDMMtransmission(SiDOS,T1,0)
transGeSi=TtransGeSiDummy.asNumPyArray()
GeKspace.setTransmission(SiKspace,TSiBins,TtransGeSiDummy)
SiKspace.setDOS(SiDOS)
GeKspace.setDOS(GeDOS)
"""
transSiGefile=open("transSiGe","w")
for i in range(SiMids.size):
print SiMids[i],transSiGe[i],transGeSi[i]
transSiGefile.write(str(SiMids[i])+" "+str(transSiGe[i])+"\n")
transSiGefile.close()
"""
SiBallisticInterface=SiKspace.calcBallisticInterface(GeKspace,FaceArea,T1,T2)*eVtoJoule
#print "Si Ballistic", SiBallisticInterface
GeBallisticInterface=GeKspace.calcBallisticInterface(SiKspace,FaceArea,T1,T2)*eVtoJoule
#print "Ge Ballistic", GeBallisticInterface
#sys.exit()
copts["initialTemperature"]=Tinit
copts.showResidual=10
copts.maxLevels=levels
copts.absTolerance=-1e-9
copts.preSweeps=1
copts.postSweeps=2
copts.relFactor=1
copts.withNormal=0
copts.NewtonTol=1e-6
#cBCs[5].bcType="reflecting"
#cBCs[5].bcType="temperature"
cBCs[5]["specifiedTemperature"]=T2
cBCs[5]["specifiedReflection"]=1.
cBCs[3].bcType="temperature"
#cBCs[3].bcType="reflecting"
cBCs[3]["specifiedReflection"]=1.
cBCs[3]["specifiedTemperature"]=T1
cBCs[3]["FullyImplicit"]=0
#cBCs[7].bcType="reflecting"
cBCs[7].bcType="temperature"
cBCs[7]["specifiedReflection"]=1.
cBCs[7]["specifiedTemperature"]=T1
#cBCs[8].bcType="reflecting"
cBCs[8].bcType="temperature"
cBCs[8]["specifiedTemperature"]=T1
cBCs[8]["specifiedReflection"]=1.
cBCs[8]["FullyImplicit"]=0
#cBCs[9].bcType="reflecting"
cBCs[9].bcType="temperature"
cBCs[9]["specifiedTemperature"]=T2
cBCs[9]["specifiedReflection"]=1.
cBCs[9]["FullyImplicit"]=0
#cBCs[10].bcType="reflecting"
cBCs[10].bcType="temperature"
cBCs[10]["specifiedTemperature"]=T2
cBCs[10]["specifiedReflection"]=1.
cBCs[10]["FullyImplicit"]=0
#cBCs[11].bcType="reflecting"
cBCs[11].bcType="temperature"
cBCs[11]["specifiedTemperature"]=T2
cBCs[11]["specifiedReflection"]=1.
cBCs[11]["FullyImplicit"]=0
#cBCs[12].bcType="reflecting"
cBCs[12].bcType="temperature"
cBCs[12]["specifiedTemperature"]=T1
cBCs[12]["specifiedReflection"]=1.
cBCs[12]["FullyImplicit"]=0
#cBCs[13].bcType="reflecting"
cBCs[13].bcType="temperature"
cBCs[13]["specifiedTemperature"]=T2
cBCs[13]["specifiedReflection"]=1.
cBCs[13]["FullyImplicit"]=0
#cBCs[15].bcType="reflecting"
#cBCs[15].bcType="temperature"
cBCs[15]["specifiedTemperature"]=T1
cBCs[15]["specifiedReflection"]=1.
cBCs[15]["FullyImplicit"]=0
print "Initializing..."
cmodel.init()
print "Initialized"
cmodel.advance(0)
initialResid=cmodel.getResidual()
#Mesh 0
wall12=(cmodel.HeatFluxIntegral(meshes[0],12))*eVtoJoule
wall7=(cmodel.HeatFluxIntegral(meshes[0],7))*eVtoJoule
wall9=(cmodel.HeatFluxIntegral(meshes[0],9))*eVtoJoule
wall10=(cmodel.HeatFluxIntegral(meshes[0],10))*eVtoJoule
#Mesh 1
wall3=(cmodel.HeatFluxIntegral(meshes[1],3))*eVtoJoule
wall13=(cmodel.HeatFluxIntegral(meshes[1],13))*eVtoJoule
wall8=(cmodel.HeatFluxIntegral(meshes[1],8))*eVtoJoule
wall11=(cmodel.HeatFluxIntegral(meshes[1],11))*eVtoJoule
#Interfaces
wall5=(cmodel.HeatFluxIntegral(meshes[0],5))*eVtoJoule
wall15=(cmodel.HeatFluxIntegral(meshes[1],15))*eVtoJoule
balance=0#abs(wall5+wall9)/wall9
iteration=0
print iteration,' : ',initialResid,":",wall12+wall3," : ",wall7+wall8,":",wall9+wall13,":",wall10+wall11
t=0.
end=0
begin=0
resid=cmodel.getResidual()
total=10
step=1
balTol=.01
relTol=1.e-10
div_count=0
relRes=1.
while (balance>balTol or relRes>relTol) and iteration<total:
begin=time.clock()
cmodel.advance(step)
end=time.clock()
resid=cmodel.getResidual()
if resid>initialResid:
initialResid=resid
div_count+=1
if div_count>8:
print 'Divergence Detected'
#sys.exit()
t+=(end-begin)
cmodel.applyTemperatureBoundaries()
#Mesh 0
wall12=(cmodel.HeatFluxIntegral(meshes[0],12))*eVtoJoule
wall7=(cmodel.HeatFluxIntegral(meshes[0],7))*eVtoJoule
wall9=(cmodel.HeatFluxIntegral(meshes[0],9))*eVtoJoule
wall10=(cmodel.HeatFluxIntegral(meshes[0],10))*eVtoJoule
#Mesh 1
wall3=(cmodel.HeatFluxIntegral(meshes[1],3))*eVtoJoule
wall13=(cmodel.HeatFluxIntegral(meshes[1],13))*eVtoJoule
wall8=(cmodel.HeatFluxIntegral(meshes[1],8))*eVtoJoule
wall11=(cmodel.HeatFluxIntegral(meshes[1],11))*eVtoJoule
balance=wall12+wall7+wall9+wall10+wall3+wall13+wall8+wall11
iteration+=step
relRes=resid/initialResid
print iteration,' : ',resid,":",relRes,":",wall12+wall3," : ",wall7+wall8,":",wall9+wall13,":",wall10+wall11,":",balance
#Mesh 0
wall12=(cmodel.HeatFluxIntegral(meshes[0],12))*eVtoJoule
wall7=(cmodel.HeatFluxIntegral(meshes[0],7))*eVtoJoule
wall9=(cmodel.HeatFluxIntegral(meshes[0],9))*eVtoJoule
wall10=(cmodel.HeatFluxIntegral(meshes[0],10))*eVtoJoule
#Mesh 1
wall3=(cmodel.HeatFluxIntegral(meshes[1],3))*eVtoJoule
wall13=(cmodel.HeatFluxIntegral(meshes[1],13))*eVtoJoule
wall8=(cmodel.HeatFluxIntegral(meshes[1],8))*eVtoJoule
wall11=(cmodel.HeatFluxIntegral(meshes[1],11))*eVtoJoule
print "Solution Time: ",t
meshlist0=fvmbaseExt.MeshList()
meshlist0.push_back(meshes[0])
meshlist1=fvmbaseExt.MeshList()
meshlist1.push_back(meshes[1])
name_file="FabMesh"
writer0 = exporters.VTKWriterA(geomFields,meshlist0,
name_file+'0'+'.vtk',
"Phonon Transport",
False,0)
writer0.init()
writer0.writeScalarField(pmacro.temperature,"Temperature")
for mode in range(4):
writer0.writeScalarField(pmacro.getModeTemp(0,mode),"Mode"+str(mode)+"Mesh"+str(0))
writer0.finish()
writer1 = exporters.VTKWriterA(geomFields,meshlist1,
name_file+'1'+'.vtk',
"Phonon Transport",
False,0)
writer1.init()
writer1.writeScalarField(pmacro.temperature,"Temperature")
for mode in range(4):
writer1.writeScalarField(pmacro.getModeTemp(1,mode),"Mode"+str(mode)+"Mesh"+str(1))
writer1.finish()
|
py | 1a4a65aaa77a9b7edad0fd3ace1254ecc8492634 | from django.urls import path
from .views import home
urlpatterns = [
path('', home, name='landing'),
]
|
py | 1a4a663bf3a5e775ac819e37e012e61e5218caa7 | from __future__ import print_function
import os
import pickle
import numpy
import time
import numpy as np
from scipy.spatial import distance
import torch
from torch.autograd import Variable
from basic.metric import getScorer
from basic.util import AverageMeter, LogCollector
def l2norm(X):
"""L2-normalize columns of X
"""
norm = np.linalg.norm(X, axis=1, keepdims=True)
return 1.0 * X / norm
def cal_error(videos, captions, measure='cosine'):
if measure == 'cosine':
captions = l2norm(captions)
videos = l2norm(videos)
errors = -1*numpy.dot(captions, videos.T)
elif measure == 'euclidean':
errors = distance.cdist(captions, videos, 'euclidean')
return errors
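# Illustrative call (shapes are dummy assumptions, not from the original script):
#   errors = cal_error(video_embs, cap_embs, measure='cosine')
# gives a (num_captions, num_videos) matrix in which smaller entries indicate
# a better caption/video match.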
def encode_data2(model, data_loader, log_step=10, logging=print, return_ids=True):
"""Encode all videos and captions loadable by `data_loader`
"""
batch_time = AverageMeter()
val_logger = LogCollector()
# switch to evaluate mode
model.val_start()
end = time.time()
# numpy array to keep all the embeddings
video_embs = None
cap_embs = None
video_ids = ['']*len(data_loader.dataset)
caption_ids = ['']*len(data_loader.dataset)
for i, (videos, captions, idxs, cap_ids, vid_ids) in enumerate(data_loader):
# make sure val logger is used
model.logger = val_logger
# compute the embeddings
# vid_emb, cap_emb = model.forward_emb(videos, captions, volatile=True)
_, vid_emb, _, _ = videos
_, cap_emb, _, _ = captions
# initialize the numpy arrays given the size of the embeddings
if video_embs is None:
video_embs = np.zeros((len(data_loader.dataset), vid_emb.size(1)))
cap_embs = np.zeros((len(data_loader.dataset), cap_emb.size(1)))
# preserve the embeddings by copying from gpu and converting to numpy
video_embs[idxs] = vid_emb.data.cpu().numpy().copy()
cap_embs[idxs] = cap_emb.data.cpu().numpy().copy()
for j, idx in enumerate(idxs):
caption_ids[idx] = cap_ids[j]
video_ids[idx] = vid_ids[j]
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % log_step == 0:
logging('Test: [{0:2d}/{1:2d}]\t'
'{e_log}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
.format(
i, len(data_loader), batch_time=batch_time,
e_log=str(model.logger)))
del videos, captions
    if return_ids:
return video_embs, cap_embs, video_ids, caption_ids
else:
return video_embs, cap_embs
def encode_data(model, data_loader, log_step=10, logging=print, return_ids=True):
"""Encode all videos and captions loadable by `data_loader`
"""
batch_time = AverageMeter()
val_logger = LogCollector()
# switch to evaluate mode
model.val_start()
end = time.time()
# numpy array to keep all the embeddings
video_embs = None
cap_embs = None
video_ids = ['']*len(data_loader.dataset)
caption_ids = ['']*len(data_loader.dataset)
for i, (videos, captions, idxs, cap_ids, vid_ids) in enumerate(data_loader):
# make sure val logger is used
model.logger = val_logger
# compute the embeddings
vid_emb, cap_emb = model.forward_emb(videos, captions, volatile=True)
# initialize the numpy arrays given the size of the embeddings
if video_embs is None:
video_embs = np.zeros((len(data_loader.dataset), vid_emb.size(1)))
cap_embs = np.zeros((len(data_loader.dataset), cap_emb.size(1)))
# preserve the embeddings by copying from gpu and converting to numpy
video_embs[idxs] = vid_emb.data.cpu().numpy().copy()
cap_embs[idxs] = cap_emb.data.cpu().numpy().copy()
for j, idx in enumerate(idxs):
caption_ids[idx] = cap_ids[j]
video_ids[idx] = vid_ids[j]
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % log_step == 0:
logging('Test: [{0:2d}/{1:2d}]\t'
'{e_log}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
.format(
i, len(data_loader), batch_time=batch_time,
e_log=str(model.logger)))
del videos, captions
    if return_ids:
return video_embs, cap_embs, video_ids, caption_ids
else:
return video_embs, cap_embs
# recall@k, Med r, Mean r for Text-to-Video Retrieval
def t2i(c2i, vis_details=False, n_caption=5):
"""
Text->Videos (Text-to-Video Retrieval)
c2i: (5N, N) matrix of caption to video errors
vis_details: if true, return a dictionary for ROC visualization purposes
"""
# print("errors matrix shape: ", c2i.shape)
# assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
ranks = np.zeros(c2i.shape[0])
for i in range(len(ranks)):
d_i = c2i[i]
inds = np.argsort(d_i)
        rank = np.where(inds == i // n_caption)[0][0]
ranks[i] = rank
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
return map(float, [r1, r5, r10, medr, meanr])
# recall@k, Med r, Mean r for Video-to-Text Retrieval
def i2t(c2i, n_caption=5):
"""
Videos->Text (Video-to-Text Retrieval)
c2i: (5N, N) matrix of caption to video errors
"""
#remove duplicate videos
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
ranks = np.zeros(c2i.shape[1])
for i in range(len(ranks)):
d_i = c2i[:, i]
inds = np.argsort(d_i)
        rank = np.where(inds // n_caption == i)[0][0]
ranks[i] = rank
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
return map(float, [r1, r5, r10, medr, meanr])
# mAP for Text-to-Video Retrieval
def t2i_map(c2i, n_caption=5):
"""
Text->Videos (Text-to-Video Retrieval)
c2i: (5N, N) matrix of caption to video errors
"""
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
scorer = getScorer('AP')
perf_list = []
for i in range(c2i.shape[0]):
d_i = c2i[i, :]
labels = [0]*len(d_i)
        labels[i // n_caption] = 1
sorted_labels = [labels[x] for x in np.argsort(d_i)]
current_score = scorer.score(sorted_labels)
perf_list.append(current_score)
return np.mean(perf_list)
# mAP for Video-to-Text Retrieval
def i2t_map(c2i, n_caption=5):
"""
Videos->Text (Video-to-Text Retrieval)
c2i: (5N, N) matrix of caption to video errors
"""
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
scorer = getScorer('AP')
perf_list = []
for i in range(c2i.shape[1]):
d_i = c2i[:, i]
labels = [0]*len(d_i)
labels[i*n_caption:(i+1)*n_caption] = [1]*n_caption
sorted_labels = [labels[x] for x in np.argsort(d_i)]
current_score = scorer.score(sorted_labels)
perf_list.append(current_score)
return np.mean(perf_list)
def t2i_inv_rank(c2i, n_caption=1):
"""
Text->Videos (Text-to-Video Retrieval)
c2i: (5N, N) matrix of caption to video errors
n_caption: number of captions of each image/video
"""
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
inv_ranks = np.zeros(c2i.shape[0])
for i in range(len(inv_ranks)):
d_i = c2i[i,:]
inds = np.argsort(d_i)
        rank = np.where(inds == i // n_caption)[0]
        inv_ranks[i] = sum(1.0 / (rank + 1))
return np.mean(inv_ranks)
def i2t_inv_rank(c2i, n_caption=1):
"""
Videos->Text (Video-to-Text Retrieval)
c2i: (5N, N) matrix of caption to video errors
n_caption: number of captions of each image/video
"""
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
inv_ranks = np.zeros(c2i.shape[1])
for i in range(len(inv_ranks)):
d_i = c2i[:, i]
inds = np.argsort(d_i)
        rank = np.where(inds // n_caption == i)[0]
        inv_ranks[i] = sum(1.0 / (rank + 1))
return np.mean(inv_ranks)
def i2t_inv_rank_multi(c2i, n_caption=2):
"""
Text->videos (Image Search)
c2i: (5N, N) matrix of caption to image errors
n_caption: number of captions of each image/video
"""
# print("errors matrix shape: ", c2i.shape)
assert c2i.shape[0] / c2i.shape[1] == n_caption, c2i.shape
inv_ranks = np.zeros(c2i.shape[1])
result = []
for i in range(n_caption):
idx = range(i, c2i.shape[0], n_caption)
sub_c2i = c2i[idx, :]
score = i2t_inv_rank(sub_c2i, n_caption=1)
result.append(score)
return result
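

if __name__ == '__main__':
    # Minimal self-check (illustrative addition, not part of the original
    # module): score a random (5N, N) caption-to-video error matrix. The
    # values are dummy data; real use feeds cal_error() the embeddings
    # produced by encode_data().
    rng = np.random.RandomState(0)
    dummy_errors = rng.rand(25, 5)
    print('t2i r1/r5/r10/medr/meanr:', list(t2i(dummy_errors)))
    print('i2t r1/r5/r10/medr/meanr:', list(i2t(dummy_errors)))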
|
py | 1a4a66701aa8728366e4f641f58f0742d66996e5 | """Datasets are defined as scripts and have unique properties.
The Module defines generic dataset properties and models the
functions available for inheritance by the scripts or datasets.
"""
from __future__ import print_function
from weaver.engines import choose_engine
from weaver.lib.models import *
from weaver.lib.process import make_sql
class Script(object):
"""This class defines the properties of a generic dataset.
Each Dataset inherits attributes from this class to define
it's Unique functionality.
"""
def __init__(
self,
title="",
description="",
name="",
urls=dict(),
tables=dict(),
ref="",
public=True,
addendum=None,
citation="Not currently available",
licenses=[{"name": None}],
retriever_minimum_version="",
version="",
encoding="",
message="",
**kwargs
):
self.title = title
self.name = name
self.filename = __name__
self.description = description
self.urls = urls
self.tables = tables
self.ref = ref
self.public = public
self.addendum = addendum
self.citation = citation
self.licenses = licenses
self.keywords = []
self.retriever_minimum_version = retriever_minimum_version
self.encoding = encoding
self.version = version
self.message = message
for key, item in list(kwargs.items()):
setattr(self, key, item[0] if isinstance(item, tuple) else item)
def __str__(self):
desc = self.name
if self.reference_url():
desc += "\n" + self.reference_url()
return desc
def integrate(self, engine=None, debug=False):
"""Generic function to prepare for integration."""
self.engine = self.checkengine(engine)
self.engine.debug = debug
self.engine.db_name = self.name
self.engine.create_db()
def reference_url(self):
if self.ref:
return self.ref
else:
if len(self.urls) == 1:
return self.urls[list(self.urls.keys())[0]]
else:
return None
def checkengine(self, engine=None):
"""Returns the required engine instance"""
if engine is None:
opts = {}
engine = choose_engine(opts)
engine.get_input()
engine.script = self
return engine
def exists(self, engine=None):
if engine:
return engine.exists(self)
else:
return False
def matches_terms(self, terms):
try:
search_string = " ".join(
[self.name, self.description, self.name] + self.keywords
).upper()
for term in terms:
if not term.upper() in search_string:
return False
return True
except:
return False
class BasicTextTemplate(Script):
"""Defines the pre processing required for scripts.
Scripts that need pre processing should use the download function
from this class.
Scripts that require extra tune up, should override this class.
"""
def __init__(self, **kwargs):
Script.__init__(self, **kwargs)
for key in kwargs:
setattr(self, key, kwargs[key])
self.db_table_name = None
def integrate(self, engine=None, debug=False):
"""Create the SQL query to be sent to the Engine
Uses the scripts' integrate function to prepare the engine
and it creates the database to store the result.
"""
Script.integrate(self, engine, debug)
sql_statement = make_sql(self)
result_db = engine.database_name()
result_table = self.result["table"]
db_table_name = "{db_name}.{table_name}".format(
db_name=result_db, table_name=result_table
)
self.db_table_name = db_table_name
Script.db_table_name = db_table_name
drop_query = self.engine.drop_statement("TABLE", db_table_name)
join_query = sql_statement.format(
result_dbi=result_db, result_tablei=result_table
)
try:
if self.engine.debug:
print(drop_query)
self.engine.execute(drop_query)
except:
pass
try:
if self.engine.debug:
print(join_query)
self.engine.execute(join_query)
except Exception as e:
try:
                # `self.connection` does not exist on the template; roll back on
                # the engine's connection (assumed present on engines) instead.
                self.engine.connection.rollback()
            except Exception:
pass
print(e)
print("Process successfully launched in Database.")
print("Please wait for the table to render")
return engine
TEMPLATES = {"default": BasicTextTemplate}
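# Illustrative lookup (sketch; the 'demo' values are hypothetical):
#   script = TEMPLATES["default"](name="demo", description="bird counts")
#   script.matches_terms(["bird"])  # True, matched against name/description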
|
py | 1a4a66f9b6ba4eb6fbfc2e3a913cdfe5dc7a3a64 | # -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from hyde.generator import Generator
from hyde.site import Site
from hyde.tests.util import assert_no_diff
from fswrap import File, Folder
SCSS_SOURCE = File(__file__).parent.child_folder('scss')
TEST_SITE = File(__file__).parent.parent.child_folder('_test')
class TestSassyCSS(object):
def setUp(self):
TEST_SITE.make()
TEST_SITE.parent.child_folder(
'sites/test_jinja').copy_contents_to(TEST_SITE)
SCSS_SOURCE.copy_contents_to(TEST_SITE.child('content/media/css'))
File(TEST_SITE.child('content/media/css/site.css')).delete()
def tearDown(self):
TEST_SITE.delete()
def test_scss(self):
s = Site(TEST_SITE)
s.config.mode = 'prod'
s.config.plugins = ['hyde.ext.plugins.css.SassyCSSPlugin']
source = TEST_SITE.child('content/media/css/site.scss')
target = File(
Folder(s.config.deploy_root_path).child('media/css/site.css'))
gen = Generator(s)
gen.generate_resource_at_path(source)
assert target.exists
text = target.read_all()
expected_text = File(SCSS_SOURCE.child('expected-site.css')).read_all()
assert_no_diff(expected_text, text)
|
py | 1a4a67d1799755c0c2c1ab62c2d7d459bad60d21 | #!/usr/bin/env python
__all__ = ['douban_download']
import json
import urllib.request, urllib.parse
from ..common import *
def douban_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
html = get_html(url)
if re.match(r'https?://movie', url):
title = match1(html, 'name="description" content="([^"]+)')
        tid = match1(url, r'trailer/(\d+)')
real_url = 'https://movie.douban.com/trailer/video_url?tid=%s' % tid
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
elif 'subject' in url:
titles = re.findall(r'data-title="([^"]*)">', html)
song_id = re.findall(r'<li class="song-item" id="([^"]*)"', html)
song_ssid = re.findall(r'data-ssid="([^"]*)"', html)
get_song_url = 'http://music.douban.com/j/songlist/get_song_url'
for i in range(len(titles)):
title = titles[i]
datas = {
'sid': song_id[i],
'ssid': song_ssid[i]
}
post_params = urllib.parse.urlencode(datas).encode('utf-8')
try:
resp = urllib.request.urlopen(get_song_url, post_params)
resp_data = json.loads(resp.read().decode('utf-8'))
real_url = resp_data['r']
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
except:
pass
if not info_only:
try:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
except:
pass
else:
titles = re.findall(r'"name":"([^"]*)"', html)
real_urls = [re.sub('\\\\/', '/', i) for i in re.findall(r'"rawUrl":"([^"]*)"', html)]
for i in range(len(titles)):
title = titles[i]
real_url = real_urls[i]
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
site_info = "Douban.com"
download = douban_download
download_playlist = playlist_not_supported('douban')
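# Hypothetical invocation (the URL is illustrative only):
#   douban_download('https://movie.douban.com/trailer/123456/', info_only=True)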
|
py | 1a4a68ebacbfc17ce21be6fb8777657fde6893df | import pkg_resources as pkr
# List of compatible firmware builds
compat_fw = [511]
# List of compatible patches
compat_patch = [0]
# List of compatible packs
compat_packs = []
# Compatible network protocol version
protocol_version = '8'
# Official release name
release = pkr.get_distribution("pymoku").version
|
py | 1a4a693ef5ab2f23ecaa31cef3feb53c24d428d8 | from src.logger import get_logger
import ast
import src.cAST as cAST
logger = get_logger('Visitor')
class Visitor(ast.NodeVisitor):
"""Visitor class to traverse ast
While traversing existing ast, a custom AST (cAST) is persisted.
cAST takes in to account imports in code and their aliases in order to homogenise the ast.
"""
def __init__(self, inbuild_imports, sys_imports):
self.custom_ast = None
self.inbuild_imports = inbuild_imports
self.sys_imports = sys_imports
self.aliases = dict()
def set_custom_ast(self, cast):
self.custom_ast = cast
def get_custom_ast(self) -> cAST:
return self.custom_ast
def save_alias(self, alias: str, original: str):
self.aliases.update({alias: original})
# =============================================================================================
# Utils Visitor
# =============================================================================================
@staticmethod
def _format(node: ast) -> dict:
"""Get a dict representation of node.
:param node: Node to format
:return: dict representation of node
"""
# If ast.AST, get all fields under such node and their pair value.
if isinstance(node, ast.AST):
args = []
            keywords = False
for field in node._fields:
try:
value = getattr(node, field)
except AttributeError:
keywords = True
else:
if keywords:
args.append({field: Visitor._format(value)})
else:
args.append(Visitor._format(value))
return {node.__class__.__name__: args}
# If list, create a list of formatted childs and return it
elif isinstance(node, list):
return [Visitor._format(x) for x in node]
# Arrived to this point, should provide representation of primitive types like str, float, etc.
return repr(node)
def alias2original(self, alias: str) -> str:
"""Given an alias, search it in available ones and provide original name.
If not found, return same name.
:param alias: key string to look in self.aliases
:return: linked name for alias
"""
if alias in self.aliases:
return self.aliases.get(alias)
return alias
@staticmethod
def get_generic_attributes(node: ast.AST) -> dict:
"""By default, attributes stored in cAST will be a list of the values retrieved
by Visitor._format().
:param node: ast.AST node to extract attributes
:return: dict
"""
return Visitor._format(node).values()
@staticmethod
def initialise_child(parent: cAST.Node, child: ast):
"""Create cAST node given child ast node. Set child's parent. Set child as parent's child.
:param parent: parent ast node
:param child: child ast node
:return:
"""
cast_child = cAST.Node(child)
cast_child.set_parent(parent)
parent.set_child(cast_child)
# END Utils Visitor
# ---------------------------------------------------------------------------------------------
# =============================================================================================
# Generic TRAVERSE OF ast.AST
# =============================================================================================
def generic_visit(self, node, general_behaviour=True, look_down=True):
"""Shared/Generic behaviour when visiting node.
:param node: ast.AST Node being traversed
:param general_behaviour: bool. apply general behaviour or not
:param look_down: keep recursive traversal after this node
:return:
"""
metadata = self._format(node)
if isinstance(node, ast.Module):
node_module = cAST.Node(node)
node_module.set_metadata(metadata)
self.populate_CAST_node(node_module)
cast_module = cAST.cAST(root=node_module)
self.set_custom_ast(cast_module)
elif isinstance(node, ast.AST):
cast_node = self.get_custom_ast().find_node(node)
cast_node.set_metadata(metadata)
if general_behaviour:
cast_node.set_is_default_attributes(general_behaviour)
self.populate_CAST_node(cast_node)
else:
logger.error('expected ast.AST or ast.Module, got %r' % node.__class__.__name__)
if look_down:
ast.NodeVisitor.generic_visit(self, node)
@staticmethod
def populate_CAST_node(node: cAST):
"""Given a CAST node, create subsequent CAST childs and link them to node.
If no child found, that node is a leaf node. Get attributes and set_attributes().
If childs found, that node is a parent node. Initialise childs.
:param node: cAST node to populate
:return:
"""
ast_node = node.get_ast_node()
iterator = ast.iter_child_nodes(ast_node)
try:
first_child = next(iterator)
Visitor.initialise_child(parent=node, child=first_child)
except StopIteration:
attributes = Visitor.get_generic_attributes(ast_node)
node.set_attributes(attributes)
for child in iterator:
Visitor.initialise_child(parent=node, child=child)
# END Generic TRAVERSE OF ast.AST
# ---------------------------------------------------------------------------------------------
# =============================================================================================
# CUSTOM visit for ast.AST node
# =============================================================================================
def _treat_import(self, node):
from src.constants import Origin
attributes = list()
for alias in node.names:
if alias.asname is not None:
self.save_alias(alias=alias.asname, original=alias.name)
if alias.name in self.inbuild_imports:
attributes.append({"origin": Origin.NATIVE})
elif alias.name in self.sys_imports:
attributes.append({"origin": Origin.SYSTEM})
else:
attributes.append({"origin": Origin.UNKNOWN})
if isinstance(node, ast.ImportFrom):
attributes.append({"name": node.module + "." + alias.name})
elif isinstance(node, ast.Import):
attributes.append({"name": alias.name})
else:
raise("Non accepted class in Import treatment.")
return attributes
def visit_Import(self, node):
cast_node = self.get_custom_ast().find_node(node)
attributes = self._treat_import(node)
cast_node.set_is_default_attributes(False)
cast_node.set_attributes(attributes)
self.generic_visit(node, general_behaviour=False, look_down=False)
def visit_ImportFrom(self, node):
cast_node = self.get_custom_ast().find_node(node)
attributes = self._treat_import(node)
cast_node.set_is_default_attributes(False)
cast_node.set_attributes(attributes)
self.generic_visit(node, general_behaviour=False, look_down=False)
@staticmethod
def _treat_name(node) -> list:
attributes = list()
formatted_node = Visitor._format(node)
name_values = list(formatted_node.values())[0] # format: [[id, {action: [args]}]]
id = name_values[0]
action_dict = name_values[1]
action = list(action_dict.keys())[0]
attributes.append({"id": id, "ctx": action})
return attributes
def visit_NameConstant(self, node):
self.generic_visit(node)
def visit_Name(self, node):
cast_node = self.get_custom_ast().find_node(node)
attributes = self._treat_name(node)
cast_node.set_is_default_attributes(False)
cast_node.set_attributes(attributes)
self.generic_visit(node, general_behaviour=False, look_down=False)
def _treat_call(self, node: ast) -> list:
from src.constants import Origin
attributes = list()
func_call = node.__dict__.get('func')
original_id = self.alias2original(func_call.__dict__.get('id'))
if original_id in self.sys_imports:
attributes.append({'origin': Origin.SYSTEM})
elif original_id in self.inbuild_imports:
attributes.append({'origin': Origin.NATIVE})
else:
attributes.append({'origin': Origin.UNKNOWN})
change_id = {'id': original_id}
func_call.__dict__.update(change_id)
return attributes
def visit_Call(self, node):
cast_node = self.get_custom_ast().find_node(node)
attributes = self._treat_call(node)
cast_node.set_is_default_attributes(False)
cast_node.set_attributes(attributes)
self.populate_CAST_node(cast_node)
self.generic_visit(node, general_behaviour=False, look_down=True)
@staticmethod
def _treat_keyword(node: ast) -> list:
attributes = list()
arg = node.__dict__.get('arg')
attributes.append({'arg': arg})
return attributes
def visit_keyword(self, node):
cast_node = self.get_custom_ast().find_node(node)
attributes = self._treat_keyword(node)
cast_node.set_is_default_attributes(False)
cast_node.set_attributes(attributes)
self.populate_CAST_node(cast_node)
self.generic_visit(node, general_behaviour=False, look_down=True)
@staticmethod
def _treat_attribute(node: ast) -> list:
attributes = list()
attr = node.__dict__.get('attr')
attributes.append({'attr': attr})
return attributes
def visit_Attribute(self, node):
cast_node = self.get_custom_ast().find_node(node)
attributes = self._treat_attribute(node)
cast_node.set_is_default_attributes(False)
cast_node.set_attributes(attributes)
self.populate_CAST_node(cast_node)
self.generic_visit(node, general_behaviour=False, look_down=True)
# END CUSTOM visit for ast.AST node
# ---------------------------------------------------------------------------------------------
# =============================================================================================
# DEFAULT visit for ast.AST node
# =============================================================================================
def visit_Num(self, node):
self.generic_visit(node)
def visit_Str(self, node):
self.generic_visit(node)
def visit_FormattedValue(self, node):
self.generic_visit(node)
def visit_JoinedStr(self, node):
self.generic_visit(node)
def visit_Bytes(self, node):
self.generic_visit(node)
def visit_List(self, node):
self.generic_visit(node)
def visit_Tuple(self, node):
self.generic_visit(node)
def visit_Set(self, node):
self.generic_visit(node)
def visit_Dict(self, node):
self.generic_visit(node)
def visit_Ellipsis(self, node):
self.generic_visit(node)
def visit_Load(self, node):
self.generic_visit(node)
def visit_Store(self, node):
self.generic_visit(node)
def visit_Del(self, node):
self.generic_visit(node)
def visit_Starred(self, node):
self.generic_visit(node)
def visit_Expr(self, node):
self.generic_visit(node)
def visit_UnaryOp(self, node):
self.generic_visit(node)
def visit_UAdd(self, node):
self.generic_visit(node)
def visit_USub(self, node):
self.generic_visit(node)
def visit_Not(self, node):
self.generic_visit(node)
def visit_Invert(self, node):
self.generic_visit(node)
def visit_BinOp(self, node):
self.generic_visit(node)
def visit_Add(self, node):
self.generic_visit(node)
def visit_Sub(self, node):
self.generic_visit(node)
def visit_Mult(self, node):
self.generic_visit(node)
def visit_Div(self, node):
self.generic_visit(node)
def visit_FloorDiv(self, node):
self.generic_visit(node)
def visit_Mod(self, node):
self.generic_visit(node)
def visit_Pow(self, node):
self.generic_visit(node)
def visit_LShift(self, node):
self.generic_visit(node)
def visit_RShift(self, node):
self.generic_visit(node)
def visit_BitOr(self, node):
self.generic_visit(node)
def visit_BitXor(self, node):
self.generic_visit(node)
def visit_BitAnd(self, node):
self.generic_visit(node)
def visit_MatMult(self, node):
self.generic_visit(node)
def visit_BoolOp(self, node):
self.generic_visit(node)
def visit_And(self, node):
self.generic_visit(node)
def visit_Or(self, node):
self.generic_visit(node)
def visit_Compare(self, node):
self.generic_visit(node)
def visit_Eq(self, node):
self.generic_visit(node)
def visit_NotEq(self, node):
self.generic_visit(node)
def visit_Lt(self, node):
self.generic_visit(node)
def visit_LtE(self, node):
self.generic_visit(node)
def visit_Gt(self, node):
self.generic_visit(node)
def visit_GtE(self, node):
self.generic_visit(node)
def visit_Is(self, node):
self.generic_visit(node)
def visit_IsNot(self, node):
self.generic_visit(node)
def visit_In(self, node):
self.generic_visit(node)
def visit_NotIn(self, node):
self.generic_visit(node)
def visit_IfExp(self, node):
self.generic_visit(node)
def visit_Subscript(self, node):
self.generic_visit(node)
def visit_Index(self, node):
self.generic_visit(node)
def visit_Slice(self, node):
self.generic_visit(node)
def visit_ExtSlice(self, node):
self.generic_visit(node)
def visit_ListComp(self, node):
self.generic_visit(node)
def visit_SetComp(self, node):
self.generic_visit(node)
def visit_GeneratorExp(self, node):
self.generic_visit(node)
def visit_DictComp(self, node):
self.generic_visit(node)
def visit_comprehension(self, node):
self.generic_visit(node)
def visit_Assign(self, node):
self.generic_visit(node)
def visit_AnnAssign(self, node):
self.generic_visit(node)
def visit_AugAssign(self, node):
self.generic_visit(node)
def visit_Print(self, node):
self.generic_visit(node)
def visit_Raise(self, node):
self.generic_visit(node)
def visit_Assert(self, node):
self.generic_visit(node)
def visit_Delete(self, node):
self.generic_visit(node)
def visit_Pass(self, node):
self.generic_visit(node)
def visit_alias(self, node):
self.generic_visit(node)
def visit_If(self, node):
self.generic_visit(node)
def visit_For(self, node):
self.generic_visit(node)
def visit_While(self, node):
self.generic_visit(node)
def visit_Break(self, node):
self.generic_visit(node)
def visit_Continue(self, node):
self.generic_visit(node)
def visit_Try(self, node):
self.generic_visit(node)
def visit_TryFinally(self, node):
self.generic_visit(node)
def visit_TryExcept(self, node):
self.generic_visit(node)
def visit_ExceptHandler(self, node):
self.generic_visit(node)
def visit_With(self, node):
self.generic_visit(node)
def visit_withitem(self, node):
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.generic_visit(node)
def visit_Lambda(self, node):
self.generic_visit(node)
def visit_arguments(self, node):
self.generic_visit(node)
def visit_arg(self, node):
self.generic_visit(node)
def visit_Return(self, node):
self.generic_visit(node)
def visit_Yield(self, node):
self.generic_visit(node)
def visit_YieldFrom(self, node):
self.generic_visit(node)
def visit_Global(self, node):
self.generic_visit(node)
def visit_Nonlocal(self, node):
self.generic_visit(node)
def visit_ClassDef(self, node):
self.generic_visit(node)
def visit_AsyncFunctionDef(self, node):
self.generic_visit(node)
def visit_Await(self, node):
self.generic_visit(node)
def visit_AsyncFor(self, node):
self.generic_visit(node)
def visit_AsyncWith(self, node):
self.generic_visit(node)
# END DEFAULT visit for ast.AST node
# ---------------------------------------------------------------------------------------------
|
py | 1a4a6b8f56036e52b722b54eb2bb3ec5a7027c30 | # @copyright Copyright 2019 United States Government as represented by the Administrator of the
# National Aeronautics and Space Administration. All Rights Reserved. */
#
import socket
import datetime
from trick.unit_test import *
#===============================================================================================
# Integration test includes
#===============================================================================================
execfile("int_tests/FluidTypes.py")
execfile("int_tests/FluidNetworkConstants.py")
#===============================================================================================
# GUNNS SIM_mass_overflow Test 45 Overflow case
#===============================================================================================
class TestOverflow45over(Test):
""" A network test class that derives from the Test base class.
The typical use is to derive from the Test base class, override setup and possibly
tearDown. Be sure to call registerEventBasedTest in the setup function to schedule when
your test functions are called. See example below.
The Test base class also has utility test functions that you can call to make comparisons.
There are several examples below. See Test.py for more comparisons.
"""
total_tc_mass_CO = 0.0
total_tc_mass_CH4 = 0.0
def __init__(self, testName, testStartMessage, testFinishMessage):
""" Class constructor that overrides its parent class constructor"""
# Invokes the class constructor of the parent class #
super(TestOverflow45over, self).__init__(testName, testStartMessage, testFinishMessage)
def setup(self):
""" Test setup function. Called before activating the test event.
This is where you setup your test environment prior to running all tests.
This is also where you want to schedule when your test functions are called.
"""
# Register all test events and schedule the times they are run at
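        # Argument order inferred from usage here (see the Test base class):
        # (name, setup sim-time, setup fn, test sim-time, test fn), in seconds.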
self.registerEventBasedTest("Nodes Initial State Check", 0.0125, self.noOp, 0.0125, self.checkNodesInitState)
self.registerEventBasedTest("Nodes Final State Check", 12.0, self.noOp, 12.0, self.checkNodesFinalState)
# Call the parent class setup to complete the test setup
super(TestOverflow45over, self).setup()
# This stores total fluid quantities of all nodes for comparison against in the final test.
# This isn't actually testing any values, rather just storing values for later test.
def checkNodesInitState(self):
for node in range(0,self.getNumNodes()):
mass_CO = self.nodeTcMass(node, trick.ChemicalCompound.CO)
mass_CH4 = self.nodeTcMass(node, trick.ChemicalCompound.CH4)
self.total_tc_mass_CO = self.total_tc_mass_CO + mass_CO
self.total_tc_mass_CH4 = self.total_tc_mass_CH4 + mass_CH4
# Tests final total fluid quantities in all nodes against the stored values from the start of
# the test, for conservation of mass & energy.
def checkNodesFinalState(self):
final_total_tc_mass_CO = 0.0
final_total_tc_mass_CH4 = 0.0
for node in range(0,self.getNumNodes()):
mass_CO = self.nodeTcMass(node, trick.ChemicalCompound.CO)
mass_CH4 = self.nodeTcMass(node, trick.ChemicalCompound.CH4)
final_total_tc_mass_CO = final_total_tc_mass_CO + mass_CO
final_total_tc_mass_CH4 = final_total_tc_mass_CH4 + mass_CH4
print("-------------------------------------------------------------------------------------------------")
        # Compare the change in each total TC mass (final - initial) against a tolerance scaled by the initial total
self.testNear(final_total_tc_mass_CO - self.total_tc_mass_CO, 0.0, (1.0E-16 + tolerance * self.total_tc_mass_CO), " mass CO error fraction ::")
self.testNear(final_total_tc_mass_CH4 - self.total_tc_mass_CH4, 0.0, (1.0E-16 + tolerance * self.total_tc_mass_CH4), " mass CH4 error fraction ::")
""" This is where you setup all your getters/setters for the parameters you need for int testing.
"""
# Getter for number of nodes
def getNumNodes(self):
return massOverflow.fluid45over.getNumLocalNodes() - 1
# Getter for node
def node(self,node):
return massOverflow.fluid45over.netNodes[node]
# Getter for node fluid
def nodeTc(self,node):
return self.node(node).getContent().getTraceCompounds()
# Getter for node TC Mass
def nodeTcMass(self,node,compound):
return self.nodeTc(node).getMass(compound)
|
py | 1a4a6bd98f1027fda46a3c5d5c75e34c5a759d13 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .expressions import Expressions, ExpressionVisitors, RewriteNot
from .predicate import BoundPredicate, UnboundPredicate
def inclusive(spec, case_sensitive=True):
return InclusiveProjection(spec, case_sensitive)
def strict(spec):
return StrictProjection(spec)
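

# Illustrative use (sketch; the caller-side names are assumptions):
#   row_filter = Expressions.equal("ts", 1234)
#   part_filter = inclusive(spec).project(row_filter)
# Inclusive projection keeps every partition that MAY contain matching rows;
# strict() keeps only partitions in which ALL rows match.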
class ProjectionEvaluator(ExpressionVisitors.ExpressionVisitor):
def project(self, expr):
raise NotImplementedError()
class BaseProjectionEvaluator(ProjectionEvaluator):
def __init__(self, spec, case_sensitive=True):
self.spec = spec
self.case_sensitive = case_sensitive
def project(self, expr):
# projections assume that there are no NOT nodes in the expression tree. to ensure that this
# is the case, the expression is rewritten to push all NOT nodes down to the expression
# leaf nodes.
# this is necessary to ensure that the default expression returned when a predicate can't be
# projected is correct.
#
return ExpressionVisitors.visit(ExpressionVisitors.visit(expr, RewriteNot.get()), self)
def always_true(self):
return Expressions.always_true()
def always_false(self):
return Expressions.always_false()
def not_(self, result):
raise RuntimeError("[BUG] project called on expression with a not")
def and_(self, left_result, right_result):
return Expressions.and_(left_result, right_result)
def or_(self, left_result, right_result):
return Expressions.or_(left_result, right_result)
def predicate(self, pred):
bound = pred.bind(self.spec.schema.as_struct(), case_sensitive=self.case_sensitive)
if isinstance(bound, BoundPredicate):
return self.predicate(bound)
return bound
class InclusiveProjection(BaseProjectionEvaluator):
def __init__(self, spec, case_sensitive=True):
super(InclusiveProjection, self).__init__(spec,
case_sensitive=case_sensitive)
def predicate(self, pred):
if isinstance(pred, UnboundPredicate):
return super(InclusiveProjection, self).predicate(pred)
part = self.spec.get_field_by_source_id(pred.ref.field_id)
if part is None:
return self.always_true()
result = part.transform.project(part.name, pred)
if result is not None:
return result
return self.always_true()
class StrictProjection(BaseProjectionEvaluator):
def __init__(self, spec):
super(StrictProjection, self).__init__(spec)
def predicate(self, pred):
part = self.spec.get_field_by_source_id(pred.ref.field_id)
if part is None:
return self.always_false()
result = part.transform.project_strict(part.name, pred)
if result is not None:
return result
return self.always_false()
|
py | 1a4a6bee1e2b00d2ec5d1ed9da0336b8b64f14ae | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.file_utils import (
_BaseLazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
if is_tokenizers_available():
_import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
if is_torch_available():
_import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
if is_tf_available():
_import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
if is_flax_available():
_import_structure["modeling_flax_electra"] = [
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig
from .tokenization_electra import ElectraTokenizer
if is_tokenizers_available():
from .tokenization_electra_fast import ElectraTokenizerFast
if is_torch_available():
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
if is_tf_available():
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
if is_flax_available():
from .modeling_flax_electra import (
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import importlib
import os
import sys
class _LazyModule(_BaseLazyModule):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
__file__ = globals()["__file__"]
__path__ = [os.path.dirname(__file__)]
def _get_module(self, module_name: str):
return importlib.import_module("." + module_name, self.__name__)
sys.modules[__name__] = _LazyModule(__name__, _import_structure)
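    # Net effect (sketch of the intended behavior): `from ... import ElectraModel`
    # defers importing modeling_electra until the attribute is first accessed.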
|
py | 1a4a6d08aaf5d9d419fcba0c7bd57a21dc0ed799 | #!/usr/bin/env python3
questions = {
"strong": "Do ye like yer drinks strong?",
"salty": "Do ye like it with a salty tang?",
"bitter": "Are ye a lubber who likes it bitter?",
"sweet": "Would ye like a bit of sweetness with yer poison?",
"fruity": "Are ye one for a fruity finish?",
}
ingredients = {
"strong": ["glug of rum", "slug of whisky", "splash of gin"],
"salty": ["olive on a stick", "salt-dusted rim", "rasher of bacon"],
"bitter": ["shake of bitters", "splash of tonic", "twist of lemon peel"],
"sweet": ["sugar cube", "spoonful of honey", "spash of cola"],
"fruity": ["slice of orange", "dash of cassis", "cherry on top"],
}
def bartender(**data):
    for flavor, question in data.items():
        print(question)
print("Hi there, welcome to the Pirate Bartender program")
bartender(**questions)
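

# A fuller version (sketch, assuming interactive yes/no input) would pair the
# answers with the `ingredients` table, e.g.:
#   import random
#   drink = [random.choice(ingredients[f]) for f in questions
#            if input(questions[f] + " (y/n) ").lower().startswith("y")]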
|
py | 1a4a6df35461289f7ac4df3ceff125c3eb3a3e52 | from django.contrib.auth import models as auth
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# No related name is needed here, since symmetrical relations are not
# explicitly reversible.
@python_2_unicode_compatible
class SelfRefer(models.Model):
name = models.CharField(max_length=10)
references = models.ManyToManyField('self')
related = models.ManyToManyField('self')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
# Regression for #11956 -- a many to many to the base class
@python_2_unicode_compatible
class TagCollection(Tag):
tags = models.ManyToManyField(Tag, related_name='tag_collections')
def __str__(self):
return self.name
# A related_name is required on one of the ManyToManyField entries here because
# they are both addressable as reverse relations from Tag.
@python_2_unicode_compatible
class Entry(models.Model):
name = models.CharField(max_length=10)
topics = models.ManyToManyField(Tag)
related = models.ManyToManyField(Tag, related_name="similar")
def __str__(self):
return self.name
# Two models both inheriting from a base model with a self-referential m2m field
class SelfReferChild(SelfRefer):
pass
class SelfReferChildSibling(SelfRefer):
pass
# Many-to-Many relation between models, where one of the PK's isn't an Autofield
class Line(models.Model):
name = models.CharField(max_length=100)
class Worksheet(models.Model):
id = models.CharField(primary_key=True, max_length=100)
lines = models.ManyToManyField(Line, blank=True, null=True)
# Regression for #11226 -- A model with the same name that another one to
# which it has a m2m relation. This shouldn't cause a name clash between
# the automatically created m2m intermediary table FK field names when
# running syncdb
class User(models.Model):
name = models.CharField(max_length=30)
friends = models.ManyToManyField(auth.User)
class BadModelWithSplit(models.Model):
name = models.CharField(max_length=1)
def split(self):
raise RuntimeError('split should not be called')
class Meta:
abstract = True
class RegressionModelSplit(BadModelWithSplit):
"""
Model with a split method should not cause an error in add_lazy_relation
"""
others = models.ManyToManyField('self')
|