repo_name (stringlengths 5..92) | path (stringlengths 4..232) | copies (stringclasses, 19 values) | size (stringlengths 4..7) | content (stringlengths 721..1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000..9,223,102,107B) | line_mean (float64, 6.51..99.9) | line_max (int64, 15..997) | alpha_frac (float64, 0.25..0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
apollo-ng/governess | server/lib/drivers/input/hx711/example.py | 1 | 2129 | import RPi.GPIO as GPIO
import time
import sys
from hx711 import HX711
def cleanAndExit():
print ("Cleaning...")
GPIO.cleanup()
print("Bye!")
sys.exit()
hx = HX711(5, 6)
# I've found that, for some reason, the byte order is not always the same between versions of python, numpy and the hx711 itself.
# Still need to figure out why it changes.
# If you're experiencing wildly random values, change these values to MSB or LSB until you get more stable readings.
# There is some code below to debug and log the order of the bits and the bytes.
# The first parameter is the order in which the bytes are used to build the "long" value.
# The second parameter is the order of the bits inside each byte.
# According to the HX711 datasheet, the second parameter is MSB, so you shouldn't need to modify it.
hx.set_reading_format("LSB", "MSB")
# HOW TO CALCULATE THE REFERENCE UNIT
# Set the reference unit to 1, then put 1 kg (or anything whose exact weight you know) on your sensor.
# In this case, 92 corresponds to 1 gram because, with 1 as the reference unit, I got numbers near 0 without any weight
# and numbers around 184000 when I added 2 kg. So, by simple proportion:
# if 2000 grams reads 184000, then 1 gram corresponds to 184000 / 2000 = 92.
# hx.set_reference_unit(92)
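# A worked version of the calibration above as code (a sketch: the numbers are
# the example readings quoted in the comment, not measurements from this rig):
#
#     known_grams = 2000        # calibration weight in grams
#     raw_reading = 184000      # value reported with that weight on the scale, after tare
#     reference_unit = raw_reading / known_grams    # 184000 / 2000 = 92 counts per gram
#     hx.set_reference_unit(reference_unit)         # get_weight() then reports grams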
hx.set_reference_unit(21)
hx.reset()
hx.tare()
time.sleep(1)
while True:
try:
# The three lines below are useful for debugging whether to use MSB or LSB in the reading format,
# i.e. the first parameter of "hx.set_reading_format("LSB", "MSB")".
# Comment out the lines "val = hx.get_avg_weight(30, 5)" and "print(val)" and uncomment the three lines to see what they print.
#np_arr8_string = hx.get_np_arr8_string()
#binary_string = hx.get_binary_string()
#print binary_string + " " + np_arr8_string
# Prints the weight. Comment it out if you're debugging the MSB and LSB issue.
val = hx.get_avg_weight(30, 5)
print(val)
#
# hx.power_down()
# hx.power_up()
time.sleep(10)
except (KeyboardInterrupt, SystemExit):
cleanAndExit()
| gpl-3.0 | 2,100,305,988,766,961,000 | 39.942308 | 141 | 0.690465 | false |
bintlabs/python-sync-db | dbsync/utils.py | 1 | 4441 | """
.. module:: dbsync.utils
:synopsis: Utility functions.
"""
import random
import inspect
from sqlalchemy.orm import (
object_mapper,
class_mapper,
ColumnProperty,
noload,
defer,
instrumentation,
state)
def generate_secret(length=128):
chars = "0123456789"\
"abcdefghijklmnopqrstuvwxyz"\
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"\
".,_-+*@:;[](){}~!?|<>=/\&$#"
return "".join(random.choice(chars) for _ in xrange(length))
def properties_dict(sa_object):
"""
Returns a dictionary of column-properties for the given SQLAlchemy
mapped object.
"""
mapper = object_mapper(sa_object)
return dict((prop.key, getattr(sa_object, prop.key))
for prop in mapper.iterate_properties
if isinstance(prop, ColumnProperty))
def column_properties(sa_variant):
"Returns a list of column-properties."
mapper = class_mapper(sa_variant) if inspect.isclass(sa_variant) \
else object_mapper(sa_variant)
return [prop.key for prop in mapper.iterate_properties
if isinstance(prop, ColumnProperty)]
def types_dict(sa_class):
"""
Returns a dictionary of column-properties mapped to their
SQLAlchemy types for the given mapped class.
"""
mapper = class_mapper(sa_class)
return dict((prop.key, prop.columns[0].type)
for prop in mapper.iterate_properties
if isinstance(prop, ColumnProperty))
def construct_bare(class_):
"""
Returns an object of type *class_*, without invoking the class'
constructor.
"""
obj = class_.__new__(class_)
manager = getattr(class_, instrumentation.ClassManager.MANAGER_ATTR)
setattr(obj, manager.STATE_ATTR, state.InstanceState(obj, manager))
return obj
def object_from_dict(class_, dict_):
"Returns an object from a dictionary of attributes."
obj = construct_bare(class_)
for k, v in dict_.iteritems():
setattr(obj, k, v)
return obj
def copy(obj):
"Returns a copy of the given object, not linked to a session."
return object_from_dict(type(obj), properties_dict(obj))
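# Example (an illustrative sketch; ``User`` is a hypothetical mapped class):
#
#     user = session.query(User).first()
#     data = properties_dict(user)             # e.g. {'id': 1, 'name': u'alice'}
#     clone = object_from_dict(User, data)     # bare instance, not attached to any session
#     assert properties_dict(clone) == data
#     twin = copy(user)                        # same round trip in one step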
def get_pk(sa_variant):
"Returns the primary key name for the given mapped class or object."
mapper = class_mapper(sa_variant) if inspect.isclass(sa_variant) \
else object_mapper(sa_variant)
return mapper.primary_key[0].key
def parent_references(sa_object, models):
"""
Returns a list of pairs (*sa_class*, *pk*) that reference all the
parent objects of *sa_object*.
"""
mapper = object_mapper(sa_object)
references = [(getattr(sa_object, k.parent.name), k.column.table)
for k in mapper.mapped_table.foreign_keys]
def get_model(table):
for m in models:
if class_mapper(m).mapped_table == table:
return m
return None
return [(m, pk)
for m, pk in ((get_model(table), v) for v, table in references)
if m is not None]
def parent_objects(sa_object, models, session, only_pk=False):
"""
Returns all the parent objects the given *sa_object* points to
(through foreign keys in *sa_object*).
*models* is a list of mapped classes.
*session* must be a valid SA session instance.
"""
return filter(lambda obj: obj is not None,
(query_model(session, m, only_pk=only_pk).\
filter_by(**{get_pk(m): val}).first()
for m, val in parent_references(sa_object, models)))
def query_model(session, sa_class, only_pk=False):
"""
Returns a query for *sa_class* that doesn't load any relationship
attribute.
"""
opts = (noload('*'),)
if only_pk:
pk = get_pk(sa_class)
opts += tuple(
defer(prop.key)
for prop in class_mapper(sa_class).iterate_properties
if isinstance(prop, ColumnProperty)
if prop.key != pk)
return session.query(sa_class).options(*opts)
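# Example (an illustrative sketch; ``Widget`` is a hypothetical mapped class):
#
#     pk_name = get_pk(Widget)
#     pks = [getattr(obj, pk_name)
#            for obj in query_model(session, Widget, only_pk=True)]
#
# No relationship is loaded and every non-primary-key column is deferred.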
class EventRegister(object):
def __init__(self):
self._listeners = []
def __iter__(self):
for listener in self._listeners:
yield listener
def listen(self, listener):
"Register a listener. May be used as a decorator."
assert inspect.isroutine(listener), "invalid listener"
if listener not in self._listeners:
self._listeners.append(listener)
return listener
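# Example usage of EventRegister (an illustrative sketch, not part of the library):
#
#     on_update = EventRegister()
#
#     @on_update.listen
#     def log_update(obj):
#         print(obj)
#
#     for listener in on_update:
#         listener(changed_object)   # 'changed_object' is whatever the caller dispatches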
| mit | -6,795,532,643,665,150,000 | 28.606667 | 75 | 0.62531 | false |
foursquare/pants | tests/python/pants_test/net/http/test_fetcher.py | 1 | 14532 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import http.server
import os
import socketserver
import unittest
from builtins import str
from contextlib import closing, contextmanager
from functools import reduce
from io import BytesIO
from threading import Thread
import mock
import requests
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_dir, temporary_file
from pants.util.dirutil import safe_open, touch
class FetcherTest(unittest.TestCase):
def setUp(self):
self.requests = mock.Mock(spec=requests.Session)
self.response = mock.Mock(spec=requests.Response)
self.fetcher = Fetcher('/unused/root/dir', requests_api=self.requests)
self.listener = mock.create_autospec(Fetcher.Listener, spec_set=True)
def status_call(self, status_code, content_length=None):
return mock.call.status(status_code, content_length=content_length)
def ok_call(self, chunks):
return self.status_call(200, content_length=sum(len(c) for c in chunks))
def assert_listener_calls(self, expected_listener_calls, chunks, expect_finished=True):
expected_listener_calls.extend(mock.call.recv_chunk(chunk) for chunk in chunks)
if expect_finished:
expected_listener_calls.append(mock.call.finished())
self.assertEqual(expected_listener_calls, self.listener.method_calls)
def assert_local_file_fetch(self, url_prefix=''):
chunks = [b'0123456789', b'a']
with temporary_file() as fp:
for chunk in chunks:
fp.write(chunk)
fp.close()
self.fetcher.fetch(url_prefix + fp.name, self.listener, chunk_size_bytes=10)
self.assert_listener_calls([self.ok_call(chunks)], chunks)
self.requests.assert_not_called()
def test_file_path(self):
self.assert_local_file_fetch()
def test_file_scheme(self):
self.assert_local_file_fetch('file:')
def assert_local_file_fetch_relative(self, url, *rel_path):
expected_contents = b'proof'
with temporary_dir() as root_dir:
with safe_open(os.path.join(root_dir, *rel_path), 'wb') as fp:
fp.write(expected_contents)
with temporary_file() as download_fp:
Fetcher(root_dir).download(url, path_or_fd=download_fp)
download_fp.close()
with open(download_fp.name, 'rb') as fp:
self.assertEqual(expected_contents, fp.read())
def test_file_scheme_double_slash_relative(self):
self.assert_local_file_fetch_relative('file://relative/path', 'relative', 'path')
def test_file_scheme_embedded_double_slash(self):
self.assert_local_file_fetch_relative('file://a//strange//path', 'a', 'strange', 'path')
def test_file_scheme_triple_slash(self):
self.assert_local_file_fetch('file://')
def test_file_dne(self):
with temporary_dir() as base:
with self.assertRaises(self.fetcher.PermanentError):
self.fetcher.fetch(os.path.join(base, 'dne'), self.listener)
def test_file_no_perms(self):
with temporary_dir() as base:
no_perms = os.path.join(base, 'dne')
touch(no_perms)
os.chmod(no_perms, 0)
self.assertTrue(os.path.exists(no_perms))
with self.assertRaises(self.fetcher.PermanentError):
self.fetcher.fetch(no_perms, self.listener)
@contextmanager
def expect_get(self, url, chunk_size_bytes, timeout_secs, chunks=None, listener=True):
chunks = chunks or [b'0123456789', b'a']
size = sum(len(c) for c in chunks)
self.requests.get.return_value = self.response
self.response.status_code = 200
self.response.headers = {'content-length': str(size)}
self.response.iter_content.return_value = chunks
yield chunks, [self.ok_call(chunks)] if listener else []
# NB: mock.Mock silently accepts unknown attributes, so a misspelled
# 'expect_called_once_with' never fails; use the real Mock assertion instead.
self.requests.get.assert_called_once_with(url, allow_redirects=True, stream=True,
timeout=timeout_secs)
self.response.iter_content.assert_called_once_with(chunk_size=chunk_size_bytes)
def test_get(self):
with self.expect_get('http://bar',
chunk_size_bytes=1024,
timeout_secs=60) as (chunks, expected_listener_calls):
self.fetcher.fetch('http://bar',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assert_listener_calls(expected_listener_calls, chunks)
self.response.close.assert_called_once_with()
def test_checksum_listener(self):
digest = mock.Mock(spec=hashlib.md5())
digest.hexdigest.return_value = '42'
checksum_listener = Fetcher.ChecksumListener(digest=digest)
with self.expect_get('http://baz',
chunk_size_bytes=1,
timeout_secs=37) as (chunks, expected_listener_calls):
self.fetcher.fetch('http://baz',
checksum_listener.wrap(self.listener),
chunk_size_bytes=1,
timeout_secs=37)
self.assertEqual('42', checksum_listener.checksum)
def expected_digest_calls():
for chunk in chunks:
yield mock.call.update(chunk)
yield mock.call.hexdigest()
self.assertEqual(list(expected_digest_calls()), digest.method_calls)
self.assert_listener_calls(expected_listener_calls, chunks)
self.response.close.assert_called_once_with()
def concat_chunks(self, chunks):
return reduce(lambda acc, c: acc + c, chunks, b'')
def test_download_listener(self):
with self.expect_get('http://foo',
chunk_size_bytes=1048576,
timeout_secs=3600) as (chunks, expected_listener_calls):
with closing(BytesIO()) as fp:
self.fetcher.fetch('http://foo',
Fetcher.DownloadListener(fp).wrap(self.listener),
chunk_size_bytes=1024 * 1024,
timeout_secs=60 * 60)
downloaded = self.concat_chunks(chunks)
self.assertEqual(downloaded, fp.getvalue())
self.assert_listener_calls(expected_listener_calls, chunks)
self.response.close.assert_called_once_with()
def test_size_mismatch(self):
self.requests.get.return_value = self.response
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
chunks = ['a', 'b']
self.response.iter_content.return_value = chunks
with self.assertRaises(self.fetcher.Error):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
self.response.iter_content.assert_called_once_with(chunk_size=1024)
self.assert_listener_calls([self.status_call(200, content_length=11)], chunks,
expect_finished=False)
self.response.close.assert_called_once_with()
def test_get_error_transient(self):
self.requests.get.side_effect = requests.ConnectionError
with self.assertRaises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
def test_get_error_permanent(self):
self.requests.get.side_effect = requests.TooManyRedirects
with self.assertRaises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertTrue(e.exception.response_code is None)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
def test_http_error(self):
self.requests.get.return_value = self.response
self.response.status_code = 404
with self.assertRaises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertEqual(404, e.exception.response_code)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
self.listener.status.assert_called_once_with(404)
self.response.close.assert_called_once_with()
def test_iter_content_error(self):
self.requests.get.return_value = self.response
self.response.status_code = 200
self.response.headers = {}
self.response.iter_content.side_effect = requests.Timeout
with self.assertRaises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
self.response.iter_content.assert_called_once_with(chunk_size=1024)
self.listener.status.assert_called_once_with(200, content_length=None)
self.response.close.assert_called_once_with()
def expect_download(self, path_or_fd=None):
with self.expect_get('http://1',
chunk_size_bytes=13,
timeout_secs=13,
listener=False) as (chunks, expected_listener_calls):
path = self.fetcher.download('http://1',
path_or_fd=path_or_fd,
chunk_size_bytes=13,
timeout_secs=13)
self.response.close.assert_called_once_with()
downloaded = self.concat_chunks(chunks)
return downloaded, path
def test_download(self):
downloaded, path = self.expect_download()
try:
with open(path, 'rb') as fp:
self.assertEqual(downloaded, fp.read())
finally:
os.unlink(path)
def test_download_fd(self):
with temporary_file() as fd:
downloaded, path = self.expect_download(path_or_fd=fd)
self.assertEqual(path, fd.name)
fd.close()
with open(path, 'rb') as fp:
self.assertEqual(downloaded, fp.read())
def test_download_path(self):
with temporary_file() as fd:
fd.close()
downloaded, path = self.expect_download(path_or_fd=fd.name)
self.assertEqual(path, fd.name)
with open(path, 'rb') as fp:
self.assertEqual(downloaded, fp.read())
@mock.patch('time.time')
def test_progress_listener(self, timer):
timer.side_effect = [0, 1.137]
stream = BytesIO()
progress_listener = Fetcher.ProgressListener(width=5, chunk_size_bytes=1, stream=stream)
with self.expect_get('http://baz',
chunk_size_bytes=1,
timeout_secs=37,
chunks=[[1]] * 1024) as (chunks, expected_listener_calls):
self.fetcher.fetch('http://baz',
progress_listener.wrap(self.listener),
chunk_size_bytes=1,
timeout_secs=37)
self.assert_listener_calls(expected_listener_calls, chunks)
# We just test the last progress line which should indicate a 100% complete download.
# We control progress bar width (5 dots), size (1KB) and total time downloading (fake 1.137s).
self.assertEqual('100% ..... 1 KB 1.137s\n', stream.getvalue().decode('utf-8').split('\r')[-1])
class FetcherRedirectTest(unittest.TestCase):
# NB(Eric Ayers): Using class variables like this seems horrible, but I can't figure out a better
# way to pass state between the test and the RedirectHTTPHandler class because it gets
# re-instantiated on every request.
_URL = None
_URL2_ACCESSED = False
_URL1_ACCESSED = False
# A trivial HTTP server that serves up a redirect from /url2 --> /url1 and some hard-coded
# responses in the HTTP message body.
class RedirectHTTPHandler(http.server.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
# The base class implements GET and HEAD.
# Old-style class, so we must invoke __init__ this way.
http.server.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
if self.path.endswith('url2'):
self.send_response(302)
redirect_url = '{}/url1'.format(FetcherRedirectTest._URL)
self.send_header('Location',redirect_url)
self.end_headers()
self.wfile.write('redirecting you to {}'.format(redirect_url).encode('utf-8'))
FetcherRedirectTest._URL2_ACCESSED = True
elif self.path.endswith('url1'):
self.send_response(200)
self.end_headers()
self.wfile.write(b'returned from redirect')
FetcherRedirectTest._URL1_ACCESSED = True
else:
self.send_response(404)
self.end_headers()
@contextmanager
def setup_server(self):
httpd = None
httpd_thread = None
try:
handler = self.RedirectHTTPHandler
httpd = socketserver.TCPServer(('localhost', 0), handler)
port = httpd.server_address[1]
httpd_thread = Thread(target=httpd.serve_forever)
httpd_thread.start()
yield 'http://localhost:{0}'.format(port)
finally:
if httpd:
httpd.shutdown()
if httpd_thread:
httpd_thread.join()
def test_download_redirect(self):
"""Make sure that a server that returns a redirect is actually followed.
Test with a real HTTP server that redirects from one URL to another.
"""
fetcher = Fetcher('/unused/root/dir')
with self.setup_server() as base_url:
self._URL = base_url
self.assertFalse(self._URL2_ACCESSED)
self.assertFalse(self._URL1_ACCESSED)
path = fetcher.download(base_url + '/url2')
self.assertTrue(self._URL2_ACCESSED)
self.assertTrue(self._URL1_ACCESSED)
with open(path) as fp:
self.assertIn(fp.read(), ['returned from redirect\n', 'returned from redirect\r\n'])
| apache-2.0 | 183,568,090,473,102,900 | 37.041885 | 99 | 0.631985 | false |
rananda/cfme_tests | markers/meta.py | 1 | 6131 | # -*- coding: utf-8 -*-
"""meta(\*\*metadata): Marker for metadata addition.
To add metadata to a test simply pass the kwargs as plugins wish.
You can write your own plugins. They generally live in the ``metaplugins/`` directory, but you can
define them pretty much anywhere py.test loads modules. A plugin has a name and a set
of callbacks that are called when a certain combination of keys is present in the metadata.
To define plugin, do like this:
.. code-block:: python
@plugin("plugin_name")
def someaction(plugin_name):
print(plugin_name) # Will contain value of `plugin_name` key of metadict
This is the simplest usage, where the plugin is assumed to check only one key with the
same name as the plugin's name. I won't use this form in the later examples; I will use the
more verbose one.
.. code-block:: python
@plugin("plugin_name", keys=["plugin_name", "another_key"])
def someaction(plugin_name, another_key):
print(plugin_name) # Will contain value of `plugin_name` key of metadict
print(another_key) # Similarly this one
This one reacts when the two keys are present. You can make even more complex setups:
.. code-block:: python
@plugin("plugin_name", keys=["plugin_name"])
@plugin("plugin_name", ["plugin_name", "another_key"]) # You don't have to write keys=
def someaction(plugin_name, another_key=None):
print(plugin_name) # Will contain value of `plugin_name` key of metadict
print(another_key) # Similarly this one if specified, otherwise None
This creates an optional (non-required) parameter for the action.
You can specify as many actions as you wish per plugin. The only thing that limits you is
picking the correct action. First, all the actions are filtered by the keys present in the
metadata. Then, from this selection, only the action with the most matched keys is called.
Bear this in mind. If this is not enough in the future, it can be extended.
There is a command-line option that allows you to disable certain plugins. Just specify
``--disablemetaplugins a,b,c``, where a, b and c are the plugins that should be disabled.
"""
from collections import namedtuple
from kwargify import kwargify
from types import FunctionType
import pytest
from lya import AttrDict
from utils.log import logger
def pytest_configure(config):
config.addinivalue_line("markers", __doc__.splitlines()[0])
def pytest_addoption(parser):
group = parser.getgroup('Meta plugins')
group.addoption('--disablemetaplugins',
action='store',
default="",
dest='disable_metaplugins',
help='Comma-separated list of metaplugins to disable')
@pytest.mark.hookwrapper
def pytest_pycollect_makeitem(collector, name, obj):
# Put the meta mark on objects as soon as pytest begins to collect them
if isinstance(obj, FunctionType) and not hasattr(obj, 'meta'):
pytest.mark.meta(obj)
yield
@pytest.mark.hookwrapper
def pytest_collection_modifyitems(session, config, items):
for item in items:
try:
item._metadata = AttrDict(item.function.meta.kwargs)
except AttributeError:
logger.warning('AttributeError getting metadata from item: {}'.format(
str(item.nodeid))
)
item._metadata = AttrDict()
meta = item.get_marker("meta")
if meta is None:
continue
metas = reversed([x.kwargs for x in meta]) # Extract the kwargs, reverse the order
for meta in metas:
item._metadata.update(meta)
yield
@pytest.fixture(scope="function")
def meta(request):
return request.node._metadata
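# Example (sketch): how a test attaches metadata and reads it back through the
# ``meta`` fixture; the key names here are made up for illustration.
#
#     @pytest.mark.meta(owner='jdoe', automates=['BZ-1234'])
#     def test_something(meta):
#         assert meta.owner == 'jdoe'    # AttrDict built from the mark's kwargs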
Plugin = namedtuple('Plugin', ['name', 'metas', 'function', 'kwargs'])
class PluginContainer(object):
SETUP = "setup"
TEARDOWN = "teardown"
BEFORE_RUN = "before_run"
AFTER_RUN = "after_run"
DEFAULT = SETUP
def __init__(self):
self._plugins = []
def __call__(self, name, keys=None, **kwargs):
if keys is None:
keys = [name]
def f(g):
self._plugins.append(Plugin(name, keys, kwargify(g), kwargs))
return g # So the markers can be chained
return f
if "plugin" not in globals():
plugin = PluginContainer()
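# Example (sketch): how a metaplugin module would register actions with this
# container; 'blockers' is a hypothetical metadata key.
#
#     @plugin("blockers", keys=["blockers"])            # runs at setup (the default)
#     def mark_blocked(item, blockers):
#         ...
#
#     @plugin("blockers", keys=["blockers"], run=plugin.AFTER_RUN)
#     def report_blocked(item, blockers):
#         ...
#
# run_plugins() below filters actions by the keys present in the item's
# metadata and, per plugin name, calls the one with the most matched keys.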
def run_plugins(item, when):
possible_plugins = []
for plug in plugin._plugins:
if all([meta in item._metadata.keys() for meta in plug.metas])\
and plug.kwargs.get("run", plugin.DEFAULT) == when:
possible_plugins.append(plug)
by_names = {}
for plug in possible_plugins:
if plug.name not in by_names:
by_names[plug.name] = []
by_names[plug.name].append(plug)
disabled_plugins = item.config.getvalue("disable_metaplugins") or ""
if not disabled_plugins:
disabled_plugins = []
else:
disabled_plugins = [name.strip() for name in disabled_plugins.split(",")]
for plugin_name, plugin_objects in by_names.iteritems():
if plugin_name in disabled_plugins:
logger.info("Ignoring plugin {} due to commandline option".format(plugin_name))
continue
plugin_objects.sort(key=lambda p: len(p.metas), reverse=True)
plug = plugin_objects[0]
env = {"item": item}
for meta in plug.metas:
env[meta] = item._metadata[meta]
logger.info(
"Calling metaplugin {}({}) with meta signature {} {}".format(
plugin_name, plug.function.__name__, str(plug.metas), str(plug.kwargs)))
plug.function(**env)
logger.info(
"Metaplugin {}({}) with meta signature {} {} has finished".format(
plugin_name, plug.function.__name__, str(plug.metas), str(plug.kwargs)))
def pytest_runtest_setup(item):
run_plugins(item, plugin.SETUP)
def pytest_runtest_teardown(item):
run_plugins(item, plugin.TEARDOWN)
@pytest.mark.hookwrapper
def pytest_runtest_call(item):
run_plugins(item, plugin.BEFORE_RUN)
try:
yield
finally:
run_plugins(item, plugin.AFTER_RUN)
| gpl-2.0 | -9,018,296,741,452,237,000 | 32.872928 | 93 | 0.656174 | false |
opnmind/python-olapdb-palo-web | src/py3/palo_example.py | 1 | 5293 | # -*- coding: utf-8 -*-
from __future__ import print_function
from PyJedoxWebApi.PyJedoxWeb import PyJedoxWeb
P = PyJedoxWeb()
print("SID: " + P.getSid())
database = "control_musterstadt"
cubename = "fcmain"
res = P.CreateDatabase(DBName=database)
print(res)
P.loadDBList()
DB = P.getDB(DBName=database)
DB.loadDimensions()
DB.loadCubes()
DimTest1_Jahre = DB.CreateDimension('Jahre')
DimTest2_Datenart = DB.CreateDimension('Datenart')
DimTest3_Firma = DB.CreateDimension('Firma')
DimTest4_Einheit = DB.CreateDimension('Einheit')
DimTest5_Periode = DB.CreateDimension('Periode')
DimTest6_Position = DB.CreateDimension('Position')
Cubetest = DB.CreateCube(cubename, ('Jahre','Datenart','Firma','Einheit','Periode','Position'))
print(DimTest1_Jahre.addElement("2005"))
print(DimTest1_Jahre.addElement("2006"))
print(DimTest1_Jahre.addElement("2008"))
print(DimTest1_Jahre.addElement("2007"))
print(DimTest1_Jahre.addElement("2009"))
print(DimTest1_Jahre.addElement("2010"))
print(DimTest1_Jahre.addElement("2011"))
print(DimTest1_Jahre.addElement("2012"))
print(DimTest2_Datenart.addElement("Ist"))
print(DimTest2_Datenart.addElement("Plan"))
print(DimTest3_Firma.addElement("SWH"))
print(DimTest3_Firma.addElement("KWL"))
print(DimTest4_Einheit.addElement("Euro"))
print(DimTest4_Einheit.addElement("Anzahl"))
print(DimTest5_Periode.addElement("Jahr"))
print(DimTest5_Periode.addElement("Januar"))
print(DimTest5_Periode.addElement("Februar"))
print(DimTest5_Periode.addElement("März"))
print(DimTest5_Periode.addElement("April"))
print(DimTest5_Periode.addElement("Mai"))
print(DimTest5_Periode.addElement("Juni"))
print(DimTest5_Periode.addElement("Juli"))
print(DimTest5_Periode.addElement("August"))
print(DimTest5_Periode.addElement("September"))
print(DimTest5_Periode.addElement("Oktober"))
print(DimTest5_Periode.addElement("November"))
print(DimTest5_Periode.addElement("Dezember"))
print(DimTest6_Position.addElement("Bilanz"))
print(DimTest6_Position.addElement("GuV"))
Cubetest.Save()
P.loadDBList()
DB = P.getDB(DBName=database)
DB.loadDimensions()
DB.loadCubes()
C = DB.getCube(cubename)
if C == False:
print(cubename + " konnte nicht geladen werden...")
quit()
else:
print(C.getID())
## Delete all elements of the dimension ##
#C.clearDimension('')
D = DB.getDimension("Firma")
print(D.getAttributeCubeName())
for ID in C.getDimensionsIDList():
DimName = DB.getDimensionNameByID(ID)
Dim = DB.getDimension(DimName)
Dim.loadElements()
## Loop through the (unordered) dictionary of the elements of the dimension ##
## the key is the name of the element, the value is the internal ID of the element ##
for key, val in Dim.getElements().items():
print(key, val)
## Loop through the (unordered) element names of the dimension ##
for E in Dim.getElements():
print(E)
## Loop through the (ordered) internal element IDs of the dimension ##
for ID in Dim.getElementsIDList():
print(ID)
Dim = C.getDimensionByID(17)
print(C.getDimensionByID(17).getElements())
Dim = DB.getDimension('Jahre')
Dim.loadElements()
Lst = Dim.getElementsName()
Lst = sorted(Lst)
## Sort the elements of the dimension ##
for Pos, Name in enumerate(Lst):
print(Pos, Name)
print(Dim.MoveElement(Name, Pos))
## Write rule into the cube ##
R = """['Jahr'] = ['Januar'] + ['Februar'] + ['März']"""
print(C.ParseRule(R))
C.CreateRule(R)
## Loop through the rules of the cube and execute the 'rule/parse' web API ##
Rules = C.getRules()
for R in Rules.values():
print(C.ParseRule(R))
#####################################################################################################
quit()
Coord = (('*'), ('some element', 'other element', 'etc'), ('*'), ('*'), ('*'))
Condition = '>= 0.1 xor <=-0.1'
## Return the output of the cell/export web api ##
Res = C.Dump(Coord)
print(Res)
## Delete all cells of the cube subset ##
C.Clear(Coord)
## Save the cube ##
C.Save()
## Return a list of tuples with the cell values ##
print(C.getValues(Coord))
## Loop through the cube subset and return a tuple with coordinates and the value for each cell ##
for Cell in C.DumpCell(Coord):
print(Cell)
## Loop through the cube subset and return an object for each cell ##
## To access the cell value use the "Value" property; to access the element name ##
## use the "DimensionName" ##
## By default, the DumpCellAsObject and DumpCell methods have the "UseKeyDWIfExists" parameter set to true.
## When the parameter "UseKeyDWIfExists" is set to true, the method "DumpCellAsObject" will look for an "attribute"
## called "KeyDW" for each dimension (the name of the "attribute" is set in the PyPaloWebConfig module
## with the property "__DatawarehouseAliasName") and if such an "attribute" is found, the method will return
## the value of the "attribute" instead of the element name.
## This is useful for making the output of the dump directly loadable into my data warehouse, in which the elements are identified by a canonical "ID number".
for CellObj in C.DumpCellAsObject(Coord):
print(CellObj.Dim1, CellObj.Dim2, CellObj.DimN, CellObj.Value)
| gpl-3.0 | 2,195,037,829,077,101,600 | 31.27673 | 155 | 0.689095 | false |
rndusr/stig | stig/commands/tui/tui.py | 1 | 45516 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
"""Commands that work exclusively in the TUI"""
import functools
import os
import shlex
from functools import partial
from . import _mixin as mixin
from .. import CmdError, CommandMeta, utils
from ... import client, objects
from ...completion import candidates
from ._common import make_tab_title_widget
from ...logging import make_logger # isort:skip
log = make_logger(__name__)
# Import tui.main module only on demand
def _get_keymap_contexts():
from ...tui.tuiobjects import keymap
return tuple(keymap.contexts)
class BindCmd(metaclass=CommandMeta):
name = 'bind'
provides = {'tui'}
category = 'tui'
description = 'Bind keys to commands or other keys'
usage = ('bind [<OPTIONS>] <KEY> <ACTION>',)
examples = ('bind ctrl-a tab ls active',
'bind --context tabs alt-[ tab --focus left',
'bind --context tabs alt-] tab --focus right',
'bind --context torrent alt-! start --force',
"bind --context torrent 'd .' rm",
"bind --context torrent 'd+!' rm --delete-files",
'bind u <up>',
'bind d <down>')
argspecs = (
{'names': ('--context','-c'),
'description': 'Where KEY is grabbed (see CONTEXTS section)'},
{'names': ('--description','-d'),
'description': 'Explanation of what ACTION does'},
{'names': ('KEY',),
'description': 'One or more keys or key combinations (see KEYS section)'},
{'names': ('ACTION',), 'nargs': 'REMAINDER',
'description': ("Any command or '<KEY>' (including the brackets) "
'to translate one key to another')},
)
def __create_CONTEXTS_section():
lines = [
('The same key can be bound multiple times in different contexts. '
'With no context given, the default context is used. The default '
"context gets the key if it isn't mapped in any other relevant context."),
'',
'Available contexts are: ' + ', '.join(str(c) for c in _get_keymap_contexts()),
'',
'EXAMPLE',
'\tbind --context torrent ctrl-t start',
'\tbind --context tabs ctrl-t tab',
'\tbind ctrl-t <left>',
'',
('\tWhen focusing a torrent, <ctrl-t> starts the focused torrent. '
'If focus is not on a torrent but still on a tab (e.g. in an empty '
'torrent list or when reading this text) a new tab is opened. '
'Otherwise (e.g. focus is on the command prompt), <ctrl-t> does the '
'same as <left>.'),
]
return lines
more_sections = {
'CONTEXTS': __create_CONTEXTS_section,
'KEYS': (
'Single-character keys are specified as themselves (e.g. h, X, 5, !, þ, ¥, etc).',
'',
('Special key names are enter, space, tab, backspace, insert, delete, home, end, '
'up, down, left, right, pgup, pgdn and f1-12.'),
'',
("The modifiers 'ctrl', 'alt' and 'shift' are separated with '-' from the key "
"(e.g. alt-i, shift-delete, ctrl-a). shift-x is identical to X."),
'',
("Chained keys are sparated by single spaces (' ') or pluses ('+') and must be "
"given as one argument per chain."),
)
}
def run(self, context, description, KEY, ACTION):
from ...tui.tuiobjects import keymap
key = KEY
if len(ACTION) == 1 and ACTION[0][0] == '<' and ACTION[0][-1] == '>':
# ACTION is another key (e.g. 'j' -> 'down')
action = keymap.mkkey(ACTION[0])
else:
action = ' '.join(shlex.quote(x) for x in ACTION)
if context is None:
context = keymap.DEFAULT_CONTEXT
elif context not in _get_keymap_contexts():
raise CmdError('Invalid context: %r' % (context,))
try:
keymap.bind(key, action, context=context, description=description)
except ValueError as e:
raise CmdError(e)
_own_options = {('--context', '-c'): 1,
('--description', '-d'): 1}
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
posargs = args.posargs(cls._own_options)
if posargs.curarg_index == 2:
# First positional argument is the key, second is the command's name
return candidates.commands()
else:
# Any other positional arguments will be passed to subcmd
subcmd = cls._get_subcmd(args)
if subcmd:
return candidates.for_args(subcmd)
@classmethod
def completion_candidates_opts(cls, args):
"""Return candidates for arguments that start with '-'"""
subcmd = cls._get_subcmd(args)
if subcmd:
# Get completion candidates from subcmd's class
return candidates.for_args(subcmd)
else:
# Parent class generates candidates for our own options
return super().completion_candidates_opts(args)
@classmethod
def completion_candidates_params(cls, option, args):
"""Complete parameters (e.g. --option parameter1,parameter2)"""
if option == '--context':
return candidates.keybinding_contexts()
@classmethod
def _get_subcmd(cls, args):
# posarg[0] is 'bind', posarg[1] is the key
subcmd_start = args.nth_posarg_index(3, cls._own_options)
# Subcmd is only relevant if the cursor is somewhere on it.
# Otherwise, we're on our own arguments.
if subcmd_start is not None and subcmd_start < args.curarg_index:
return args[subcmd_start:]
class UnbindCmd(metaclass=CommandMeta):
name = 'unbind'
provides = {'tui'}
category = 'tui'
description = 'Unbind keys so pressing them has no effect'
usage = ('unbind [<OPTIONS>] <KEY> <KEY> ...',)
examples = ('unbind --context main ctrl-l',
'unbind q')
argspecs = (
{'names': ('--context','-c'),
'description': 'Where KEY is grabbed (see "bind" command)'},
{'names': ('--all','-a'), 'action': 'store_true',
'description': 'Remove all keybindings or only those in given context'},
{'names': ('KEY',), 'nargs': 'REMAINDER',
'description': 'Keys or key combinations (see "bind" command)'},
)
more_sections = {
'COMPLETE UNBINDING': (
('For this command there is a special context called \'all\' that '
'unbinds the key for every context.'),
'',
'Note that \'unbind --all\' is very different from \'unbind --context all\''
)
}
def run(self, context, all, KEY):
from ...tui.tuiobjects import keymap
if context is not None and context not in _get_keymap_contexts():
raise CmdError('Invalid context: %r' % (context,))
if KEY:
if context:
success = self._unbind_keys(keys=KEY, context=context)
elif all:
success = self._unbind_keys(keys=KEY, context=keymap.ALL_CONTEXTS)
else:
success = self._unbind_keys(keys=KEY, context=keymap.DEFAULT_CONTEXT)
else:
success = self._unbind_all_keys(context=context)
if not success:
raise CmdError()
def _unbind_keys(self, keys, context):
from ...tui.tuiobjects import keymap
success = True
for key in keys:
try:
keymap.unbind(key, context=context)
except ValueError as e:
self.error(e)
success = False
return success
def _unbind_all_keys(self, context):
from ...tui.tuiobjects import keymap
if context is None:
keymap.clear()
else:
keymap.clear(context=context)
return True
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
return candidates.keybinding_keys(args)
@classmethod
def completion_candidates_params(cls, option, args):
"""Complete parameters (e.g. --option parameter1,parameter2)"""
if option == '--context':
return candidates.keybinding_contexts()
class SetCommandCmd(mixin.placeholders, metaclass=CommandMeta):
name = 'setcommand'
aliases = ('setcmd',)
provides = {'tui'}
category = 'tui'
description = 'Open the command line and insert a command'
usage = ('setcommand [--trailing-space] <COMMAND> <ARGUMENT> <ARGUMENT> ...',)
examples = (
'setcommand --trailing-space tab ls',
'\tAsk the user for a filter before opening a new torrent list.',
'',
'setcommand move {{location}}/',
('\tMove the focused torrent, using the path of the currently focused '
'list item as a starting point.'),
'',
'setcommand move id={{id}} {{location}}/',
('\tSame as above, but make sure to move the correct torrent in case '
'it is removed from the list while typing in the new path, e.g. if '
'we\'re listing active torrents and the focused torrent stops being active.'),
)
argspecs = (
{'names': ('COMMAND',), 'nargs': 'REMAINDER',
'description': 'Command the can user edit before executing it (see PLACEHOLDERS)'},
{'names': ('--trailing-space', '-s'), 'action': 'store_true',
'description': 'Append a space at the end of COMMAND'},
)
more_sections = {
'PLACEHOLDERS': mixin.placeholders.HELP,
}
async def run(self, COMMAND, trailing_space):
log.debug('Unresolved command: %r', COMMAND)
args = await self.parse_placeholders(*COMMAND)
log.debug('Command with resolved placeholders: %r', args)
if args:
cmdstr = ' '.join(shlex.quote(str(arg)) for arg in args)
if trailing_space:
cmdstr += ' '
from ...tui.tuiobjects import widgets
widgets.show('cli')
widgets.cli.base_widget.edit_text = cmdstr
widgets.cli.base_widget.edit_pos = len(cmdstr)
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
posargs = args.posargs()
if posargs.curarg_index == 1:
# First positional argument is the subcmd's name
return candidates.commands()
else:
# Any other positional arguments are part of subcmd
subcmd = cls._get_subcmd(args)
if subcmd:
return candidates.for_args(subcmd)
@classmethod
def completion_candidates_opts(cls, args):
"""Return candidates for arguments that start with '-'"""
subcmd = cls._get_subcmd(args)
if subcmd:
# Get completion candidates for subcmd
return candidates.for_args(subcmd)
else:
# Parent class generates candidates for our own options
return super().completion_candidates_opts(args)
@staticmethod
def _get_subcmd(args):
# First posarg is 'setcommand'
subcmd_start = args.nth_posarg_index(2)
# Subcmd is only relevant if the cursor is somewhere on it.
# Otherwise, we're on our own arguments.
if subcmd_start is not None and subcmd_start < args.curarg_index:
return args[subcmd_start:]
class InteractiveCmd(mixin.placeholders, metaclass=CommandMeta):
name = 'interactive'
provides = {'tui'}
category = 'tui'
description = 'Complete partial command with user input from a dialog'
usage = ('interactive <COMMAND> [<OPTIONS>]',)
examples = (
'interactive "move \'[{location}/]\'"',
'\tAsk for the destination directory when moving torrents.',
'',
'tab ls & interactive "limit \'[]\'" --per-change --on-cancel "tab --close --focus left"',
('\tOpen a new tab with all torrents and filter them as you type. '
'Keep the tab open if the user input field is accepted with <enter> '
'or close the tab and focus the previous one if the dialog is aborted '
'with <escape>.'),
'',
'tab ls stopped & interactive \'limit "[]"\' -p -a "mark --all & start" -x "tab --close --focus left"',
('\tSearch for stopped torrents only. When accepted, the matching torrents '
'are started. The new tab is always closed, whether the dialog is '
'accepted or not.'),
)
argspecs = (
{'names': ('COMMAND',),
'description': ('Any command with "[PREFILLED TEXT]" as marker for '
'user input field (see USER INPUT FIELDS) and '
'"{{NAM}}" as placeholder for values of the currently '
'focused list item (see PLACEHOLDERS)')},
{'names': ('--per-change', '-p'), 'action': 'store_true',
'description': 'Whether to run COMMAND every time the input is changed'},
{'names': ('--on-accept', '-a'), 'metavar': 'ACCEPT COMMAND',
'description': 'Command to run when the dialog is accepted (with <enter>)'},
{'names': ('--on-cancel', '-c'), 'metavar': 'CANCEL COMMAND',
'description': 'Command to run when the dialog is aborted (with <escape>)'},
{'names': ('--on-close', '-x'), 'metavar': 'CLOSE COMMAND',
'description': 'Command to run after the dialog is closed either way'},
{'names': ('--ignore-errors', '-i'), 'action': 'store_true',
'description': 'Whether to ignore errors from COMMAND'},
)
more_sections = {
'COMMANDS': (('For each occurrence of "[]" in any command, the user is '
'prompted for input to insert at that point. Any text between '
'"[" and "]" is used as the initial user input. "[" can be '
'escaped with "\\" in which case the corresponding "]" is also '
'interpreted literally.'),
'',
('COMMAND is called if the user presses <enter> or, if --per-change '
'is given, after any user input field is changed.'),
'',
('COMMAND must contain at least one user input field. Any of the '
'commands described below are called without user interaction if '
'they don\'t contain any user input fields.'),
'',
('ACCEPT COMMAND is called after COMMAND if the user accepts the '
'dialog by pressing <enter>.'),
'',
'CANCEL COMMAND is called if the user aborts the COMMAND dialog.',
'',
('CLOSE COMMAND is always called when the dialog is closed either '
'by accepting or by cancelling it.')),
'PLACEHOLDERS': mixin.placeholders.HELP,
}
import re
_input_regex = re.compile(r'(?<!\\)(\[.*?\])')
async def run(self, COMMAND, per_change, on_accept, on_cancel, on_close, ignore_errors):
cmd = await self._parse_cmd(COMMAND)
accept_cmd = await self._parse_cmd(on_accept) if on_accept else None
cancel_cmd = await self._parse_cmd(on_cancel) if on_cancel else None
close_cmd = await self._parse_cmd(on_close) if on_close else None
self._ignore_errors = ignore_errors
if len(cmd) == 1:
# There are no user input markers
raise CmdError('No user input fields ("[]"): %s' % COMMAND)
def close_cb():
self._run_cmd_or_open_dialog(close_cmd)
if per_change:
def accept_cb():
self._run_cmd_from_dialog()
self._run_cmd_or_open_dialog(accept_cmd)
def cancel_cb():
self._run_cmd_or_open_dialog(cancel_cmd)
self._open_dialog(cmd,
on_change=self._run_cmd_from_dialog,
on_accept=accept_cb,
on_cancel=cancel_cb,
on_close=close_cb)
else:
def accept_cb():
self._run_cmd_from_dialog()
self._run_cmd_or_open_dialog(accept_cmd)
def cancel_cb():
self._run_cmd_or_open_dialog(cancel_cmd)
self._open_dialog(cmd,
on_accept=accept_cb,
on_cancel=cancel_cb,
on_close=close_cb)
_WIDGET_NAME = 'interactive_prompt'
_MIN_EDIT_WIDTH = 25
_MAX_EDIT_WIDTH = 50
def _open_dialog(self, cmd, on_change=None, on_accept=None, on_cancel=None, on_close=None):
import urwid
from ...tui.cli import CLIEditWidget
def accept_cb(widget):
# CLIEditWidget only automatically appends to history when it gets
# an <enter> key, but only one gets it if there are multiple user
# input fields.
for part in self._edit_widgets:
part.append_to_history()
self._close_dialog()
if on_accept: on_accept()
if on_close: on_close()
def cancel_cb(widget):
self._close_dialog()
if on_cancel: on_cancel()
if on_close: on_close()
def change_cb(widget):
if on_change: on_change()
# Derive history file name from command
import re
filename = re.sub('[/\n]', '__', ''.join(cmd))
history_file_base = os.path.join(objects.localcfg['tui.cli.history-dir'].full_path, filename)
columns_args = [('pack', urwid.Text(':'))]
self._cmd_parts = []
self._edit_widgets = []
edit_index = 0
for part in cmd:
if part[0] == '[' and part[-1] == ']':
edit_index += 1
history_file = history_file_base + '.input%d' % edit_index
log.debug('History file for edit #%d: %r', edit_index, history_file)
edit_widget = CLIEditWidget(on_change=change_cb,
on_accept=accept_cb,
on_cancel=cancel_cb,
history_file=history_file)
edit_widget.edit_text = part[1:-1]
edit_widget.edit_pos = len(edit_widget.edit_text)
columns_args.append(urwid.AttrMap(edit_widget, 'prompt'))
self._cmd_parts.append(edit_widget)
self._edit_widgets.append(edit_widget)
else:
columns_args.append(('pack', urwid.Text(part)))
self._cmd_parts.append(part)
class MyColumns(urwid.Columns):
"""Use <tab> and <shift-tab> to move focus between input fields"""
def keypress(self, size, key):
def move_right():
if self.focus_position < len(self.contents) - 1:
self.focus_position += 1
else:
self.focus_position = 0
def move_left():
if self.focus_position > 0:
self.focus_position -= 1
else:
self.focus_position = len(self.contents) - 1
if key == 'tab':
move_right()
while not isinstance(self.focus.base_widget, urwid.Edit):
move_right()
elif key == 'shift-tab':
move_left()
while not isinstance(self.focus.base_widget, urwid.Edit):
move_left()
else:
log.debug('focus pos: %r', self.focus_position)
return super().keypress(size, key)
columns_widget = MyColumns(columns_args)
# Close any previously opened dialog
from ...tui.tuiobjects import widgets
if widgets.exists(self._WIDGET_NAME):
self._close_dialog()
# Focus the first empty input widget if there are any
for i,(w,_) in enumerate(columns_widget.contents):
w = w.base_widget
log.debug('%02d: %r', i, w)
if hasattr(w, 'edit_text') and w.edit_text == '':
columns_widget.focus_position = i
break
widgets.add(name=self._WIDGET_NAME,
widget=urwid.AttrMap(columns_widget, 'cli'),
position=widgets.get_position('cli'),
removable=True,
options='pack')
def _close_dialog(self):
from ...tui.tuiobjects import widgets
widgets.remove(self._WIDGET_NAME)
widgets.focus_name = 'main'
def _run_cmd_or_open_dialog(self, cmd):
if not cmd:
return
elif len(cmd) == 1:
log.debug('Running command without dialog: %r', cmd)
self._run_cmd(cmd[0])
else:
log.debug('Running command in dialog: %r', cmd)
self._open_dialog(cmd, on_accept=self._run_cmd_from_dialog)
def _run_cmd_from_dialog(self):
cmd = []
for part in self._cmd_parts:
if hasattr(part, 'edit_text'):
cmd.append(part.edit_text)
else:
cmd.append(part)
cmd = ''.join(cmd)
log.debug('Got command from current dialog: %r', cmd)
self._run_cmd(cmd)
def _run_cmd(self, cmd):
log.debug('Running cmd: %r', cmd)
if self._ignore_errors:
# Overload the error() method on the command's instance
objects.cmdmgr.run_task(cmd, error=lambda msg: None)
else:
objects.cmdmgr.run_task(cmd)
async def _parse_cmd(self, cmd):
assert isinstance(cmd, str)
args = await self.parse_placeholders(cmd)
return self._split_cmd_at_inputs(args[0])
def _split_cmd_at_inputs(self, cmd):
"""
Split `cmd` so that each input marker ("[...]") is a single item
Example result:
['somecmd --an-argument ', '[user input goes here]', ' some more arguments']
"""
log.debug('Splitting %r', cmd)
parts = [part for part in self._input_regex.split(cmd) if part]
log.debug('Split: %r', parts)
for i in range(len(parts)):
parts[i] = parts[i].replace('\\[', '[')
log.debug('Unescaped: %r', parts)
return parts
class MarkCmd(metaclass=CommandMeta):
name = 'mark'
provides = {'tui'}
category = 'tui'
description = 'Select torrents or files for an action'
usage = ('mark [<OPTIONS>]',)
argspecs = (
{'names': ('--focus-next','-n'), 'action': 'store_true',
'description': 'Move focus forward after marking or toggling'},
{'names': ('--toggle','-t'), 'action': 'store_true',
'description': 'Mark if unmarked, unmark if marked'},
{'names': ('--all','-a'), 'action': 'store_true',
'description': 'Mark or toggle all items'},
)
more_sections = {
'NOTES': (('The column "marked" must be in the "columns.*" settings. Otherwise '
'marked list items are indistinguishable from unmarked ones.'),
'',
('The character that is displayed in the "marked" column is '
'specified by the settings "tui.marked.on" and "tui.marked.off".')),
}
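# For example (a sketch; the setting names follow the NOTES above, the exact
# column list is arbitrary):
#
#     set columns.torrents marked,name,size
#     set tui.marked.on '#'
#     set tui.marked.off ' '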
def run(self, focus_next, toggle, all):
from ...tui.tuiobjects import tabs
widget = tabs.focus
if not widget.has_marked_column:
raise CmdError('Nothing to mark here.')
else:
widget.mark(toggle=toggle, all=all)
if focus_next:
widget.focus_position += 1
class UnmarkCmd(metaclass=CommandMeta):
name = 'unmark'
provides = {'tui'}
category = 'tui'
description = 'Deselect torrents or files for an action'
usage = ('unmark [<OPTIONS>]',)
argspecs = (
{'names': ('--focus-next','-n'), 'action': 'store_true',
'description': 'Move focus forward after unmarking or toggling'},
{'names': ('--toggle','-t'), 'action': 'store_true',
'description': 'Mark if unmarked, unmark if marked'},
{'names': ('--all','-a'), 'action': 'store_true',
'description': 'Unmark or toggle all items'},
)
more_sections = MarkCmd.more_sections
def run(self, focus_next, toggle, all):
from ...tui.tuiobjects import tabs
widget = tabs.focus
if not widget.has_marked_column:
raise CmdError('Nothing to unmark here.')
else:
widget.unmark(toggle=toggle, all=all)
if focus_next:
widget.focus_position += 1
class QuitCmd(metaclass=CommandMeta):
name = 'quit'
provides = {'tui'}
category = 'tui'
description = 'Terminate the TUI'
def run(self):
import urwid
raise urwid.ExitMainLoop()
class FindCmd(metaclass=CommandMeta):
name = 'find'
provides = {'tui'}
category = 'tui'
description = 'Find text in the content of the focused tab'
usage = ('find [<OPTIONS>] [<PHRASE>]',)
argspecs = (
{'names': ('--clear','-c'), 'action': 'store_true',
'description': ('Remove previously applied filter; this is '
'the default if no PHRASE arguments are provided')},
{'names': ('--next','-n'), 'action': 'store_true',
'description': 'Jump to next match (call `find <PHRASE>` first)'},
{'names': ('--previous','-p'), 'action': 'store_true',
'description': 'Jump to previous match (call `find <PHRASE>` first)'},
{'names': ('PHRASE',), 'nargs': '*',
'description': 'Search phrase'},
)
def run(self, clear, next, previous, PHRASE):
from ...tui.tuiobjects import tabs
content = tabs.focus.base_widget
if not hasattr(content, 'search_phrase'):
raise CmdError('This tab does not support finding.')
elif next and previous:
raise CmdError('The options --next and --previous contradict each other.')
elif next:
if content.search_phrase is None:
raise CmdError('Set a search phrase first with `find <PHRASE>`.')
else:
content.jump_to_next_match()
elif previous:
if content.search_phrase is None:
raise CmdError('Set a search phrase first with `find <PHRASE>`.')
else:
content.jump_to_prev_match()
elif clear:
content.search_phrase = None
else:
try:
content.search_phrase = ' '.join(PHRASE)
content.maybe_jump_to_next_match()
except ValueError as e:
raise CmdError(e)
class LimitCmd(metaclass=CommandMeta):
name = 'limit'
provides = {'tui'}
category = 'tui'
description = 'Limit contents of the focused tab by applying more filters'
usage = ('limit [<OPTIONS>] [<FILTER> <FILTER> ...]',)
argspecs = (
{'names': ('--clear','-c'), 'action': 'store_true',
'description': ('Remove previously applied filter; this is '
'the default if no FILTER arguments are provided')},
{'names': ('FILTER',), 'nargs': '*',
'description': 'Filter expression (see `help filters`)'},
)
def run(self, clear, FILTER):
from ...tui.tuiobjects import tabs
content = tabs.focus.base_widget
if not hasattr(content, 'secondary_filter'):
raise CmdError('This tab does not support limiting.')
else:
if clear or not FILTER:
content.secondary_filter = None
else:
try:
content.secondary_filter = FILTER
except ValueError as e:
raise CmdError(e)
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
from ...tui.tuiobjects import tabs
from ...tui.views import (TorrentListWidget, FileListWidget,
PeerListWidget, TrackerListWidget,
SettingListWidget)
widget = tabs.focus.base_widget
if hasattr(widget, 'secondary_filter'):
if isinstance(widget, TorrentListWidget):
return candidates.torrent_filter(args.curarg)
elif isinstance(widget, FileListWidget):
torrent_filter = 'id=%s' % (widget.focused_torrent_id,)
return candidates.file_filter(args.curarg, torrent_filter)
elif isinstance(widget, PeerListWidget):
return candidates.peer_filter(args.curarg, None)
elif isinstance(widget, TrackerListWidget):
torrent_filter = '|'.join('id=%s' % (itemw.torrent_id,)
for itemw in widget.items)
return candidates.tracker_filter(args.curarg, torrent_filter)
elif isinstance(widget, SettingListWidget):
return candidates.setting_filter(args.curarg)
class SortCmd(metaclass=CommandMeta):
name = 'sort'
aliases = ()
provides = {'tui'}
category = 'tui'
description = "Sort lists of torrents/peers/trackers/etc"
usage = ('sort [<OPTIONS>] [<ORDER> <ORDER> <ORDER> ...]',)
examples = ('sort tracker status !rate-down',
'sort --add eta')
argspecs = (
{'names': ('ORDER',), 'nargs': '*',
'description': 'How to sort list items (see SORT ORDERS section)'},
{'names': ('--add', '-a'), 'action': 'store_true',
'description': 'Append ORDERs to current list of sort orders instead of replacing it'},
{'names': ('--delete', '-d'), 'action': 'store_true',
'description': 'Delete ORDERs from current list of sort orders instead of replacing it'},
{'names': ('--reset', '-r'), 'action': 'store_true',
'description': 'Go back to sort order that was used when the list was created'},
{'names': ('--none', '-n'), 'action': 'store_true',
'description': 'Remove all sort orders from the list'},
)
def _list_sort_orders(title, sortcls):
return (title,) + \
tuple('\t{}\t - \t{}'.format(', '.join((sname,) + s.aliases), s.description)
for sname,s in sorted(sortcls.SORTSPECS.items()))
more_sections = {
'SORT ORDERS': (_list_sort_orders('TORRENT LISTS', client.TorrentSorter) +
('',) +
_list_sort_orders('PEER LISTS', client.PeerSorter) +
('',) +
_list_sort_orders('TRACKER LISTS', client.TrackerSorter))
}
async def run(self, add, delete, reset, none, ORDER):
from ...tui.tuiobjects import tabs
current_tab = tabs.focus.base_widget
if reset:
current_tab.sort = 'RESET'
if none:
current_tab.sort = None
if ORDER:
# # Find appropriate sorter class for focused list
sortcls = self._widget2sortcls(current_tab)
if sortcls is None:
raise CmdError('Current tab is not sortable.')
try:
new_sort = sortcls(utils.listify_args(ORDER))
except ValueError as e:
raise CmdError(e)
if add and current_tab.sort is not None:
current_tab.sort += new_sort
elif delete and current_tab.sort is not None:
current_tab.sort -= new_sort
else:
current_tab.sort = new_sort
@staticmethod
def _widget2sortcls(list_widget):
from ...tui.views import (TorrentListWidget, PeerListWidget,
TrackerListWidget, SettingListWidget)
if isinstance(list_widget, TorrentListWidget):
return client.TorrentSorter
elif isinstance(list_widget, PeerListWidget):
return client.PeerSorter
elif isinstance(list_widget, TrackerListWidget):
return client.TrackerSorter
elif isinstance(list_widget, SettingListWidget):
return client.SettingSorter
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
from ...tui.tuiobjects import tabs
sortcls = cls._widget2sortcls(tabs.focus.base_widget)
if sortcls is not None:
return candidates.sort_orders(sortcls.__name__)
class TabCmd(mixin.select_torrents, metaclass=CommandMeta):
name = 'tab'
provides = {'tui'}
category = 'tui'
description = 'Open, close and focus tabs'
usage = ('tab [<OPTIONS>]',
'tab [<OPTIONS>] <COMMAND>')
examples = ('tab',
'tab -c',
'tab -c active',
'tab ls active',
'tab -b ls active',
'tab -f active',
'tab -f 3 ls active',
'tab -b -f -1 ls active')
argspecs = (
{'names': ('--background', '-b'), 'action': 'store_true',
'description': 'Do not focus new tab'},
{'names': ('--close-all', '-C'), 'action': 'store_true',
'description': 'Close all tabs'},
{'names': ('--close', '-c'), 'nargs': '?', 'default': False, 'document_default': False,
'description': 'Close focused or specified tab (see TAB IDENTIFIERS SECTION)'},
{'names': ('--focus', '-f'),
'description': 'Focus specified tab (see TAB IDENTIFIERS SECTION)'},
{'names': ('--move', '-m'),
'description': 'Move focused tab left, right or to absolute position'},
{'names': ('--title', '-t'),
'description': 'Manually set tab title instead of generating one'},
{'names': ('COMMAND',), 'nargs': 'REMAINDER',
'description': ('Command to run in tab')},
)
more_sections = {
'TAB IDENTIFIERS': (
'There are three ways to specify a tab (e.g. to close it):',
(' - \tIntegers specify the position of the tab. Positive numbers '
'start from the left and negative numbers start from the right '
'(1 (and 0) is the leftmost tab and -1 is the rightmost tab).'),
(' - \t"left" and "right" specify the tabs next to the '
'currently focused tab.'),
(' - \tAnything else is assumed to be a part of a tab title. If there '
'are multiple matches, the first match from the left wins.'),
),
}
async def run(self, background, close_all, close, focus, move, title, COMMAND):
from ...tui.tuiobjects import tabs
tabid_old = tabs.get_id()
# Find relevant tab IDs and fail immediately if unsuccessful
if focus is not None:
tabid_focus = self._get_tab_id(focus)
if tabid_focus is None:
raise CmdError('No such tab: %r' % (focus,))
if close is not False:
tabid_close = self._get_tab_id(close)
if tabid_close is None:
if close is None:
raise CmdError('No tab is open')
else:
raise CmdError('No such tab: %r' % (close,))
# COMMAND may get additional hidden arguments as instance attributes
cmd_attrs = {}
# Apply close/focus/move operations
if focus is not None:
log.debug('Focusing tab %r', tabid_focus)
tabs.focus_id = tabid_focus
if close_all is not False:
log.debug('Closing all tabs')
tabs.clear()
elif close is not False:
log.debug('Closing tab %r', tabid_close)
tabs.remove(tabid_close)
elif move and tabs.focus:
self._move_tab(tabs, move)
# If no tabs were closed, focused or moved, open a new one
if close is False and close_all is False and focus is None and not move:
titlew = make_tab_title_widget(title or 'Empty tab',
attr_unfocused='tabs.unfocused',
attr_focused='tabs.focused')
tabs.insert(titlew, position='right')
log.debug('Inserted new tab at position %d: %r', tabs.focus_position, titlew.base_widget.text)
# Maybe provide a user-specified tab title to the new command
if title:
cmd_attrs['title'] = title
if COMMAND:
# Execute command
cmd_str = ' '.join(shlex.quote(arg) for arg in COMMAND)
log.debug('Running command in tab %s with args %s: %r',
tabs.focus_position,
', '.join('%s=%r' % (k,v) for k,v in cmd_attrs.items()),
cmd_str)
success = await objects.cmdmgr.run_async(cmd_str, **cmd_attrs)
else:
success = True
if background:
tabs.focus_id = tabid_old
else:
content = tabs.focus
if content is not None and hasattr(content, 'marked_count'):
from ...tui.tuiobjects import bottombar
bottombar.marked.update(content.marked_count)
return success
def _get_tab_id(self, pos):
from ...tui.tuiobjects import tabs
if len(tabs) == 0:
return None
if pos is None:
return tabs.focus_id
def find_id_by_index(index):
try:
index = int(index)
except ValueError:
pass
else:
index_max = len(tabs) - 1
# Internally, first tab is at index 0, but for users it's 1, unless
# they gave us 0, in which case we assume they mean 1.
index = index - 1 if index > 0 else index
# Limit index to index_max, considering negative values when
# indexing from the right.
if index < 0:
index = max(index, -index_max - 1)
else:
index = min(index, index_max)
return tabs.get_id(index)
def find_right_left_id(right_or_left):
tabcount = len(tabs)
if tabcount > 1:
cur_index = tabs.focus_position
cur_index = 1 if cur_index is None else cur_index
if right_or_left == 'left':
return tabs.get_id(max(0, cur_index - 1))
elif right_or_left == 'right':
return tabs.get_id(min(tabcount - 1, cur_index + 1))
def find_id_by_title(string):
for index,title in enumerate(tabs.titles):
if string in title.original_widget.text:
return tabs.get_id(index)
# Try to use pos as an index
tabid = find_id_by_index(pos)
if tabid is not None:
log.debug('Found tab ID by index: %r -> %r', pos, tabid)
return tabid
pos_str = str(pos)
# Move to left/right tab
tabid = find_right_left_id(pos_str)
if tabid is not None:
log.debug('Found tab ID by direction: %r -> %r', pos, tabid)
return tabid
# Try to find tab title
tabid = find_id_by_title(pos_str)
if tabid is not None:
log.debug('Found tab ID by title: %r -> %r', pos, tabid)
return tabid
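    # Illustrative resolution of the identifier forms documented in the TAB
    # IDENTIFIERS section (the tab titles used here are assumptions):
    #
    #   self._get_tab_id(2)         -> ID of the second tab from the left
    #   self._get_tab_id(-1)        -> ID of the rightmost tab
    #   self._get_tab_id('left')    -> ID of the tab left of the focused one
    #   self._get_tab_id('active')  -> ID of the first tab whose title
    #                                  contains 'active'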
def _move_tab(self, tabs, move):
if move == 'left':
tabs.move(tabs.get_id(), 'left')
elif move == 'right':
tabs.move(tabs.get_id(), 'right')
else:
try:
index = int(move)
except (ValueError, TypeError):
raise CmdError('--move argument must be "left", "right" or tab index: %r' % (move,))
else:
# Positive tab index starts at 0, negative at -1
if index > 0:
index -= 1
tabs.move(tabs.get_id(), index)
_own_options = {('--close', '-c'): 1,
('--focus', '-f'): 1,
('--title', '-t'): 1}
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
posargs = args.posargs(cls._own_options)
if posargs.curarg_index == 1:
# First positional argument is the subcmd's name
return candidates.commands()
else:
# Any other positional arguments will be passed to subcmd
subcmd = cls._get_subcmd(args)
if subcmd:
return candidates.for_args(subcmd)
@classmethod
def completion_candidates_opts(cls, args):
"""Return candidates for arguments that start with '-'"""
subcmd = cls._get_subcmd(args)
if subcmd:
# Get completion candidates for subcmd
return candidates.for_args(subcmd)
else:
# Parent class generates candidates for our own options
return super().completion_candidates_opts(args)
@classmethod
def completion_candidates_params(cls, option, args):
"""Complete parameters (e.g. --option parameter1,parameter2)"""
if option in ('--close', '--focus'):
return candidates.tab_titles()
@classmethod
def _get_subcmd(cls, args):
# First posarg is 'tab'
subcmd_start = args.nth_posarg_index(2, cls._own_options)
# Subcmd is only relevant if the cursor is somewhere on it.
# Otherwise, we're on our own arguments.
if subcmd_start is not None and subcmd_start < args.curarg_index:
return args[subcmd_start:]
class TUICmd(metaclass=CommandMeta):
name = 'tui'
provides = {'tui'}
category = 'tui'
description = 'Show or hide parts of the text user interface'
usage = ('tui <ACTION> <ELEMENT> <ELEMENT> ...',)
examples = ('tui toggle log',
'tui hide topbar.help')
argspecs = (
{'names': ('ACTION',), 'choices': ('show', 'hide', 'toggle'),
'description': '"show", "hide" or "toggle"'},
{'names': ('ELEMENT',), 'nargs': '+',
'description': ('Name of TUI elements; '
'see ELEMENT NAMES section for a list')},
)
# HelpManager supports sequences of lines or a callable that returns them
more_sections = {'ELEMENT NAMES': lambda: ('Available TUI element names are: ' +
', '.join(_tui_element_names()),)}
def run(self, ACTION, ELEMENT):
from ...tui.tuiobjects import widgets
widget = None
success = True
for element in utils.listify_args(ELEMENT):
# Resolve path
path = element.split('.')
target_name = path.pop(-1)
current_path = []
widget = widgets
try:
for widgetname in path:
current_path.append(widgetname)
widget = getattr(widget, widgetname)
except AttributeError:
self.error('Unknown TUI element: %r' % ('.'.join(current_path),))
if widget is not None:
action = getattr(widget, ACTION)
if any(ACTION == x for x in ('hide', 'toggle')):
action = partial(action, free_space=False)
log.debug('%sing %s in %s', ACTION.capitalize(), target_name, widget)
try:
action(target_name)
except ValueError as e:
success = False
self.error(e)
else:
success = success and True
if not success:
raise CmdError()
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
posargs = args.posargs()
if posargs.curarg_index == 1:
for argspec in cls.argspecs:
if 'ACTION' in argspec['names']:
return candidates.Candidates(argspec['choices'],
label='Action')
else:
return candidates.Candidates(_tui_element_names(),
label='Element')
# Lazily load element names from tui module to avoid importing TUI stuff if possible
@functools.lru_cache()
def _tui_element_names():
from ...tui import tuiobjects
return tuple(str(name) for name in sorted(tuiobjects.widgets.names_recursive))
| gpl-3.0 | -9,125,426,363,984,953,000 | 38.994728 | 111 | 0.546447 | false |
swprojects/Serial-Sequence-Creator | dialogs/stepvoltage.py | 1 | 10958 | """
Description:
Requirements: pySerial, wxPython Phoenix
Glossary and other descriptions:
DMM - digital multimeter
PSU - power supply
SBC - single board computer
INS - general instrument commands
GEN - general sequence instructions
"""
import wx
import theme
import base
# from wx.lib.agw import spinctrl
class StepVoltage(wx.Dialog):
def __init__(self, parent, instruments, variables):
wx.Dialog.__init__(self,
parent,
title="Step Voltage")
self._variables = variables
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
sbox = wx.StaticBox(panel, label="")
sbox_sizer = wx.StaticBoxSizer(sbox, wx.HORIZONTAL)
grid = wx.GridBagSizer(5,5)
row = 0
# row += 1 #let's start at 1, to give some space
lbl_psu = wx.StaticText(panel, label="Power Supply:")
choices = instruments
self.cbox_psu = wx.ComboBox(panel, choices=choices)
self.cbox_psu.Bind(wx.EVT_COMBOBOX, self.OnPsuSelected)
grid.Add(lbl_psu, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.cbox_psu, pos=(row,1), span=(0,3), flag=wx.ALL|wx.EXPAND, border=5)
grid.AddGrowableCol(1)
row += 1
lbl_initial = wx.StaticText(panel, label="Initial Voltage:")
self.spin_initial = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_initial2 = wx.SpinCtrl(panel, max=99, min=0, size=(50, -1))
self.spin_initial.Bind(wx.EVT_SPINCTRL, self.OnSpinInitial)
self.spin_initial2.Bind(wx.EVT_SPINCTRL, self.OnSpinInitial)
self.lbl_voltage = wx.StaticText(panel, label="0.0v")
grid.Add(lbl_initial, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_initial, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_initial2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_voltage, pos=(row,3), flag=wx.ALL, border=5)
row += 1
lbl_final = wx.StaticText(panel, label="Final Voltage (Limit):")
self.spin_final = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_final2 = wx.SpinCtrl(panel, max=99, min=0, size=(50, -1))
self.spin_final.Bind(wx.EVT_SPINCTRL, self.OnSpinFinal)
self.spin_final2.Bind(wx.EVT_SPINCTRL, self.OnSpinFinal)
self.lbl_voltage2 = wx.StaticText(panel, label="0.0v")
grid.Add(lbl_final, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_final, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_final2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_voltage2, pos=(row,3), flag=wx.ALL, border=5)
row += 1
lbl_step = wx.StaticText(panel, label="Voltage Increment/Decrement:")
self.spin_step = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_step2 = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_step.Bind(wx.EVT_SPINCTRL, self.OnSpinStep)
self.spin_step2.Bind(wx.EVT_SPINCTRL, self.OnSpinStep)
self.lbl_step2 = wx.StaticText(panel, label="0.0v")
grid.Add(lbl_step, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_step, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_step2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_step2, pos=(row,3), flag=wx.ALL, border=5)
row += 1
        lbl_step_delay = wx.StaticText(panel, label="Delay before Increment/decrement (s):")
self.spin_step_delay = wx.SpinCtrl(panel, max=59, min=0, size=(50, -1))
self.spin_step_delay2 = wx.SpinCtrl(panel, max=59, min=0, size=(50, -1))
self.lbl_step_delay = wx.StaticText(panel, label="0.0s")
self.spin_step_delay.Bind(wx.EVT_SPINCTRL, self.OnSpinStepDelay)
self.spin_step_delay2.Bind(wx.EVT_SPINCTRL, self.OnSpinStepDelay)
grid.Add(lbl_step_delay, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_step_delay, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_step_delay2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_step_delay, pos=(row,3), flag=wx.ALL, border=5)
row += 1
lbl_repeat = wx.StaticText(panel, label="Repeat:")
spin_repeat = wx.SpinCtrl(panel, max=999, min=0, size=(50, -1))
grid.Add(lbl_repeat, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(spin_repeat, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=5)
row += 1
lbl_local = wx.StaticText(panel, label="Local Name:")
default = defaultname = "stepvolt"
index = 1
while defaultname in self._variables["locals"]:
defaultname = default + str(index)
index += 1
self.text_local = wx.TextCtrl(panel, value=defaultname)
grid.Add(lbl_local, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.text_local, pos=(row,1), span=(0,2), flag=wx.ALL|wx.EXPAND, border=5)
row += 1
lbl_global = wx.StaticText(panel, label="Global Name:")
self.text_global = wx.TextCtrl(panel, value="")
grid.Add(lbl_global, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.text_global, pos=(row,1), span=(0,2), flag=wx.ALL|wx.EXPAND, border=5)
# row += 1
# self.lbl_error = wx.StaticText(panel, label="")
# grid.Add(self.lbl_error, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
# if self.cbox_psu.GetSelection() == -1:
# self.lbl_error.SetLabel("*Cannot add this step unless a power supply is selected")
sbox_sizer.Add(grid, 1, wx.ALL|wx.EXPAND, 0)
sbox_sizer.AddSpacer(10)
#-----
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.AddStretchSpacer()
btn_cancel = wx.Button(panel, label="Cancel", id=wx.ID_CANCEL)
btn_cancel.Bind(wx.EVT_BUTTON, self.OnButton)
self.btn_add = wx.Button(panel, label="Add", id=wx.ID_OK)
self.btn_add.Bind(wx.EVT_BUTTON, self.OnButton)
# self.btn_add.Disable()
hsizer.Add(btn_cancel, 0, wx.ALL|wx.EXPAND, 5)
hsizer.Add(self.btn_add, 0, wx.ALL|wx.EXPAND, 5)
#add to main sizer
sizer.Add(sbox_sizer, 0, wx.ALL|wx.EXPAND, 2)
sizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 5)
panel.SetSizer(sizer)
w, h = sizer.Fit(self)
# self.SetSize((w, h*1.5))
# self.SetMinSize((w, h*1.5))
# self.SetMaxSize(sizer.Fit(self))
try:
self.SetIcon(theme.GetIcon("psu_png"))
except:
pass
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
def OnPsuSelected(self, event):
pass
# self.btn_add.Enable()
# self.lbl_error.SetLabel("")
def OnKeyUp(self, event):
key = event.GetKeyCode()
print(event)
        if key == wx.WXK_ESCAPE:
self.EndModal(wx.ID_CANCEL)
def OnSpinInitial(self, event=None):
v0 = self.spin_initial.GetValue()
v1 = self.spin_initial2.GetValue()
label = str(v0) + "." + str(v1) + "v"
self.lbl_voltage.SetLabel(label)
def OnSpinFinal(self, event=None):
v0 = self.spin_final.GetValue()
v1 = self.spin_final2.GetValue()
label = str(v0) + "." + str(v1) + "v"
self.lbl_voltage2.SetLabel(label)
def OnSpinStep(self, event=None):
v0 = self.spin_step.GetValue()
v1 = self.spin_step2.GetValue()
label = str(v0) + "." + str(v1) + "v"
self.lbl_step2.SetLabel(label)
def OnSpinStepDelay(self, event=None):
s0 = self.spin_step_delay.GetValue()
s1 = self.spin_step_delay2.GetValue()
label = str(s0) + "." + str(s1) + "s"
self.lbl_step_delay.SetLabel(label)
def OnButton(self, event):
e = event.GetEventObject()
label = e.GetLabel()
id = e.GetId()
if label == "Cancel":
self.EndModal(id)
elif label == "Add":
self.EndModal(id)
def SetValue(self, data):
params = data["parameters"]
params = "), " + params[1:-1] + ", (" #so we can split it easier
param_dict = {}
params = params.split("), (")
for param in params:
param = param[1: -1]
if param == "":
continue
key, value = param.split("', '")
param_dict[key] = value
self.cbox_psu.SetValue(param_dict["psu"])
self.lbl_step_delay.SetLabel(param_dict["delay"])
self.lbl_step2.SetLabel(param_dict["step"])
self.lbl_voltage.SetLabel(param_dict["v1"])
self.lbl_voltage2.SetLabel(param_dict["v0"])
#increment delay
spin1, spin2 = param_dict["delay"][:-1].split(".")
		self.spin_step_delay.SetValue(spin1)
		self.spin_step_delay2.SetValue(spin2)
#initial voltage
spin1, spin2 = param_dict["v0"][:-1].split(".")
self.spin_initial.SetValue(spin1)
self.spin_initial2.SetValue(spin2)
#final voltage
spin1, spin2 = param_dict["v1"][:-1].split(".")
self.spin_final.SetValue(spin1)
self.spin_final2.SetValue(spin2)
		# voltage increment
spin1, spin2 = param_dict["step"][:-1].split(".")
self.spin_step.SetValue(spin1)
self.spin_step2.SetValue(spin2)
#
self.text_local.SetValue(data["local"])
self.text_global.SetValue(data["global"])
def GetValue(self):
data = [("psu", self.cbox_psu.GetValue()),
("v0", self.lbl_voltage.GetLabel()),
("v1", self.lbl_voltage2.GetLabel()),
("step", self.lbl_step2.GetLabel()),
("delay", self.lbl_step_delay.GetLabel())]
data = {"action":"Step Voltage",
"parameters":str(data)}
local = self.text_local.GetValue()
if local != "":
for char in local:
if char.isdigit() or char.isalpha():
continue
local = local.replace(char, "_")
data["local"] = local
glob = self.text_global.GetValue()
if glob != "":
for char in glob:
if char.isdigit() or char.isalpha():
continue
glob = glob.replace(char, "_")
data["global"] = glob
return data | mit | 7,247,116,515,581,154,000 | 37.452632 | 96 | 0.551926 | false |
ichuang/sympy | sympy/statistics/tests/test_statistics.py | 1 | 3177 | from sympy import sqrt, Rational, oo, Symbol, exp, pi
from sympy.functions import erf
from sympy.statistics.distributions import Normal, Uniform
from sympy.statistics.distributions import PDF
from operator import abs
from sympy.mpmath import mp
def test_normal():
dps, mp.dps = mp.dps, 20
N = Normal(0, 1)
assert N.random()
assert N.mean == 0
assert N.variance == 1
assert N.probability(-1, 1) == erf(1/sqrt(2))
assert N.probability(-1, 0) == erf(1/sqrt(2))/2
N = Normal(2, 4)
assert N.mean == 2
assert N.variance == 16
assert N.confidence(1) == (-oo, oo)
assert N.probability(1, 3) == erf(1/sqrt(32))
assert N.pdf(1).evalf() == (exp(Rational(-1,32)) / (4*sqrt(2*pi))).evalf()
for p in [0.1, 0.3, 0.7, 0.9, 0.995]:
a, b = N.confidence(p)
assert abs(float(N.probability(a, b).evalf()) - p) < 1e-10
N = Normal(0, 2/sqrt(2*pi))
assert N.pdf(0) == Rational(1,2)
mp.dps = dps
def test_uniform():
U = Uniform(-3, -1)
assert str(U) == "Uniform(-3, -1)"
assert repr(U) == "Uniform(-3, -1)"
x = U.random()
assert x < -1 and x > -3
assert U.mean == -2
assert U.confidence(1) == (-3, -1)
assert U.confidence(Rational(1,2)) == (Rational(-5,2), Rational(-3,2))
assert U.pdf(-4) == 0
assert U.pdf(-Rational(3,2)) == Rational(1,2)
assert U.pdf(0) == 0
assert U.cdf(-4) == 0
assert U.cdf(-Rational(3,2)) == Rational(3,4)
assert U.cdf(0) == 1
def test_fit():
import random
random.seed(1234)
n = Normal.fit(Uniform.fit(Normal(2, 1.5).random(1000)))
#print n.mean
#print n.stddev
assert abs(n.mean - 2) < 0.3
assert abs(n.stddev - 1.5) < 0.3
n = Normal.fit([1,2,3,4,5])
assert n.mean == 3
assert n.stddev == sqrt(2)
n = Uniform.fit([1,2,3,4,5])
assert n.mean == 3
assert n.stddev == sqrt(2)
def test_sample():
from sympy.statistics.distributions import Sample
s = Sample([0,1])
assert str(s) == "Sample([0, 1])"
assert repr(s) == "Sample([0, 1])"
assert s.mean == Rational(1,2)
assert s.median == Rational(1,2)
s = Sample([4,2,3])
assert s == Sample([2, 3, 4])
assert s.median == 3
s = Sample([4,2,3,1])
assert s.median == Rational(5,2)
def test_PDF():
a = Symbol('a', positive=True)
x = Symbol('x', real=True)
exponential = PDF(exp(-x/a), (x,0,oo))
exponential = exponential.normalize()
assert exponential.pdf(x) == 1/a*exp(-x/a)
assert exponential.cdf(x) == 1 - exp(-x/a)
assert exponential.mean == a
assert exponential.variance == a**2
assert exponential.stddev == a
exponential = PDF(exp(-x/a), x)
assert exponential.pdf(x) == exp(-x/a)
assert exponential.cdf(x) == -a*exp(-x/a) + oo
assert exponential.mean == -oo
exponential = PDF(1, (x,1,2))
assert exponential.normalize() == exponential
assert exponential._get_stddev() == sqrt(3)/6
assert exponential._get_stddev() == sqrt(3)/6
#This test is intentionally repeated to test PDF._get_stddev() properly.
exponential = exponential.transform(x, x)
assert exponential.pdf(x) == 1
assert exponential.cdf(x) == x - 1
| bsd-3-clause | 7,874,197,552,083,216,000 | 31.752577 | 78 | 0.593327 | false |
PaskoMoto/fermatic | monitor_dbus-1.0.py | 1 | 2367 | #!/usr/bin/python3
# Fernando Pascual Sesma. April 2016
from gi.repository import GLib
import time
import datetime
import os
import sys
import dbus
import dbus.glib
OBJ_PATH = '/ril_0'
INTERFACE = 'org.ofono.NetworkRegistration'
SERVICE = 'org.ofono'
LOG = open('/home/nemo/fermatic/log.txt','a')
CellIds = [line.rstrip('\n') for line in open('/home/nemo/fermatic/cellid.txt')]
HOMENETWORK = True
def handler(member=None,sender=None,path=None):
global HOMENETWORK
if member == 'CellId':
LOG.write("_***_"+datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d,%H:%M:%S')+":"+str(sender)+"_***_\n")
try:
index = CellIds.index(str(sender))
if HOMENETWORK:
pass
else:
LOG.write ("Tenemos que cambiar a indoors"+"\n")
os.system("dbus-send --system --print-reply --dest=net.connman /net/connman/service/cellular_YOUR_IMSI_NUMBER_HERE_context1 net.connman.Service.SetProperty string:AutoConnect variant:boolean:false")
os.system("dbus-send --system --print-reply --dest=net.connman /net/connman/technology/wifi net.connman.Technology.SetProperty string:Powered variant:boolean:true")
# os.system("killall openvpn")
HOMENETWORK = True
except ValueError:
if not HOMENETWORK:
pass
else:
LOG.write ("Tenemos que cambiar a outdoors"+"\n")
# os.system("killall openvpn")
os.system("dbus-send --system --print-reply --dest=net.connman /net/connman/service/cellular_YOUR_IMSI_NUMBER_HERE_context1 net.connman.Service.SetProperty string:AutoConnect variant:boolean:true")
os.system("dbus-send --system --print-reply --dest=net.connman /net/connman/technology/wifi net.connman.Technology.SetProperty string:Powered variant:boolean:false")
# os.system("/usr/sbin/openvpn --user nobody --group nobody --config /etc/openvpn/Jolla.conf --dev p2p5 --dev-type tap --verb 4")
LOG.write("_***_"+datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d,%H:%M:%S')+":"+str(sender)+"_***_\n")
HOMENETWORK = False
LOG.flush()
bus = dbus.SystemBus()
proxy_obj = bus.get_object(SERVICE, OBJ_PATH)
dbus_iface = dbus.Interface(proxy_obj, INTERFACE)
dbus_iface.connect_to_signal('PropertyChanged', handler, path_keyword='path')
mainloop = GLib.MainLoop()
mainloop.run()
| gpl-3.0 | -6,474,788,818,252,318,000 | 47.306122 | 208 | 0.680186 | false |
GoogleCloudPlatform/repo-automation-playground | xunit-autolabeler-v2/ast_parser/core/polyglot_drift_data.py | 1 | 1183 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
from recordclass import RecordClass
class PolyglotDriftData(RecordClass):
"""Struct for storing snippet metadata
This object stores language-agnostic ("polyglot")
snippet data extracted from snippet source files
for use by the second-stage "polyglot" parser.
"""
name: str
class_name: str
method_name: str
source_path: str
start_line: int
end_line: int
parser: str
region_tags: List[str] = []
test_methods: List[Tuple[str, str]] = []
children: List[str] = []
url: str = None
http_methods: List[str] = []
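# Minimal usage sketch; the field values below are assumptions for
# illustration, not data from a real snippet:
#
#   snippet = PolyglotDriftData(
#       name='example_region_tag',
#       class_name='ExampleClass',
#       method_name='example_method',
#       source_path='path/to/example.py',
#       start_line=10,
#       end_line=42,
#       parser='example_parser',
#   )
#
# Fields with defaults (region_tags, test_methods, children, url,
# http_methods) may be omitted and filled in by later parsing stages.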
| apache-2.0 | -7,444,638,999,246,896,000 | 30.131579 | 74 | 0.709214 | false |
Psychedelic-Engineering/sleep-machine | hardware/channel.py | 1 | 2995 | from collections import deque
import math
import numpy as np
from scipy import signal
class Channel:
def __init__(self, name, min, max, maxNum, offset=0.0):
self.name = name
self.min = min
self.max = max
self.num = 0
self.sum = 0
self.buffersum = 0
self.size = maxNum
self.buffer = deque(maxlen=maxNum)
self.offset = offset
self.npBufferSize = 800
self.npBufferPos = 0
self.npBuffer = np.zeros(self.npBufferSize)
self.lastVal = 0
def __repr__(self):
return "%s (%.1f-%.1f)" % (self.name, self.min, self.max)
def calibrate(self):
self.offset = -self.buffersum / min(self.size, self.num)
def smooth(self, x,beta):
window_len=50
sampleRate = 10
cutOff = 0.01
fir_coeff = signal.firwin(window_len, cutOff)
smoothed = signal.lfilter(fir_coeff, 1.0, self.npBuffer)
return smoothed
def putValue(self, value):
# deque buffer
if self.num >= self.size:
self.buffersum -= self.buffer[0]
newValue = value
self.buffersum += newValue
self.buffer.append(newValue)
self.num += 1
self.sum += newValue
"""
# numpy buffer
self.npBufferPos += 1
if self.npBufferPos >= self.npBufferSize:
self.npBufferPos = 0
self.smoothed = self.smooth(self.npBuffer, 1)
self.gradient = np.diff(self.npBuffer)
try:
self.onUpdate(self)
except:
#raise
pass
self.npBuffer[self.npBufferPos] = value
"""
# Auto Calibration
#if self.num % 100 == 0:
# self.calibrate()
	# Note: this second definition overrides the moving-average calibrate()
	# defined above; only this last-value version takes effect at runtime.
	def calibrate(self):
		self.offset = -self.buffer[-1]
def getValue(self):
#if self.num > 0:
return self.buffer[-1] + self.offset
def getAvg(self):
return self.sum / self.num + self.offset
def getBufferAvg(self):
val = self.buffer[-1] # current value
avg = self.buffersum / min(self.size, self.num) # moving average
mix = 0.5 * val + 0.5 * avg # weighted average
dif = math.pow((val - avg) / 20, 5) # differential
rng = 0
#for i in self.buffer:
# rng = max(abs(avg-i), rng)
#return rng
return avg + self.offset
if dif > 50:
#self.buffersum = val * self.size
return val + self.offset
else:
return avg + self.offset
def getRng(self):
rng = 0
der = 0
avg = self.buffersum / min(self.size, self.num)
for i in self.buffer:
#rng = 0.01 * max(pow(avg - i, 4), rng)
der = der + pow((avg - i) / 4, 2)
#der = der + abs(avg-i)
der /= self.size
return der
def getDeriv(self):
val = self.buffer[-1] # current value
avg = self.buffersum / min(self.size, self.num) # moving average
mix = 0.5 * val + 0.5 * avg # weighted average
dif = avg - val
#dif = 5 * math.pow(dif / 20, 6) # differential
return dif
def getDiff(self):
avg = self.buffersum / min(self.size, self.num)
result = avg - self.lastVal
self.lastVal = avg
#return math.pow(result, 2)
if self.num>2:
result = self.buffer[-1] - self.buffer[-2]
else:
result = 0
return math.pow(result, 4) | mit | -5,301,480,919,418,147,000 | 23.760331 | 68 | 0.620033 | false |
dscho/hg | mercurial/wireproto.py | 1 | 32128 | # wireproto.py - generic wire protocol support functions
#
# Copyright 2005-2010 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import hashlib
import itertools
import os
import sys
import tempfile
from .i18n import _
from .node import (
bin,
hex,
)
from . import (
bundle2,
changegroup as changegroupmod,
encoding,
error,
exchange,
peer,
pushkey as pushkeymod,
streamclone,
util,
)
urlerr = util.urlerr
urlreq = util.urlreq
bundle2required = _(
'incompatible Mercurial client; bundle2 required\n'
'(see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n')
class abstractserverproto(object):
"""abstract class that summarizes the protocol API
Used as reference and documentation.
"""
def getargs(self, args):
"""return the value for arguments in <args>
returns a list of values (same order as <args>)"""
raise NotImplementedError()
def getfile(self, fp):
"""write the whole content of a file into a file like object
The file is in the form::
(<chunk-size>\n<chunk>)+0\n
chunk size is the ascii version of the int.
"""
raise NotImplementedError()
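    # For illustration, a two-chunk payload in the documented form would be
    # (chunk contents assumed):
    #
    #   "5\nhello3\nfoo0\n"
    #
    # i.e. the ascii chunk length, a newline, the chunk bytes, repeated,
    # then a terminating "0\n".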
def redirect(self):
"""may setup interception for stdout and stderr
See also the `restore` method."""
raise NotImplementedError()
# If the `redirect` function does install interception, the `restore`
# function MUST be defined. If interception is not used, this function
# MUST NOT be defined.
#
# left commented here on purpose
#
#def restore(self):
# """reinstall previous stdout and stderr and return intercepted stdout
# """
# raise NotImplementedError()
def groupchunks(self, cg):
"""return 4096 chunks from a changegroup object
Some protocols may have compressed the contents."""
raise NotImplementedError()
class remotebatch(peer.batcher):
'''batches the queued calls; uses as few roundtrips as possible'''
def __init__(self, remote):
'''remote must support _submitbatch(encbatch) and
_submitone(op, encargs)'''
peer.batcher.__init__(self)
self.remote = remote
def submit(self):
req, rsp = [], []
for name, args, opts, resref in self.calls:
mtd = getattr(self.remote, name)
batchablefn = getattr(mtd, 'batchable', None)
if batchablefn is not None:
batchable = batchablefn(mtd.im_self, *args, **opts)
encargsorres, encresref = next(batchable)
if encresref:
req.append((name, encargsorres,))
rsp.append((batchable, encresref, resref,))
else:
resref.set(encargsorres)
else:
if req:
self._submitreq(req, rsp)
req, rsp = [], []
resref.set(mtd(*args, **opts))
if req:
self._submitreq(req, rsp)
def _submitreq(self, req, rsp):
encresults = self.remote._submitbatch(req)
for encres, r in zip(encresults, rsp):
batchable, encresref, resref = r
encresref.set(encres)
resref.set(next(batchable))
class remoteiterbatcher(peer.iterbatcher):
def __init__(self, remote):
super(remoteiterbatcher, self).__init__()
self._remote = remote
def __getattr__(self, name):
if not getattr(self._remote, name, False):
raise AttributeError(
'Attempted to iterbatch non-batchable call to %r' % name)
return super(remoteiterbatcher, self).__getattr__(name)
def submit(self):
"""Break the batch request into many patch calls and pipeline them.
This is mostly valuable over http where request sizes can be
limited, but can be used in other places as well.
"""
req, rsp = [], []
for name, args, opts, resref in self.calls:
mtd = getattr(self._remote, name)
batchable = mtd.batchable(mtd.im_self, *args, **opts)
encargsorres, encresref = next(batchable)
assert encresref
req.append((name, encargsorres))
rsp.append((batchable, encresref))
if req:
self._resultiter = self._remote._submitbatch(req)
self._rsp = rsp
def results(self):
for (batchable, encresref), encres in itertools.izip(
self._rsp, self._resultiter):
encresref.set(encres)
yield next(batchable)
# Forward a couple of names from peer to make wireproto interactions
# slightly more sensible.
batchable = peer.batchable
future = peer.future
# list of nodes encoding / decoding
def decodelist(l, sep=' '):
if l:
return map(bin, l.split(sep))
return []
def encodelist(l, sep=' '):
try:
return sep.join(map(hex, l))
except TypeError:
raise
# batched call argument encoding
def escapearg(plain):
return (plain
.replace(':', ':c')
.replace(',', ':o')
.replace(';', ':s')
.replace('=', ':e'))
def unescapearg(escaped):
return (escaped
.replace(':e', '=')
.replace(':s', ';')
.replace(':o', ',')
.replace(':c', ':'))
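# For illustration, a batched argument value containing the separator
# characters is escaped before being joined with ',' and ';' (the example
# string is assumed):
#
#   escapearg('key=a,b;c')       -> 'key:ea:ob:sc'
#   unescapearg('key:ea:ob:sc')  -> 'key=a,b;c'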
# mapping of options accepted by getbundle and their types
#
# Meant to be extended by extensions. It is extensions responsibility to ensure
# such options are properly processed in exchange.getbundle.
#
# supported types are:
#
# :nodes: list of binary nodes
# :csv: list of comma-separated values
# :scsv: list of comma-separated values return as set
# :plain: string with no transformation needed.
gboptsmap = {'heads': 'nodes',
'common': 'nodes',
'obsmarkers': 'boolean',
'bundlecaps': 'scsv',
'listkeys': 'csv',
'cg': 'boolean',
'cbattempted': 'boolean'}
# client side
class wirepeer(peer.peerrepository):
"""Client-side interface for communicating with a peer repository.
Methods commonly call wire protocol commands of the same name.
See also httppeer.py and sshpeer.py for protocol-specific
implementations of this interface.
"""
def batch(self):
if self.capable('batch'):
return remotebatch(self)
else:
return peer.localbatch(self)
def _submitbatch(self, req):
"""run batch request <req> on the server
Returns an iterator of the raw responses from the server.
"""
cmds = []
for op, argsdict in req:
args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
for k, v in argsdict.iteritems())
cmds.append('%s %s' % (op, args))
rsp = self._callstream("batch", cmds=';'.join(cmds))
chunk = rsp.read(1024)
work = [chunk]
while chunk:
while ';' not in chunk and chunk:
chunk = rsp.read(1024)
work.append(chunk)
merged = ''.join(work)
while ';' in merged:
one, merged = merged.split(';', 1)
yield unescapearg(one)
chunk = rsp.read(1024)
work = [merged, chunk]
yield unescapearg(''.join(work))
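    # As an example (command names and node hashes assumed), batching
    # heads() and known() sends a 'cmds' argument like:
    #
    #   "heads ;known nodes=0123abcd..."
    #
    # The reply is one stream of ';'-separated, escaped results which the
    # generator above reassembles and unescapes one result at a time.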
def _submitone(self, op, args):
return self._call(op, **args)
def iterbatch(self):
return remoteiterbatcher(self)
@batchable
def lookup(self, key):
self.requirecap('lookup', _('look up remote revision'))
f = future()
yield {'key': encoding.fromlocal(key)}, f
d = f.value
success, data = d[:-1].split(" ", 1)
if int(success):
yield bin(data)
self._abort(error.RepoError(data))
@batchable
def heads(self):
f = future()
yield {}, f
d = f.value
try:
yield decodelist(d[:-1])
except ValueError:
self._abort(error.ResponseError(_("unexpected response:"), d))
@batchable
def known(self, nodes):
f = future()
yield {'nodes': encodelist(nodes)}, f
d = f.value
try:
yield [bool(int(b)) for b in d]
except ValueError:
self._abort(error.ResponseError(_("unexpected response:"), d))
@batchable
def branchmap(self):
f = future()
yield {}, f
d = f.value
try:
branchmap = {}
for branchpart in d.splitlines():
branchname, branchheads = branchpart.split(' ', 1)
branchname = encoding.tolocal(urlreq.unquote(branchname))
branchheads = decodelist(branchheads)
branchmap[branchname] = branchheads
yield branchmap
except TypeError:
self._abort(error.ResponseError(_("unexpected response:"), d))
def branches(self, nodes):
n = encodelist(nodes)
d = self._call("branches", nodes=n)
try:
br = [tuple(decodelist(b)) for b in d.splitlines()]
return br
except ValueError:
self._abort(error.ResponseError(_("unexpected response:"), d))
def between(self, pairs):
batch = 8 # avoid giant requests
r = []
for i in xrange(0, len(pairs), batch):
n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
d = self._call("between", pairs=n)
try:
r.extend(l and decodelist(l) or [] for l in d.splitlines())
except ValueError:
self._abort(error.ResponseError(_("unexpected response:"), d))
return r
@batchable
def pushkey(self, namespace, key, old, new):
if not self.capable('pushkey'):
yield False, None
f = future()
self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
yield {'namespace': encoding.fromlocal(namespace),
'key': encoding.fromlocal(key),
'old': encoding.fromlocal(old),
'new': encoding.fromlocal(new)}, f
d = f.value
d, output = d.split('\n', 1)
try:
d = bool(int(d))
except ValueError:
raise error.ResponseError(
_('push failed (unexpected response):'), d)
for l in output.splitlines(True):
self.ui.status(_('remote: '), l)
yield d
@batchable
def listkeys(self, namespace):
if not self.capable('pushkey'):
yield {}, None
f = future()
self.ui.debug('preparing listkeys for "%s"\n' % namespace)
yield {'namespace': encoding.fromlocal(namespace)}, f
d = f.value
self.ui.debug('received listkey for "%s": %i bytes\n'
% (namespace, len(d)))
yield pushkeymod.decodekeys(d)
def stream_out(self):
return self._callstream('stream_out')
def changegroup(self, nodes, kind):
n = encodelist(nodes)
f = self._callcompressable("changegroup", roots=n)
return changegroupmod.cg1unpacker(f, 'UN')
def changegroupsubset(self, bases, heads, kind):
self.requirecap('changegroupsubset', _('look up remote changes'))
bases = encodelist(bases)
heads = encodelist(heads)
f = self._callcompressable("changegroupsubset",
bases=bases, heads=heads)
return changegroupmod.cg1unpacker(f, 'UN')
def getbundle(self, source, **kwargs):
self.requirecap('getbundle', _('look up remote changes'))
opts = {}
bundlecaps = kwargs.get('bundlecaps')
if bundlecaps is not None:
kwargs['bundlecaps'] = sorted(bundlecaps)
else:
bundlecaps = () # kwargs could have it to None
for key, value in kwargs.iteritems():
if value is None:
continue
keytype = gboptsmap.get(key)
if keytype is None:
assert False, 'unexpected'
elif keytype == 'nodes':
value = encodelist(value)
elif keytype in ('csv', 'scsv'):
value = ','.join(value)
elif keytype == 'boolean':
value = '%i' % bool(value)
elif keytype != 'plain':
raise KeyError('unknown getbundle option type %s'
% keytype)
opts[key] = value
f = self._callcompressable("getbundle", **opts)
if any((cap.startswith('HG2') for cap in bundlecaps)):
return bundle2.getunbundler(self.ui, f)
else:
return changegroupmod.cg1unpacker(f, 'UN')
def unbundle(self, cg, heads, source):
'''Send cg (a readable file-like object representing the
changegroup to push, typically a chunkbuffer object) to the
remote server as a bundle.
When pushing a bundle10 stream, return an integer indicating the
result of the push (see localrepository.addchangegroup()).
When pushing a bundle20 stream, return a bundle20 stream.'''
if heads != ['force'] and self.capable('unbundlehash'):
heads = encodelist(['hashed',
hashlib.sha1(''.join(sorted(heads))).digest()])
else:
heads = encodelist(heads)
if util.safehasattr(cg, 'deltaheader'):
# this a bundle10, do the old style call sequence
ret, output = self._callpush("unbundle", cg, heads=heads)
if ret == "":
raise error.ResponseError(
_('push failed:'), output)
try:
ret = int(ret)
except ValueError:
raise error.ResponseError(
_('push failed (unexpected response):'), ret)
for l in output.splitlines(True):
self.ui.status(_('remote: '), l)
else:
# bundle2 push. Send a stream, fetch a stream.
stream = self._calltwowaystream('unbundle', cg, heads=heads)
ret = bundle2.getunbundler(self.ui, stream)
return ret
def debugwireargs(self, one, two, three=None, four=None, five=None):
# don't pass optional arguments left at their default value
opts = {}
if three is not None:
opts['three'] = three
if four is not None:
opts['four'] = four
return self._call('debugwireargs', one=one, two=two, **opts)
def _call(self, cmd, **args):
"""execute <cmd> on the server
The command is expected to return a simple string.
returns the server reply as a string."""
raise NotImplementedError()
def _callstream(self, cmd, **args):
"""execute <cmd> on the server
The command is expected to return a stream. Note that if the
command doesn't return a stream, _callstream behaves
differently for ssh and http peers.
returns the server reply as a file like object.
"""
raise NotImplementedError()
def _callcompressable(self, cmd, **args):
"""execute <cmd> on the server
The command is expected to return a stream.
The stream may have been compressed in some implementations. This
function takes care of the decompression. This is the only difference
with _callstream.
returns the server reply as a file like object.
"""
raise NotImplementedError()
def _callpush(self, cmd, fp, **args):
"""execute a <cmd> on server
The command is expected to be related to a push. Push has a special
return method.
returns the server reply as a (ret, output) tuple. ret is either
empty (error) or a stringified int.
"""
raise NotImplementedError()
def _calltwowaystream(self, cmd, fp, **args):
"""execute <cmd> on server
The command will send a stream to the server and get a stream in reply.
"""
raise NotImplementedError()
def _abort(self, exception):
"""clearly abort the wire protocol connection and raise the exception
"""
raise NotImplementedError()
# server side
# wire protocol command can either return a string or one of these classes.
class streamres(object):
"""wireproto reply: binary stream
The call was successful and the result is a stream.
Iterate on the `self.gen` attribute to retrieve chunks.
"""
def __init__(self, gen):
self.gen = gen
class pushres(object):
"""wireproto reply: success with simple integer return
The call was successful and returned an integer contained in `self.res`.
"""
def __init__(self, res):
self.res = res
class pusherr(object):
"""wireproto reply: failure
The call failed. The `self.res` attribute contains the error message.
"""
def __init__(self, res):
self.res = res
class ooberror(object):
"""wireproto reply: failure of a batch of operation
Something failed during a batch call. The error message is stored in
`self.message`.
"""
def __init__(self, message):
self.message = message
def getdispatchrepo(repo, proto, command):
"""Obtain the repo used for processing wire protocol commands.
The intent of this function is to serve as a monkeypatch point for
extensions that need commands to operate on different repo views under
specialized circumstances.
"""
return repo.filtered('served')
def dispatch(repo, proto, command):
repo = getdispatchrepo(repo, proto, command)
func, spec = commands[command]
args = proto.getargs(spec)
return func(repo, proto, *args)
def options(cmd, keys, others):
opts = {}
for k in keys:
if k in others:
opts[k] = others[k]
del others[k]
if others:
sys.stderr.write("warning: %s ignored unexpected arguments %s\n"
% (cmd, ",".join(others)))
return opts
def bundle1allowed(repo, action):
"""Whether a bundle1 operation is allowed from the server.
Priority is:
1. server.bundle1gd.<action> (if generaldelta active)
2. server.bundle1.<action>
3. server.bundle1gd (if generaldelta active)
4. server.bundle1
"""
ui = repo.ui
gd = 'generaldelta' in repo.requirements
if gd:
v = ui.configbool('server', 'bundle1gd.%s' % action, None)
if v is not None:
return v
v = ui.configbool('server', 'bundle1.%s' % action, None)
if v is not None:
return v
if gd:
v = ui.configbool('server', 'bundle1gd', None)
if v is not None:
return v
return ui.configbool('server', 'bundle1', True)
# list of commands
commands = {}
def wireprotocommand(name, args=''):
"""decorator for wire protocol command"""
def register(func):
commands[name] = (func, args)
return func
return register
@wireprotocommand('batch', 'cmds *')
def batch(repo, proto, cmds, others):
repo = repo.filtered("served")
res = []
for pair in cmds.split(';'):
op, args = pair.split(' ', 1)
vals = {}
for a in args.split(','):
if a:
n, v = a.split('=')
vals[n] = unescapearg(v)
func, spec = commands[op]
if spec:
keys = spec.split()
data = {}
for k in keys:
if k == '*':
star = {}
for key in vals.keys():
if key not in keys:
star[key] = vals[key]
data['*'] = star
else:
data[k] = vals[k]
result = func(repo, proto, *[data[k] for k in keys])
else:
result = func(repo, proto)
if isinstance(result, ooberror):
return result
res.append(escapearg(result))
return ';'.join(res)
@wireprotocommand('between', 'pairs')
def between(repo, proto, pairs):
pairs = [decodelist(p, '-') for p in pairs.split(" ")]
r = []
for b in repo.between(pairs):
r.append(encodelist(b) + "\n")
return "".join(r)
@wireprotocommand('branchmap')
def branchmap(repo, proto):
branchmap = repo.branchmap()
heads = []
for branch, nodes in branchmap.iteritems():
branchname = urlreq.quote(encoding.fromlocal(branch))
branchnodes = encodelist(nodes)
heads.append('%s %s' % (branchname, branchnodes))
return '\n'.join(heads)
@wireprotocommand('branches', 'nodes')
def branches(repo, proto, nodes):
nodes = decodelist(nodes)
r = []
for b in repo.branches(nodes):
r.append(encodelist(b) + "\n")
return "".join(r)
@wireprotocommand('clonebundles', '')
def clonebundles(repo, proto):
"""Server command for returning info for available bundles to seed clones.
Clients will parse this response and determine what bundle to fetch.
Extensions may wrap this command to filter or dynamically emit data
depending on the request. e.g. you could advertise URLs for the closest
data center given the client's IP address.
"""
return repo.opener.tryread('clonebundles.manifest')
wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
'known', 'getbundle', 'unbundlehash', 'batch']
def _capabilities(repo, proto):
"""return a list of capabilities for a repo
This function exists to allow extensions to easily wrap capabilities
computation
- returns a lists: easy to alter
- change done here will be propagated to both `capabilities` and `hello`
command without any other action needed.
"""
# copy to prevent modification of the global list
caps = list(wireprotocaps)
if streamclone.allowservergeneration(repo.ui):
if repo.ui.configbool('server', 'preferuncompressed', False):
caps.append('stream-preferred')
requiredformats = repo.requirements & repo.supportedformats
# if our local revlogs are just revlogv1, add 'stream' cap
if not requiredformats - set(('revlogv1',)):
caps.append('stream')
# otherwise, add 'streamreqs' detailing our local revlog format
else:
caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
if repo.ui.configbool('experimental', 'bundle2-advertise', True):
capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
caps.append('bundle2=' + urlreq.quote(capsblob))
caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
caps.append(
'httpheader=%d' % repo.ui.configint('server', 'maxhttpheaderlen', 1024))
if repo.ui.configbool('experimental', 'httppostargs', False):
caps.append('httppostargs')
return caps
# If you are writing an extension and consider wrapping this function. Wrap
# `_capabilities` instead.
@wireprotocommand('capabilities')
def capabilities(repo, proto):
return ' '.join(_capabilities(repo, proto))
@wireprotocommand('changegroup', 'roots')
def changegroup(repo, proto, roots):
nodes = decodelist(roots)
cg = changegroupmod.changegroup(repo, nodes, 'serve')
return streamres(proto.groupchunks(cg))
@wireprotocommand('changegroupsubset', 'bases heads')
def changegroupsubset(repo, proto, bases, heads):
bases = decodelist(bases)
heads = decodelist(heads)
cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
return streamres(proto.groupchunks(cg))
@wireprotocommand('debugwireargs', 'one two *')
def debugwireargs(repo, proto, one, two, others):
# only accept optional args from the known set
opts = options('debugwireargs', ['three', 'four'], others)
return repo.debugwireargs(one, two, **opts)
# List of options accepted by getbundle.
#
# Meant to be extended by extensions. It is the extension's responsibility to
# ensure such options are properly processed in exchange.getbundle.
gboptslist = ['heads', 'common', 'bundlecaps']
@wireprotocommand('getbundle', '*')
def getbundle(repo, proto, others):
opts = options('getbundle', gboptsmap.keys(), others)
for k, v in opts.iteritems():
keytype = gboptsmap[k]
if keytype == 'nodes':
opts[k] = decodelist(v)
elif keytype == 'csv':
opts[k] = list(v.split(','))
elif keytype == 'scsv':
opts[k] = set(v.split(','))
elif keytype == 'boolean':
# Client should serialize False as '0', which is a non-empty string
# so it evaluates as a True bool.
if v == '0':
opts[k] = False
else:
opts[k] = bool(v)
elif keytype != 'plain':
raise KeyError('unknown getbundle option type %s'
% keytype)
if not bundle1allowed(repo, 'pull'):
if not exchange.bundle2requested(opts.get('bundlecaps')):
return ooberror(bundle2required)
cg = exchange.getbundle(repo, 'serve', **opts)
return streamres(proto.groupchunks(cg))
@wireprotocommand('heads')
def heads(repo, proto):
h = repo.heads()
return encodelist(h) + "\n"
@wireprotocommand('hello')
def hello(repo, proto):
'''the hello command returns a set of lines describing various
interesting things about the server, in an RFC822-like format.
Currently the only one defined is "capabilities", which
consists of a line in the form:
capabilities: space separated list of tokens
'''
return "capabilities: %s\n" % (capabilities(repo, proto))
@wireprotocommand('listkeys', 'namespace')
def listkeys(repo, proto, namespace):
d = repo.listkeys(encoding.tolocal(namespace)).items()
return pushkeymod.encodekeys(d)
@wireprotocommand('lookup', 'key')
def lookup(repo, proto, key):
try:
k = encoding.tolocal(key)
c = repo[k]
r = c.hex()
success = 1
except Exception as inst:
r = str(inst)
success = 0
return "%s %s\n" % (success, r)
@wireprotocommand('known', 'nodes *')
def known(repo, proto, nodes, others):
return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
@wireprotocommand('pushkey', 'namespace key old new')
def pushkey(repo, proto, namespace, key, old, new):
# compatibility with pre-1.8 clients which were accidentally
# sending raw binary nodes rather than utf-8-encoded hex
if len(new) == 20 and new.encode('string-escape') != new:
# looks like it could be a binary node
try:
new.decode('utf-8')
new = encoding.tolocal(new) # but cleanly decodes as UTF-8
except UnicodeDecodeError:
pass # binary, leave unmodified
else:
new = encoding.tolocal(new) # normal path
if util.safehasattr(proto, 'restore'):
proto.redirect()
try:
r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
encoding.tolocal(old), new) or False
except error.Abort:
r = False
output = proto.restore()
return '%s\n%s' % (int(r), output)
r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
encoding.tolocal(old), new)
return '%s\n' % int(r)
@wireprotocommand('stream_out')
def stream(repo, proto):
'''If the server supports streaming clone, it advertises the "stream"
capability with a value representing the version and flags of the repo
it is serving. Client checks to see if it understands the format.
'''
if not streamclone.allowservergeneration(repo.ui):
return '1\n'
def getstream(it):
yield '0\n'
for chunk in it:
yield chunk
try:
# LockError may be raised before the first result is yielded. Don't
# emit output until we're sure we got the lock successfully.
it = streamclone.generatev1wireproto(repo)
return streamres(getstream(it))
except error.LockError:
return '2\n'
@wireprotocommand('unbundle', 'heads')
def unbundle(repo, proto, heads):
their_heads = decodelist(heads)
try:
proto.redirect()
exchange.check_heads(repo, their_heads, 'preparing changes')
# write bundle data to temporary file because it can be big
fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
fp = os.fdopen(fd, 'wb+')
r = 0
try:
proto.getfile(fp)
fp.seek(0)
gen = exchange.readbundle(repo.ui, fp, None)
if (isinstance(gen, changegroupmod.cg1unpacker)
and not bundle1allowed(repo, 'push')):
return ooberror(bundle2required)
r = exchange.unbundle(repo, gen, their_heads, 'serve',
proto._client())
if util.safehasattr(r, 'addpart'):
# The return looks streamable, we are in the bundle2 case and
# should return a stream.
return streamres(r.getchunks())
return pushres(r)
finally:
fp.close()
os.unlink(tempname)
except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
# handle non-bundle2 case first
if not getattr(exc, 'duringunbundle2', False):
try:
raise
except error.Abort:
# The old code we moved used sys.stderr directly.
# We did not change it to minimise code change.
                # This needs to be moved to something proper.
# Feel free to do it.
sys.stderr.write("abort: %s\n" % exc)
return pushres(0)
except error.PushRaced:
return pusherr(str(exc))
bundler = bundle2.bundle20(repo.ui)
for out in getattr(exc, '_bundle2salvagedoutput', ()):
bundler.addpart(out)
try:
try:
raise
except error.PushkeyFailed as exc:
# check client caps
remotecaps = getattr(exc, '_replycaps', None)
if (remotecaps is not None
and 'pushkey' not in remotecaps.get('error', ())):
# no support remote side, fallback to Abort handler.
raise
part = bundler.newpart('error:pushkey')
part.addparam('in-reply-to', exc.partid)
if exc.namespace is not None:
part.addparam('namespace', exc.namespace, mandatory=False)
if exc.key is not None:
part.addparam('key', exc.key, mandatory=False)
if exc.new is not None:
part.addparam('new', exc.new, mandatory=False)
if exc.old is not None:
part.addparam('old', exc.old, mandatory=False)
if exc.ret is not None:
part.addparam('ret', exc.ret, mandatory=False)
except error.BundleValueError as exc:
errpart = bundler.newpart('error:unsupportedcontent')
if exc.parttype is not None:
errpart.addparam('parttype', exc.parttype)
if exc.params:
errpart.addparam('params', '\0'.join(exc.params))
except error.Abort as exc:
manargs = [('message', str(exc))]
advargs = []
if exc.hint is not None:
advargs.append(('hint', exc.hint))
bundler.addpart(bundle2.bundlepart('error:abort',
manargs, advargs))
except error.PushRaced as exc:
bundler.newpart('error:pushraced', [('message', str(exc))])
return streamres(bundler.getchunks())
| gpl-2.0 | 8,150,214,625,042,414,000 | 32.890295 | 80 | 0.58491 | false |
COL-IU/XLSearch | library/MZXMLReader.py | 1 | 1872 | import xml.etree.ElementTree as et
import base64
import struct
from Spectrum import *
class MZXMLReader:
def __init__(self, fileName):
self.fileName = fileName
self.baseName = fileName[:fileName.index('.')].split('/')[-1]
def getSpectraList(self, mass, param):
fileName = self.fileName
baseName = self.baseName
basepeakInt = param['basepeakint']
dynamicRange = param['dynamicrange']
xmlObj = et.parse(fileName)
root = xmlObj.getroot()
children = root.getchildren()
children = children[0].getchildren()
spectra = []
for i in range(0, len(children)):
if children[i].tag[-4:] != 'scan':
continue
scanNum = children[i].attrib['num']
retentionTime = int(float(children[i].attrib['retentionTime'][2:-1]))
info = children[i].getchildren()
for j in range(0, len(info)):
if info[j].tag[-11:] == 'precursorMz':
ch = int(info[j].attrib['precursorCharge'])
precursorMZ = float(info[j].text)
elif info[j].tag[-5:] == 'peaks':
base64Peaklist = info[j].text
data = base64.b64decode(base64Peaklist)
if len(data) % 8 != 0:
print 'MZXMLReader: incorrect format of peak content'
numPeaks = len(data) / 8
mz = []
it = []
for k in range(0, numPeaks):
val = data[(k * 8 + 0) : (k * 8 + 4)]
val = val[::-1]
mz.append(struct.unpack('f', val)[0])
val = data[(k * 8 + 4) : (k * 8 + 8)]
val = val[::-1]
it.append(struct.unpack('f', val)[0])
maxInt = max(it)
peaks = zip(mz, it)
peaks = filter(lambda x:x[1] >= dynamicRange * maxInt, peaks)
peaks = zip(*peaks)
mz = list(peaks[0]);
it = list(peaks[1]);
it = map(lambda x : x * basepeakInt / (maxInt), it)
title = baseName + '.' + scanNum + '.' + str(ch)
spectra.append(Spectrum(title, scanNum, precursorMZ, ch, mz, it, retentionTime, mass))
return spectra
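# Sketch of how this reader is typically driven; the file name and parameter
# values are assumptions:
#
#   reader = MZXMLReader('run01.mzXML')
#   param = {'basepeakint': 10000.0, 'dynamicrange': 0.001}
#   spectra = reader.getSpectraList(mass, param)
#
# Each returned Spectrum is titled '<basename>.<scanNum>.<charge>' and its
# peak intensities are rescaled so the base peak equals param['basepeakint'],
# after dropping peaks below dynamicrange * (max intensity).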
| mit | 1,492,171,613,617,418,800 | 28.25 | 89 | 0.605769 | false |
MiniSEC/GRR_clone | lib/communicator.py | 1 | 19629 | #!/usr/bin/env python
"""Abstracts encryption and authentication."""
import hashlib
import time
import zlib
from M2Crypto import BIO
from M2Crypto import EVP
from M2Crypto import Rand
from M2Crypto import RSA
from M2Crypto import X509
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.lib import type_info
from grr.lib import utils
config_lib.DEFINE_integer("Network.api", 3,
"The version of the network protocol the client "
"uses.")
config_lib.DEFINE_string("Network.compression", default="ZCOMPRESS",
help="Type of compression (ZCOMPRESS, UNCOMPRESSED)")
# Constants.
ENCRYPT = 1
DECRYPT = 0
class CommunicatorInit(registry.InitHook):
pre = ["StatsInit"]
def RunOnce(self):
"""This is run only once."""
# Initialize the PRNG.
Rand.rand_seed(Rand.rand_bytes(1000))
# Counters used here
stats.STATS.RegisterCounterMetric("grr_client_unknown")
stats.STATS.RegisterCounterMetric("grr_decoding_error")
stats.STATS.RegisterCounterMetric("grr_decryption_error")
stats.STATS.RegisterCounterMetric("grr_rekey_error")
stats.STATS.RegisterCounterMetric("grr_authenticated_messages")
stats.STATS.RegisterCounterMetric("grr_unauthenticated_messages")
stats.STATS.RegisterCounterMetric("grr_rsa_operations")
class Error(stats.CountingExceptionMixin, Exception):
"""Base class for all exceptions in this module."""
pass
class DecodingError(Error):
"""Raised when the message failed to decrypt or decompress."""
counter = "grr_decoding_error"
class DecryptionError(DecodingError):
"""Raised when the message can not be decrypted properly."""
counter = "grr_decryption_error"
class RekeyError(DecodingError):
"""Raised when the session key is not known and rekeying is needed."""
counter = "grr_rekey_error"
class UnknownClientCert(DecodingError):
"""Raised when the client key is not retrieved."""
counter = "grr_client_unknown"
class PubKeyCache(object):
"""A cache of public keys for different destinations."""
def __init__(self):
self.pub_key_cache = utils.FastStore(max_size=50000)
@staticmethod
def GetCNFromCert(cert):
subject = cert.get_subject()
try:
cn_id = subject.nid["CN"]
cn = subject.get_entries_by_nid(cn_id)[0]
except IndexError:
raise IOError("Cert has no CN")
return rdfvalue.RDFURN(cn.get_data().as_text())
@staticmethod
def PubKeyFromCert(cert):
pub_key = cert.get_pubkey().get_rsa()
bio = BIO.MemoryBuffer()
pub_key.save_pub_key_bio(bio)
return bio.read_all()
def Flush(self):
"""Flushes the cert cache."""
self.pub_key_cache.Flush()
def Put(self, destination, pub_key):
self.pub_key_cache.Put(destination, pub_key)
def GetRSAPublicKey(self, common_name="Server"):
"""Retrieve the relevant public key for that common name.
This maintains a cache of public keys or loads them from external
sources if available.
Args:
common_name: The common_name of the key we need.
Returns:
A valid public key.
"""
try:
pub_key = self.pub_key_cache.Get(common_name)
bio = BIO.MemoryBuffer(pub_key)
return RSA.load_pub_key_bio(bio)
except (KeyError, X509.X509Error):
raise KeyError("No certificate found")
class Cipher(object):
"""Holds keying information."""
hash_function = hashlib.sha256
hash_function_name = "sha256"
cipher_name = "aes_128_cbc"
key_size = 128
iv_size = 128
e_padding = RSA.pkcs1_oaep_padding
# These fields get filled in by the constructor
private_key = None
cipher = None
cipher_metadata = None
encrypted_cipher = None
encrypted_cipher_metadata = None
def __init__(self, source, destination, private_key, pub_key_cache):
self.private_key = private_key
self.cipher = rdfvalue.CipherProperties(
name=self.cipher_name,
key=Rand.rand_pseudo_bytes(self.key_size / 8)[0],
iv=Rand.rand_pseudo_bytes(self.iv_size / 8)[0],
hmac_key=Rand.rand_pseudo_bytes(self.key_size / 8)[0],
)
self.pub_key_cache = pub_key_cache
serialized_cipher = self.cipher.SerializeToString()
self.cipher_metadata = rdfvalue.CipherMetadata()
# Old clients interpret this as a string so we have to omit the "aff4:/"
# prefix on the wire. Can be removed after all clients have been updated.
self.cipher_metadata.SetWireFormat("source",
utils.SmartStr(source.Basename()))
# Sign this cipher.
digest = self.hash_function(serialized_cipher).digest()
# We never want to have a password dialog
private_key = RSA.load_key_string(str(self.private_key),
callback=lambda x: "")
self.cipher_metadata.signature = private_key.sign(
digest, self.hash_function_name)
# Now encrypt the cipher with our key
rsa_key = pub_key_cache.GetRSAPublicKey(destination)
stats.STATS.IncrementCounter("grr_rsa_operations")
self.encrypted_cipher = rsa_key.public_encrypt(
serialized_cipher, self.e_padding)
# Encrypt the metadata block symmetrically.
_, self.encrypted_cipher_metadata = self.Encrypt(
self.cipher_metadata.SerializeToString(), self.cipher.iv)
self.signature_verified = True
def Encrypt(self, data, iv=None):
"""Symmetrically encrypt the data using the optional iv."""
if iv is None:
iv = Rand.rand_pseudo_bytes(self.iv_size / 8)[0]
evp_cipher = EVP.Cipher(alg=self.cipher_name, key=self.cipher.key,
iv=iv, op=ENCRYPT)
ctext = evp_cipher.update(data)
ctext += evp_cipher.final()
return iv, ctext
def Decrypt(self, data, iv):
try:
evp_cipher = EVP.Cipher(alg=self.cipher_name, key=self.cipher.key,
iv=iv, op=DECRYPT)
text = evp_cipher.update(data)
text += evp_cipher.final()
return text
except EVP.EVPError as e:
raise DecryptionError(str(e))
def HMAC(self, data):
hmac = EVP.HMAC(self.cipher.hmac_key, algo="sha1")
hmac.update(data)
return hmac.final()
class ReceivedCipher(Cipher):
"""A cipher which we received from our peer."""
# Indicates if the cipher contained in the response_comms is verified.
signature_verified = False
# pylint: disable=super-init-not-called
def __init__(self, response_comms, private_key, pub_key_cache):
self.private_key = private_key
self.pub_key_cache = pub_key_cache
# Decrypt the message
private_key = RSA.load_key_string(str(self.private_key),
callback=lambda x: "")
try:
self.encrypted_cipher = response_comms.encrypted_cipher
self.serialized_cipher = private_key.private_decrypt(
response_comms.encrypted_cipher, self.e_padding)
self.cipher = rdfvalue.CipherProperties(self.serialized_cipher)
# Check the key lengths.
if (len(self.cipher.key) != self.key_size / 8 or
len(self.cipher.iv) != self.iv_size / 8):
raise DecryptionError("Invalid cipher.")
if response_comms.api_version >= 3:
if len(self.cipher.hmac_key) != self.key_size / 8:
raise DecryptionError("Invalid cipher.")
# New version: cipher_metadata contains information about the cipher.
# Decrypt the metadata symmetrically
self.encrypted_cipher_metadata = (
response_comms.encrypted_cipher_metadata)
self.cipher_metadata = rdfvalue.CipherMetadata(self.Decrypt(
response_comms.encrypted_cipher_metadata, self.cipher.iv))
self.VerifyCipherSignature()
else:
# Old version: To be set once the message is verified.
self.cipher_metadata = None
except RSA.RSAError as e:
raise DecryptionError(e)
def VerifyCipherSignature(self):
"""Verify the signature on the encrypted cipher block."""
if self.cipher_metadata.signature:
digest = self.hash_function(self.serialized_cipher).digest()
try:
remote_public_key = self.pub_key_cache.GetRSAPublicKey(
self.cipher_metadata.source)
stats.STATS.IncrementCounter("grr_rsa_operations")
if remote_public_key.verify(digest, self.cipher_metadata.signature,
self.hash_function_name) == 1:
self.signature_verified = True
else:
raise DecryptionError("Signature not verified by remote public key.")
except (X509.X509Error, RSA.RSAError) as e:
raise DecryptionError(e)
except UnknownClientCert:
pass
class Communicator(object):
"""A class responsible for encoding and decoding comms."""
server_name = None
def __init__(self, certificate=None, private_key=None):
"""Creates a communicator.
Args:
certificate: Our own certificate in string form (as PEM).
private_key: Our own private key in string form (as PEM).
"""
# A cache of cipher objects.
self.cipher_cache = utils.TimeBasedCache()
self.private_key = private_key
self.certificate = certificate
# A cache for encrypted ciphers
self.encrypted_cipher_cache = utils.FastStore(max_size=50000)
# A cache of public keys
self.pub_key_cache = PubKeyCache()
self._LoadOurCertificate()
def _LoadOurCertificate(self):
self.cert = X509.load_cert_string(str(self.certificate))
# Our common name
self.common_name = PubKeyCache.GetCNFromCert(self.cert)
# Make sure we know about our own public key
self.pub_key_cache.Put(
self.common_name, self.pub_key_cache.PubKeyFromCert(self.cert))
def EncodeMessageList(self, message_list, signed_message_list):
"""Encode the MessageList into the signed_message_list rdfvalue."""
# By default uncompress
uncompressed_data = message_list.SerializeToString()
signed_message_list.message_list = uncompressed_data
if config_lib.CONFIG["Network.compression"] == "ZCOMPRESS":
compressed_data = zlib.compress(uncompressed_data)
# Only compress if it buys us something.
if len(compressed_data) < len(uncompressed_data):
signed_message_list.compression = (
rdfvalue.SignedMessageList.CompressionType.ZCOMPRESSION)
signed_message_list.message_list = compressed_data
def EncodeMessages(self, message_list, result, destination=None,
timestamp=None, api_version=2):
"""Accepts a list of messages and encodes for transmission.
This function signs and then encrypts the payload.
Args:
message_list: A MessageList rdfvalue containing a list of
GrrMessages.
result: A ClientCommunication rdfvalue which will be filled in.
destination: The CN of the remote system this should go to.
timestamp: A timestamp to use for the signed messages. If None - use the
current time.
api_version: The api version which this should be encoded in.
Returns:
A nonce (based on time) which is inserted to the encrypted payload. The
client can verify that the server is able to decrypt the message and
return the nonce.
Raises:
RuntimeError: If we do not support this api version.
"""
if api_version not in [2, 3]:
raise RuntimeError("Unsupported api version.")
if destination is None:
destination = self.server_name
# Make a nonce for this transaction
if timestamp is None:
self.timestamp = timestamp = long(time.time() * 1000000)
# Do we have a cached cipher to talk to this destination?
try:
cipher = self.cipher_cache.Get(destination)
except KeyError:
# Make a new one
cipher = Cipher(self.common_name, destination, self.private_key,
self.pub_key_cache)
self.cipher_cache.Put(destination, cipher)
signed_message_list = rdfvalue.SignedMessageList(timestamp=timestamp)
self.EncodeMessageList(message_list, signed_message_list)
# TODO(user): This is for backwards compatibility. Remove when all
# clients are moved to new scheme.
if api_version == 2:
signed_message_list.SetWireFormat(
"source", utils.SmartStr(self.common_name.Basename()))
# Old scheme - message list is signed.
digest = cipher.hash_function(signed_message_list.message_list).digest()
# We never want to have a password dialog
private_key = RSA.load_key_string(str(self.private_key),
callback=lambda x: "")
signed_message_list.signature = private_key.sign(
digest, cipher.hash_function_name)
elif api_version == 3:
result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata
# Include the encrypted cipher.
result.encrypted_cipher = cipher.encrypted_cipher
serialized_message_list = signed_message_list.SerializeToString()
# Encrypt the message symmetrically.
if api_version >= 3:
# New scheme cipher is signed plus hmac over message list.
result.iv, result.encrypted = cipher.Encrypt(serialized_message_list)
result.hmac = cipher.HMAC(result.encrypted)
else:
_, result.encrypted = cipher.Encrypt(serialized_message_list,
cipher.cipher.iv)
result.api_version = api_version
if isinstance(result, rdfvalue.RDFValue):
# Store the number of messages contained.
result.num_messages = len(message_list)
return timestamp
def DecryptMessage(self, encrypted_response):
"""Decrypt the serialized, encrypted string.
Args:
encrypted_response: A serialized and encrypted string.
Returns:
a Signed_Message_List rdfvalue
"""
try:
response_comms = rdfvalue.ClientCommunication(encrypted_response)
return self.DecodeMessages(response_comms)
except (rdfvalue.DecodeError, type_info.TypeValueError) as e:
raise DecodingError("Protobuf parsing error: %s" % e)
def DecompressMessageList(self, signed_message_list):
"""Decompress the message data from signed_message_list.
Args:
signed_message_list: A SignedMessageList rdfvalue with some data in it.
Returns:
a MessageList rdfvalue.
Raises:
DecodingError: If decompression fails.
"""
compression = signed_message_list.compression
if compression == rdfvalue.SignedMessageList.CompressionType.UNCOMPRESSED:
data = signed_message_list.message_list
elif compression == rdfvalue.SignedMessageList.CompressionType.ZCOMPRESSION:
try:
data = zlib.decompress(signed_message_list.message_list)
except zlib.error as e:
raise DecodingError("Failed to decompress: %s" % e)
else:
raise DecodingError("Compression scheme not supported")
try:
result = rdfvalue.MessageList(data)
except rdfvalue.DecodeError:
raise DecodingError("RDFValue parsing failed.")
return result
def DecodeMessages(self, response_comms):
"""Extract and verify server message.
Args:
response_comms: A ClientCommunication rdfvalue
Returns:
list of messages and the CN where they came from.
Raises:
DecryptionError: If the message failed to decrypt properly.
"""
if response_comms.api_version not in [2, 3]:
raise DecryptionError("Unsupported api version.")
if response_comms.encrypted_cipher:
# Have we seen this cipher before?
try:
cipher = self.encrypted_cipher_cache.Get(
response_comms.encrypted_cipher)
except KeyError:
cipher = ReceivedCipher(response_comms, self.private_key,
self.pub_key_cache)
if cipher.signature_verified:
# Remember it for next time.
self.encrypted_cipher_cache.Put(response_comms.encrypted_cipher,
cipher)
# Add entropy to the PRNG.
Rand.rand_add(response_comms.encrypted, len(response_comms.encrypted))
# Decrypt the messages
iv = response_comms.iv or cipher.cipher.iv
plain = cipher.Decrypt(response_comms.encrypted, iv)
try:
signed_message_list = rdfvalue.SignedMessageList(plain)
except rdfvalue.DecodeError as e:
raise DecryptionError(str(e))
message_list = self.DecompressMessageList(signed_message_list)
else:
# The message is not encrypted. We do not allow unencrypted
# messages:
raise DecryptionError("Server response is not encrypted.")
# Are these messages authenticated?
auth_state = self.VerifyMessageSignature(
response_comms, signed_message_list, cipher,
response_comms.api_version)
# Mark messages as authenticated and where they came from.
for msg in message_list.job:
msg.auth_state = auth_state
msg.SetWireFormat("source", utils.SmartStr(
cipher.cipher_metadata.source.Basename()))
return (message_list.job, cipher.cipher_metadata.source,
signed_message_list.timestamp)
def VerifyMessageSignature(self, response_comms, signed_message_list,
cipher, api_version):
"""Verify the message list signature.
This is the way the messages are verified in the client.
In the client we also check that the nonce returned by the server is correct
(the timestamp doubles as a nonce). If the nonce fails we deem the response
unauthenticated since it might have resulted from a replay attack.
Args:
response_comms: The raw response_comms rdfvalue.
signed_message_list: The SignedMessageList rdfvalue from the server.
cipher: The cipher belonging to the remote end.
api_version: The api version we should use.
Returns:
a rdfvalue.GrrMessage.AuthorizationState.
Raises:
DecryptionError: if the message is corrupt.
"""
result = rdfvalue.GrrMessage.AuthorizationState.UNAUTHENTICATED
if api_version < 3:
# Old version: signature is on the message_list
digest = cipher.hash_function(
signed_message_list.message_list).digest()
remote_public_key = self.pub_key_cache.GetRSAPublicKey(
signed_message_list.source)
stats.STATS.IncrementCounter("grr_rsa_operations")
if remote_public_key.verify(digest, signed_message_list.signature,
cipher.hash_function_name) == 1:
stats.STATS.IncrementCounter("grr_authenticated_messages")
result = rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED
else:
if cipher.HMAC(response_comms.encrypted) != response_comms.hmac:
raise DecryptionError("HMAC verification failed.")
# Give the cipher another chance to check its signature.
if not cipher.signature_verified:
cipher.VerifyCipherSignature()
if cipher.signature_verified:
stats.STATS.IncrementCounter("grr_authenticated_messages")
result = rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED
# Check for replay attacks. We expect the server to return the same
# timestamp nonce we sent.
if signed_message_list.timestamp != self.timestamp:
result = rdfvalue.GrrMessage.AuthorizationState.UNAUTHENTICATED
if not cipher.cipher_metadata:
# Fake the metadata
cipher.cipher_metadata = rdfvalue.CipherMetadata()
cipher.cipher_metadata.SetWireFormat(
"source", utils.SmartStr(signed_message_list.source.Basename()))
return result
| apache-2.0 | 1,933,268,720,333,809,700 | 32.045455 | 80 | 0.675174 | false |
Pardus-Ahtapot/GDYS | ahtapot-gdys-gui/var/opt/gdysgui/gitlab_check.py | 1 | 4270 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gitlab
import os
import config_parser as CP
from datetime import datetime
import sys
from dmrlogger import Syslogger
from dmrlogger import Filelogger
from time import sleep
import subprocess
abs_path = os.path.abspath(__file__)
path_list = abs_path.split("/")
del path_list[-1]
path_name="/".join(path_list)
full_path = path_name + "/"
if os.path.exists(full_path + "current_user.dmr"):
with open(full_path + "current_user.dmr") as current_user:
user = current_user.readline()
else:
user = subprocess.check_output(["whoami"])
logger = Syslogger("FWBUILDER-AHTAPOT",'%(name)s %(levelname)s %(message)s',"/dev/log", user)
filelogger = Filelogger("FWBUILDER-AHTAPOT",'%(asctime)s %(name)s %(levelname)s %(message)s',"/var/log/ahtapot/gdys-gui.log","a", user)
def gitlab_connect(gitlab_url,user,password):
git = gitlab.Gitlab(gitlab_url)
git.login(user=user,password=password)
return git
def check_mergerequest(git,project_id):
sleep(1)
mergerequests = git.getmergerequests(project_id)
if mergerequests != False:
for merge in mergerequests:
if merge["state"] == "opened" or merge["state"] == "reopened":
return False
return True
def create_mergerequest(git,project_id,source_branch,target_branch,title):
return git.createmergerequest(project_id,source_branch,target_branch,title)
def get_mergerequest_status(git,project_id):
if git.getmergerequests(project_id)!=False:
if len(git.getmergerequests(project_id)) != 0:
return git.getmergerequests(project_id)[0]["state"]
return False
def check_merge_confirm():
abs_path = os.path.abspath(__file__)
path_list = abs_path.split("/")
del path_list[-1]
path_name = "/".join(path_list)
full_path = path_name + "/"
if os.path.exists(full_path + "onay.dmr"):
return True
return False
def get_projects(git):
print git.getprojects()
def set_project_id(git,project_name):
projects = git.getprojects()
for project in projects:
if project["name"] == project_name:
CP.set_gitlab_config({"project_id":project["id"]})
def check_gitlab_connection(config):
try:
git = gitlab.Gitlab(str(config["gitlab_url"]))
git.login(user=config["gitlab_user"],password=config["gitlab_pass"])
return True,git
except Exception as exc_err:
logger.send_log("error", " Can't connect gitlab \n"+str(exc_err))
filelogger.send_log("error", " Can't connect gitlab \n"+str(exc_err))
return u"Gitlab bağlantı bilgileri hatalı.",False
def check_gitlab_settings(git,config):
error_message = ""
check_project = False
project_id = ""
for project in git.getprojects():
if project["name"] == config["gitlab_project_name"]:
check_project = True
project_id = project["id"]
break
if check_project == False:
return u" Proje Adı Hatalı "
check_confirm_branch = False
check_merge_branch = False
for branch in git.getbranches(project_id):
if branch["name"] == config["gitlab_confirm_branch"]:
check_confirm_branch = True
if branch["name"] == config["gitlab_master_branch"]:
check_merge_branch = True
if check_confirm_branch == False:
return u" Onay Dalı Hatalı "
if check_merge_branch == False:
return u" Ana Dal Hatalı "
return True
def return_date(dt):
dt_list = dt.split(".")
del dt_list[-1]
dt = "".join(dt_list)
dt = datetime.strptime(dt,"%Y-%m-%dT%H:%M:%S")
new_date = dt.strftime("%d/%m/%Y %H:%M:%S")
return new_date
def get_master_date(git,project_id,master_name):
projects = git.getbranches(project_id)
dt = ""
for project in projects:
if project["name"] == master_name:
dt = project["commit"]["committed_date"]
if dt!="":
return return_date(dt)
return False
def get_master_commit_id(git,project_id,master_name):
projects = git.getbranches(project_id)
if projects != False:
for project in projects:
if project["name"] == master_name:
return str(project["commit"]["id"])
return False
| gpl-3.0 | 1,185,567,113,086,604,300 | 30.57037 | 135 | 0.638198 | false |
sophie63/FlyLFM | stanford_lfanalyze_v0.4/lflib/calibration/frames.py | 1 | 6602 | # Frame of reference manager for scope.py
#
# We manage the following frame of reference here:
#
# - Camera pixels to camera lenslets
# - Camera lenslets to projector lenslets
# - Projector pixels to projector lenslets
# - Camera lenslets to x,y positions in micrometers in the native plane.
import numpy as np
from calibration.constants import *
import os, math, sys
# ------------------------------------------------------------------
# FRAME NAMES
# ------------------------------------------------------------------
CAMERA_PIXEL_FRAME = 1
PROJECTOR_PIXEL_FRAME = 2
CAMERA_LENSLET_FRAME = 3
PROJECTOR_LENSLET_FRAME = 4
SAMPLE_MICRON_FRAME = 5
# ------------------------------------------------------------------
# FRAME MANAGER CLASS
# ------------------------------------------------------------------
# FrameManager Class
#
# This class helps to keep track of the various frames of reference
# commonly used in the uScope application.
class FrameManager(object):
def __init__(self):
self.campixel_to_camlens = AffineWarp()
self.projpixel_to_projlens = AffineWarp()
self.camlens_to_projpixel = AffineWarp()
# For the frame manager to reload the calibration files
def reload_calibration(self, root_directory):
try:
self.campixel_to_camlens.load(os.path.join(root_directory, CALIBRATION_CAMPIXEL_TO_CAMLENS))
# self.projpixel_to_projlens.load(os.path.join(root_directory, CALIBRATION_PROJPIXEL_TO_PROJLENS))
# self.camlens_to_projpixel.load(os.path.join(root_directory, CALIBRATION_CAMLENS_TO_PROJPIXEL))
except IOError:
print 'ERROR: could not load calibration file!'
sys.exit(1)
self.camlens_to_sample = None
# For the frame manager to reload the calibration files
def save_calibration(self, directory):
try:
self.campixel_to_camlens.save(os.path.join(directory,
os.path.basename(CALIBRATION_CAMPIXEL_TO_CAMLENS)))
self.projpixel_to_projlens.save(os.path.join(directory,
os.path.basename(CALIBRATION_PROJPIXEL_TO_PROJLENS)))
self.camlens_to_projpixel.save(os.path.join(directory,
os.path.basename(CALIBRATION_CAMLENS_TO_PROJPIXEL)))
except IOError:
print 'WARNING: could not load calibration files. The system needs to be calibrated.'
self.camlens_to_sample = None
# Coordinate transform method
def transform_coordinates(self, coords, src_frame, dst_frame):
if (dst_frame == CAMERA_PIXEL_FRAME):
if (src_frame == PROJECTOR_PIXEL_FRAME):
return self.transform_coordinates(self.camlens_to_projpixel.reverse(coords),
CAMERA_LENSLET_FRAME, dst_frame)
if (src_frame == PROJECTOR_LENSLET_FRAME):
return self.transform_coordinates(self.projpixel_to_projlens.reverse(coords),
PROJECTOR_PIXEL_FRAME, dst_frame)
if (src_frame == CAMERA_LENSLET_FRAME):
return self.campixel_to_camlens.reverse(coords)
if (src_frame == SAMPLE_MICRON_FRAME):
return self.transform_coordinates(self.camlens_to_sample.reverse(coords),
CAMERA_LENSLET_FRAME, dst_frame)
if (dst_frame == PROJECTOR_PIXEL_FRAME):
if (src_frame == CAMERA_PIXEL_FRAME):
return self.transform_coordinates(self.campixel_to_camlens.forward(coords),
CAMERA_LENSLET_FRAME, dst_frame)
if (src_frame == PROJECTOR_LENSLET_FRAME):
return self.projpixel_to_projlens.reverse(coords)
if (src_frame == CAMERA_LENSLET_FRAME):
return self.camlens_to_projpixel.forward(coords)
if (src_frame == SAMPLE_MICRON_FRAME):
return self.transform_coordinates(self.camlens_to_sample.reverse(coords),
CAMERA_LENSLET_FRAME, dst_frame)
if (dst_frame == CAMERA_LENSLET_FRAME):
if (src_frame == CAMERA_PIXEL_FRAME):
return self.campixel_to_camlens.forward(coords)
if (src_frame == PROJECTOR_LENSLET_FRAME):
return self.transform_coordinates(self.projpixel_to_projlens.reverse(coords),
PROJECTOR_PIXEL_FRAME, dst_frame)
if (src_frame == PROJECTOR_PIXEL_FRAME):
return self.camlens_to_projpixel.reverse(coords)
if (src_frame == SAMPLE_MICRON_FRAME):
return self.camlens_to_sample.reverse(coords)
if (dst_frame == PROJECTOR_LENSLET_FRAME):
if (src_frame == CAMERA_PIXEL_FRAME):
return self.transform_coordinates(self.campixel_to_camlens.forward(coords),
CAMERA_LENSLET_FRAME, dst_frame)
if (src_frame == CAMERA_LENSLET_FRAME):
return self.transform_coordinates(self.camlens_to_projpixel.forward(coords),
PROJECTOR_PIXEL_FRAME, dst_frame)
if (src_frame == PROJECTOR_PIXEL_FRAME):
return self.projpixel_to_projlens.forward(coords)
if (src_frame == SAMPLE_MICRON_FRAME):
return self.transform_coordinates(self.camlens_to_sample.reverse(coords),
CAMERA_LENSLET_FRAME, dst_frame)
if (dst_frame == SAMPLE_MICRON_FRAME):
if (src_frame == CAMERA_PIXEL_FRAME):
return self.transform_coordinates(self.campixel_to_camlens.forward(coords),
CAMERA_LENSLET_FRAME, dst_frame)
if (src_frame == CAMERA_LENSLET_FRAME):
return self.camlens_to_sample.forward(coords)
if (src_frame == PROJECTOR_LENSLET_FRAME):
return self.transform_coordinates(self.projpixel_to_projlens.reverse(coords),
PROJECTOR_PIXEL_FRAME, dst_frame)
if (src_frame == PROJECTOR_PIXEL_FRAME):
return self.transform_coordinates(self.camlens_to_projpixel.reverse(coords),
CAMERA_LENSLET_FRAME, dst_frame)
| bsd-2-clause | -4,746,820,665,191,476,000 | 51.396825 | 110 | 0.556952 | false |
jkyeung/XlsxWriter | xlsxwriter/test/worksheet/test_sparkline03.py | 1 | 5440 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with no cell data."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.name = 'Sheet1'
worksheet.excel_version = 2010
data = [-2, 2, 3, -1, 0]
worksheet.write_row('A1', data)
worksheet.write_row('A2', data)
# Set up sparklines.
worksheet.add_sparkline('F1', {'range': 'Sheet1!A1:E1'})
worksheet.add_sparkline('F2', {'range': 'Sheet1!A2:E2'})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1:E2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData>
<row r="1" spans="1:5" x14ac:dyDescent="0.25">
<c r="A1">
<v>-2</v>
</c>
<c r="B1">
<v>2</v>
</c>
<c r="C1">
<v>3</v>
</c>
<c r="D1">
<v>-1</v>
</c>
<c r="E1">
<v>0</v>
</c>
</row>
<row r="2" spans="1:5" x14ac:dyDescent="0.25">
<c r="A2">
<v>-2</v>
</c>
<c r="B2">
<v>2</v>
</c>
<c r="C2">
<v>3</v>
</c>
<c r="D2">
<v>-1</v>
</c>
<c r="E2">
<v>0</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup displayEmptyCellsAs="gap">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A2:E2</xm:f>
<xm:sqref>F2</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
<x14:sparklineGroup displayEmptyCellsAs="gap">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A1:E1</xm:f>
<xm:sqref>F1</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
</x14:sparklineGroups>
</ext>
</extLst>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| bsd-2-clause | -5,980,905,048,024,247,000 | 41.5 | 337 | 0.415809 | false |
souradeep100/messenger_smart_entertainer | server.py | 1 | 1851 | # coding: utf-8
import warnings
import os
from flask import Flask, request
from subprocess import call
import chatbot
import messenger
app = Flask(__name__)
FACEBOOK_TOKEN = "page token generated on messenger page of facebook app dashboard"
bot = None
@app.route('/', methods=['GET'])
def verify():
if request.args.get('hub.verify_token', '') == 'the token for verification given during webhook':
return request.args.get('hub.challenge', '')
else:
return 'Error, wrong validation token'
@app.route('/', methods=['POST'])
def webhook():
payload = request.get_data()
for sender, message in messenger.messaging_events(payload):
print "Incoming from %s: %s" % (sender, message)
# response = bot.respond_to(message)
if "hi" in message:
response ="hello: type 1:for play music 2: for youtube arijit singh"
print "Outgoing to %s: %s" % (sender, response)
messenger.send_message(FACEBOOK_TOKEN, sender, response)
if "1" in message:
response ="playing now"
print "Outgoing to %s: %s" % (sender, response)
call(["/usr/bin/rhythmbox-client","--play"])
messenger.send_message(FACEBOOK_TOKEN, sender, response)
if "2" in message:
response ="playing arijit singh youtube"
print "Outgoing to %s: %s" % (sender, response)
call(["/usr/bin/google-chrome"," https://www.youtube.com/watch?v=Z7hD0TUV24c"])
messenger.send_message(FACEBOOK_TOKEN, sender, response)
return "ok"
if __name__ == '__main__':
# Suppress nltk warnings about not enough data
warnings.filterwarnings('ignore', '.*returning an arbitrary sample.*',)
if os.path.exists("corpus.txt"):
bot = chatbot.Bot(open("corpus.txt").read())
app.run(port=8080, debug=True)
| apache-2.0 | 6,455,081,532,198,493,000 | 33.924528 | 101 | 0.63047 | false |
ideasman42/isect_segments-bentley_ottmann | tests/data/test_degenerate_colinear_02.py | 1 | 3561 | data = (
((0.200000, 0.700000), (0.900000, 0.700000)),
((0.000000, -1.000000), (0.000000, 0.900000)),
((0.100000, 0.700000), (0.800000, 0.700000)),
((0.000000, -0.900000), (0.000000, 1.000000)),
((0.200000, 0.600000), (0.900000, 0.600000)),
((0.100000, 0.600000), (0.800000, 0.600000)),
((0.200000, 0.500000), (0.900000, 0.500000)),
((0.100000, 0.500000), (0.800000, 0.500000)),
((0.200000, 0.400000), (0.900000, 0.400000)),
((0.100000, 0.400000), (0.800000, 0.400000)),
((0.200000, 0.300000), (0.900000, 0.300000)),
((0.100000, 0.300000), (0.800000, 0.300000)),
((0.200000, 0.200000), (0.900000, 0.200000)),
((0.100000, 0.200000), (0.800000, 0.200000)),
((0.200000, 0.100000), (0.900000, 0.100000)),
((0.100000, 0.100000), (0.800000, 0.100000)),
((0.200000, 0.800000), (0.900000, 0.800000)),
((0.100000, 0.800000), (0.800000, 0.800000)),
((0.200000, 0.900000), (0.900000, 0.900000)),
((0.100000, 0.900000), (0.800000, 0.900000)),
((-0.900000, -0.300000), (-0.100000, -0.300000)),
((-1.000000, -0.300000), (-0.200000, -0.300000)),
((-0.900000, -0.400000), (-0.100000, -0.400000)),
((-1.000000, -0.400000), (-0.200000, -0.400000)),
((-0.900000, -0.500000), (-0.100000, -0.500000)),
((-1.000000, -0.500000), (-0.200000, -0.500000)),
((-0.900000, -0.600000), (-0.100000, -0.600000)),
((-1.000000, -0.600000), (-0.200000, -0.600000)),
((-0.900000, -0.700000), (-0.100000, -0.700000)),
((-1.000000, -0.700000), (-0.200000, -0.700000)),
((-0.900000, -0.800000), (-0.100000, -0.800000)),
((-1.000000, -0.800000), (-0.200000, -0.800000)),
((-0.900000, -0.900000), (-0.100000, -0.900000)),
((-1.000000, -0.900000), (-0.200000, -0.900000)),
((-0.900000, -0.200000), (-0.100000, -0.200000)),
((-1.000000, -0.200000), (-0.200000, -0.200000)),
((-0.900000, -0.100000), (-0.100000, -0.100000)),
((-1.000000, -0.100000), (-0.200000, -0.100000)),
((-0.700000, 0.200000), (-0.700000, 0.900000)),
((-0.700000, 0.100000), (-0.700000, 0.800000)),
((-0.600000, 0.200000), (-0.600000, 0.900000)),
((-0.600000, 0.100000), (-0.600000, 0.800000)),
((-0.500000, 0.200000), (-0.500000, 0.900000)),
((-0.500000, 0.100000), (-0.500000, 0.800000)),
((-0.400000, 0.200000), (-0.400000, 0.900000)),
((-0.400000, 0.100000), (-0.400000, 0.800000)),
((-0.300000, 0.200000), (-0.300000, 0.900000)),
((-0.300000, 0.100000), (-0.300000, 0.800000)),
((-0.200000, 0.200000), (-0.200000, 0.900000)),
((-0.200000, 0.100000), (-0.200000, 0.800000)),
((-0.100000, 0.200000), (-0.100000, 0.900000)),
((-0.100000, 0.100000), (-0.100000, 0.800000)),
((-0.800000, 0.200000), (-0.800000, 0.900000)),
((-0.800000, 0.100000), (-0.800000, 0.800000)),
((-0.900000, 0.200000), (-0.900000, 0.900000)),
((-0.900000, 0.100000), (-0.900000, 0.800000)),
((0.300000, -0.800000), (0.300000, -0.100000)),
((0.300000, -0.900000), (0.300000, -0.200000)),
((0.400000, -0.800000), (0.400000, -0.100000)),
((0.400000, -0.900000), (0.400000, -0.200000)),
((0.500000, -0.800000), (0.500000, -0.100000)),
((0.500000, -0.900000), (0.500000, -0.200000)),
((0.600000, -0.800000), (0.600000, -0.100000)),
((0.600000, -0.900000), (0.600000, -0.200000)),
((0.700000, -0.800000), (0.700000, -0.100000)),
((0.700000, -0.900000), (0.700000, -0.200000)),
((0.800000, -0.800000), (0.800000, -0.100000)),
((0.800000, -0.900000), (0.800000, -0.200000)),
((0.900000, -0.800000), (0.900000, -0.100000)),
((0.900000, -0.900000), (0.900000, -0.200000)),
((0.200000, -0.800000), (0.200000, -0.100000)),
((0.200000, -0.900000), (0.200000, -0.200000)),
((0.100000, -0.800000), (0.100000, -0.100000)),
((0.100000, -0.900000), (0.100000, -0.200000)),
)
| mit | 6,510,588,838,798,951,000 | 45.855263 | 49 | 0.582982 | false |
jabesq/home-assistant | homeassistant/components/pushover/notify.py | 1 | 4061 | """Pushover platform for notify component."""
import logging
import voluptuous as vol
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TARGET, ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA,
BaseNotificationService)
_LOGGER = logging.getLogger(__name__)
ATTR_ATTACHMENT = 'attachment'
CONF_USER_KEY = 'user_key'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USER_KEY): cv.string,
vol.Required(CONF_API_KEY): cv.string,
})
def get_service(hass, config, discovery_info=None):
"""Get the Pushover notification service."""
from pushover import InitError
try:
return PushoverNotificationService(
hass, config[CONF_USER_KEY], config[CONF_API_KEY])
except InitError:
_LOGGER.error("Wrong API key supplied")
return None
class PushoverNotificationService(BaseNotificationService):
"""Implement the notification service for Pushover."""
def __init__(self, hass, user_key, api_token):
"""Initialize the service."""
from pushover import Client
self._hass = hass
self._user_key = user_key
self._api_token = api_token
self.pushover = Client(
self._user_key, api_token=self._api_token)
def send_message(self, message='', **kwargs):
"""Send a message to a user."""
from pushover import RequestError
# Make a copy and use empty dict if necessary
data = dict(kwargs.get(ATTR_DATA) or {})
data['title'] = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
# Check for attachment.
if ATTR_ATTACHMENT in data:
# If attachment is a URL, use requests to open it as a stream.
if data[ATTR_ATTACHMENT].startswith('http'):
try:
import requests
response = requests.get(
data[ATTR_ATTACHMENT],
stream=True,
timeout=5)
if response.status_code == 200:
# Replace the attachment identifier with file object.
data[ATTR_ATTACHMENT] = response.content
else:
_LOGGER.error('Image not found')
# Remove attachment key to send without attachment.
del data[ATTR_ATTACHMENT]
except requests.exceptions.RequestException as ex_val:
_LOGGER.error(ex_val)
# Remove attachment key to try sending without attachment
del data[ATTR_ATTACHMENT]
else:
# Not a URL, check valid path first
if self._hass.config.is_allowed_path(data[ATTR_ATTACHMENT]):
# try to open it as a normal file.
try:
file_handle = open(data[ATTR_ATTACHMENT], 'rb')
# Replace the attachment identifier with file object.
data[ATTR_ATTACHMENT] = file_handle
except OSError as ex_val:
_LOGGER.error(ex_val)
# Remove attachment key to send without attachment.
del data[ATTR_ATTACHMENT]
else:
_LOGGER.error('Path is not whitelisted')
# Remove attachment key to send without attachment.
del data[ATTR_ATTACHMENT]
targets = kwargs.get(ATTR_TARGET)
if not isinstance(targets, list):
targets = [targets]
for target in targets:
if target is not None:
data['device'] = target
try:
self.pushover.send_message(message, **data)
except ValueError as val_err:
_LOGGER.error(val_err)
except RequestError:
_LOGGER.exception("Could not send pushover notification")
| apache-2.0 | 1,982,162,388,152,748,800 | 36.256881 | 77 | 0.565624 | false |
partofthething/home-assistant | homeassistant/components/smartthings/fan.py | 1 | 3105 | """Support for fans through the SmartThings cloud API."""
import math
from typing import Optional, Sequence
from pysmartthings import Capability
from homeassistant.components.fan import SUPPORT_SET_SPEED, FanEntity
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
SPEED_RANGE = (1, 3) # off is not included
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add fans for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
async_add_entities(
[
SmartThingsFan(device)
for device in broker.devices.values()
if broker.any_assigned(device.device_id, "fan")
]
)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
"""Return all capabilities supported if minimum required are present."""
supported = [Capability.switch, Capability.fan_speed]
# Must have switch and fan_speed
if all(capability in capabilities for capability in supported):
return supported
class SmartThingsFan(SmartThingsEntity, FanEntity):
"""Define a SmartThings Fan."""
async def async_set_percentage(self, percentage: int) -> None:
"""Set the speed percentage of the fan."""
if percentage is None:
await self._device.switch_on(set_status=True)
elif percentage == 0:
await self._device.switch_off(set_status=True)
else:
value = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
await self._device.set_fan_speed(value, set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
async def async_turn_on(
self,
speed: str = None,
percentage: int = None,
preset_mode: str = None,
**kwargs,
) -> None:
"""Turn the fan on."""
await self.async_set_percentage(percentage)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the fan off."""
await self._device.switch_off(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_write_ha_state()
@property
def is_on(self) -> bool:
"""Return true if fan is on."""
return self._device.status.switch
@property
def percentage(self) -> int:
"""Return the current speed percentage."""
return ranged_value_to_percentage(SPEED_RANGE, self._device.status.fan_speed)
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
return int_states_in_range(SPEED_RANGE)
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
| mit | -5,242,772,678,061,566,000 | 33.120879 | 85 | 0.655717 | false |
kevin2seedlink/newrelic-plugin-agent | newrelic_plugin_agent/plugins/postgresql.py | 1 | 13830 | """
PostgreSQL Plugin
"""
import logging
import psycopg2
from psycopg2 import extensions
from psycopg2 import extras
from newrelic_plugin_agent.plugins import base
LOGGER = logging.getLogger(__name__)
ARCHIVE = """SELECT CAST(COUNT(*) AS INT) AS file_count,
CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT)
AS ready_count,CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),
0) AS INT) AS done_count FROM pg_catalog.pg_ls_dir('pg_xlog/archive_status')
AS archive_files (archive_file);"""
BACKENDS = """SELECT count(*) - ( SELECT count(*) FROM pg_stat_activity WHERE
current_query = '<IDLE>' ) AS backends_active, ( SELECT count(*) FROM
pg_stat_activity WHERE current_query = '<IDLE>' ) AS backends_idle
FROM pg_stat_activity;"""
BACKENDS_9_2 = """SELECT count(*) - ( SELECT count(*) FROM pg_stat_activity WHERE
state = 'idle' ) AS backends_active, ( SELECT count(*) FROM
pg_stat_activity WHERE state = 'idle' ) AS backends_idle
FROM pg_stat_activity;"""
TABLE_SIZE_ON_DISK = """SELECT ((sum(relpages)* 8) * 1024) AS
size_relations FROM pg_class WHERE relkind IN ('r', 't');"""
TABLE_COUNT = """SELECT count(1) as relations FROM pg_class WHERE
relkind IN ('r', 't');"""
INDEX_SIZE_ON_DISK = """SELECT ((sum(relpages)* 8) * 1024) AS
size_indexes FROM pg_class WHERE relkind = 'i';"""
INDEX_COUNT = """SELECT count(1) as indexes FROM pg_class WHERE
relkind = 'i';"""
TRANSACTIONS = """SELECT sum(xact_commit) AS transactions_committed,
sum(xact_rollback) AS transactions_rollback, sum(blks_read) AS blocks_read,
sum(blks_hit) AS blocks_hit, sum(tup_returned) AS tuples_returned,
sum(tup_fetched) AS tuples_fetched, sum(tup_inserted) AS tuples_inserted,
sum(tup_updated) AS tuples_updated, sum(tup_deleted) AS tuples_deleted
FROM pg_stat_database;"""
STATIO = """SELECT sum(heap_blks_read) AS heap_blocks_read, sum(heap_blks_hit)
AS heap_blocks_hit, sum(idx_blks_read) AS index_blocks_read, sum(idx_blks_hit)
AS index_blocks_hit, sum(toast_blks_read) AS toast_blocks_read,
sum(toast_blks_hit) AS toast_blocks_hit, sum(tidx_blks_read)
AS toastindex_blocks_read, sum(tidx_blks_hit) AS toastindex_blocks_hit
FROM pg_statio_all_tables WHERE schemaname <> 'pg_catalog';"""
BGWRITER = 'SELECT * FROM pg_stat_bgwriter;'
DATABASE = 'SELECT * FROM pg_stat_database;'
LOCKS = 'SELECT mode, count(mode) AS count FROM pg_locks ' \
'GROUP BY mode ORDER BY mode;'
REPLICATION = """
SELECT
client_hostname,
client_addr,
state,
sent_offset - (
replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag
FROM (
SELECT
client_addr, client_hostname, state,
('x' || lpad(split_part(sent_location, '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
('x' || lpad(split_part(replay_location, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
('x' || lpad(split_part(sent_location, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
('x' || lpad(split_part(replay_location, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
FROM pg_stat_replication
) AS s;
"""
LOCK_MAP = {'AccessExclusiveLock': 'Locks/Access Exclusive',
'AccessShareLock': 'Locks/Access Share',
'ExclusiveLock': 'Locks/Exclusive',
'RowExclusiveLock': 'Locks/Row Exclusive',
'RowShareLock': 'Locks/Row Share',
'ShareUpdateExclusiveLock': 'Locks/Update Exclusive Lock',
'ShareLock': 'Locks/Share',
'ShareRowExclusiveLock': 'Locks/Share Row Exclusive',
'SIReadLock': 'Locks/SI Read'}
class PostgreSQL(base.Plugin):
GUID = 'com.meetme.newrelic_postgresql_agent'
def add_stats(self, cursor):
self.add_backend_stats(cursor)
self.add_bgwriter_stats(cursor)
self.add_database_stats(cursor)
self.add_lock_stats(cursor)
if self.config.get('relation_stats', True):
self.add_index_stats(cursor)
self.add_statio_stats(cursor)
self.add_table_stats(cursor)
self.add_replication_stats(cursor)
self.add_transaction_stats(cursor)
# add_wal_metrics needs superuser to get directory listings
if self.config.get('superuser', True):
self.add_wal_stats(cursor)
def add_database_stats(self, cursor):
cursor.execute(DATABASE)
temp = cursor.fetchall()
for row in temp:
database = row['datname']
self.add_gauge_value('Database/%s/Backends' % database, 'processes',
row.get('numbackends', 0))
self.add_derive_value('Database/%s/Transactions/Committed' %
database, 'transactions',
int(row.get('xact_commit', 0)))
self.add_derive_value('Database/%s/Transactions/Rolled Back' %
database, 'transactions',
int(row.get('xact_rollback', 0)))
self.add_derive_value('Database/%s/Tuples/Read from Disk' %
database, 'tuples',
int(row.get('blks_read', 0)))
self.add_derive_value('Database/%s/Tuples/Read cache hit' %
database, 'tuples',
int(row.get('blks_hit', 0)))
self.add_derive_value('Database/%s/Tuples/Returned/From Sequential '
'Scan' % database, 'tuples',
int(row.get('tup_returned', 0)))
self.add_derive_value('Database/%s/Tuples/Returned/From Bitmap '
'Scan' % database, 'tuples',
int(row.get('tup_fetched', 0)))
self.add_derive_value('Database/%s/Tuples/Writes/Inserts' %
database, 'tuples',
int(row.get('tup_inserted', 0)))
self.add_derive_value('Database/%s/Tuples/Writes/Updates' %
database, 'tuples',
int(row.get('tup_updated', 0)))
self.add_derive_value('Database/%s/Tuples/Writes/Deletes' %
database, 'tuples',
int(row.get('tup_deleted', 0)))
self.add_derive_value('Database/%s/Conflicts' %
database, 'tuples',
int(row.get('conflicts', 0)))
def add_backend_stats(self, cursor):
if self.server_version < (9, 2, 0):
cursor.execute(BACKENDS)
else:
cursor.execute(BACKENDS_9_2)
temp = cursor.fetchone()
self.add_gauge_value('Backends/Active', 'processes',
temp.get('backends_active', 0))
self.add_gauge_value('Backends/Idle', 'processes',
temp.get('backends_idle', 0))
def add_bgwriter_stats(self, cursor):
cursor.execute(BGWRITER)
temp = cursor.fetchone()
self.add_derive_value('Background Writer/Checkpoints/Scheduled',
'checkpoints',
temp.get('checkpoints_timed', 0))
self.add_derive_value('Background Writer/Checkpoints/Requested',
'checkpoints',
temp.get('checkpoints_requests', 0))
def add_index_stats(self, cursor):
cursor.execute(INDEX_COUNT)
temp = cursor.fetchone()
self.add_gauge_value('Objects/Indexes', 'indexes',
temp.get('indexes', 0))
cursor.execute(INDEX_SIZE_ON_DISK)
temp = cursor.fetchone()
self.add_gauge_value('Disk Utilization/Indexes', 'bytes',
temp.get('size_indexes', 0))
def add_lock_stats(self, cursor):
cursor.execute(LOCKS)
temp = cursor.fetchall()
for lock in LOCK_MAP:
found = False
for row in temp:
if row['mode'] == lock:
found = True
self.add_gauge_value(LOCK_MAP[lock], 'locks',
int(row['count']))
if not found:
self.add_gauge_value(LOCK_MAP[lock], 'locks', 0)
def add_statio_stats(self, cursor):
cursor.execute(STATIO)
temp = cursor.fetchone()
self.add_derive_value('IO Operations/Heap/Reads', 'iops',
int(temp.get('heap_blocks_read', 0)))
self.add_derive_value('IO Operations/Heap/Hits', 'iops',
int(temp.get('heap_blocks_hit', 0)))
self.add_derive_value('IO Operations/Index/Reads', 'iops',
int(temp.get('index_blocks_read', 0)))
self.add_derive_value('IO Operations/Index/Hits', 'iops',
int(temp.get('index_blocks_hit', 0)))
self.add_derive_value('IO Operations/Toast/Reads', 'iops',
int(temp.get('toast_blocks_read', 0)))
self.add_derive_value('IO Operations/Toast/Hits', 'iops',
int(temp.get('toast_blocks_hit', 0)))
self.add_derive_value('IO Operations/Toast Index/Reads', 'iops',
int(temp.get('toastindex_blocks_read', 0)))
self.add_derive_value('IO Operations/Toast Index/Hits', 'iops',
int(temp.get('toastindex_blocks_hit', 0)))
def add_table_stats(self, cursor):
cursor.execute(TABLE_COUNT)
temp = cursor.fetchone()
self.add_gauge_value('Objects/Tables', 'tables',
temp.get('relations', 0))
cursor.execute(TABLE_SIZE_ON_DISK)
temp = cursor.fetchone()
self.add_gauge_value('Disk Utilization/Tables', 'bytes',
temp.get('size_relations', 0))
def add_transaction_stats(self, cursor):
cursor.execute(TRANSACTIONS)
temp = cursor.fetchone()
self.add_derive_value('Transactions/Committed', 'transactions',
int(temp.get('transactions_committed', 0)))
self.add_derive_value('Transactions/Rolled Back', 'transactions',
int(temp.get('transactions_rollback', 0)))
self.add_derive_value('Tuples/Read from Disk', 'tuples',
int(temp.get('blocks_read', 0)))
self.add_derive_value('Tuples/Read cache hit', 'tuples',
int(temp.get('blocks_hit', 0)))
self.add_derive_value('Tuples/Returned/From Sequential Scan',
'tuples',
int(temp.get('tuples_returned', 0)))
self.add_derive_value('Tuples/Returned/From Bitmap Scan',
'tuples',
int(temp.get('tuples_fetched', 0)))
self.add_derive_value('Tuples/Writes/Inserts', 'tuples',
int(temp.get('tuples_inserted', 0)))
self.add_derive_value('Tuples/Writes/Updates', 'tuples',
int(temp.get('tuples_updated', 0)))
self.add_derive_value('Tuples/Writes/Deletes', 'tuples',
int(temp.get('tuples_deleted', 0)))
def add_wal_stats(self, cursor):
cursor.execute(ARCHIVE)
temp = cursor.fetchone()
self.add_derive_value('Archive Status/Total', 'files',
temp.get('file_count', 0))
self.add_gauge_value('Archive Status/Ready', 'files',
temp.get('ready_count', 0))
self.add_derive_value('Archive Status/Done', 'files',
temp.get('done_count', 0))
def add_replication_stats(self, cursor):
cursor.execute(REPLICATION)
temp = cursor.fetchall()
for row in temp:
self.add_gauge_value('Replication/%s' % row.get('client_addr', 'Unknown'),
'byte_lag',
int(row.get('byte_lag', 0)))
def connect(self):
"""Connect to PostgreSQL, returning the connection object.
:rtype: psycopg2.connection
"""
conn = psycopg2.connect(**self.connection_arguments)
conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
return conn
@property
def connection_arguments(self):
"""Create connection parameter dictionary for psycopg2.connect
:return dict: The dictionary to be passed to psycopg2.connect
via double-splat
"""
filtered_args = ["name", "superuser", "relation_stats"]
args = {}
for key in set(self.config) - set(filtered_args):
if key == 'dbname':
args['database'] = self.config[key]
else:
args[key] = self.config[key]
return args
def poll(self):
self.initialize()
try:
self.connection = self.connect()
except psycopg2.OperationalError as error:
LOGGER.critical('Could not connect to %s, skipping stats run: %s',
self.__class__.__name__, error)
return
cursor = self.connection.cursor(cursor_factory=extras.RealDictCursor)
self.add_stats(cursor)
cursor.close()
self.connection.close()
self.finish()
@property
def server_version(self):
"""Return connection server version in PEP 369 format
:returns: tuple
"""
return (self.connection.server_version % 1000000 / 10000,
self.connection.server_version % 10000 / 100,
self.connection.server_version % 100)
| bsd-3-clause | -4,528,111,542,233,855,000 | 44.196078 | 100 | 0.555242 | false |
mattaustin/django-thummer | thummer/tasks.py | 1 | 1197 | # -*- coding: utf-8 -*-
#
# Copyright 2011-2018 Matt Austin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
try:
from celery import shared_task
except ImportError:
from functools import wraps
# Faux shared_task decorator
def shared_task(*args, **kwargs):
def factory(func):
@wraps(func)
def decorator(*args, **kwargs):
return func(*args, **kwargs)
return decorator
return factory
@shared_task(ignore_result=True)
def capture(pk):
from .models import WebpageSnapshot
instance = WebpageSnapshot.objects.get(pk=pk)
return instance._capture()
| apache-2.0 | 4,169,595,673,646,505,500 | 28.925 | 74 | 0.696742 | false |
RonnyPfannschmidt/pytest-xdist | src/xdist/remote.py | 1 | 9117 | """
This module is executed in remote subprocesses and helps to
control a remote testing session and relay back information.
It assumes that 'py' is importable and does not have dependencies
on the rest of the xdist code. This means that the xdist-plugin
needs not to be installed in remote environments.
"""
import sys
import os
import time
import py
import _pytest.hookspec
import pytest
from execnet.gateway_base import dumps, DumpError
from _pytest.config import _prepareconfig, Config
class WorkerInteractor(object):
def __init__(self, config, channel):
self.config = config
self.workerid = config.workerinput.get("workerid", "?")
self.log = py.log.Producer("worker-%s" % self.workerid)
if not config.option.debug:
py.log.setconsumer(self.log._keywords, None)
self.channel = channel
config.pluginmanager.register(self)
def sendevent(self, name, **kwargs):
self.log("sending", name, kwargs)
self.channel.send((name, kwargs))
def pytest_internalerror(self, excrepr):
for line in str(excrepr).split("\n"):
self.log("IERROR>", line)
def pytest_sessionstart(self, session):
self.session = session
workerinfo = getinfodict()
self.sendevent("workerready", workerinfo=workerinfo)
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(self, exitstatus):
# in pytest 5.0+, exitstatus is an IntEnum object
self.config.workeroutput["exitstatus"] = int(exitstatus)
yield
self.sendevent("workerfinished", workeroutput=self.config.workeroutput)
def pytest_collection(self, session):
self.sendevent("collectionstart")
def pytest_runtestloop(self, session):
self.log("entering main loop")
torun = []
while 1:
try:
name, kwargs = self.channel.receive()
except EOFError:
return True
self.log("received command", name, kwargs)
if name == "runtests":
torun.extend(kwargs["indices"])
elif name == "runtests_all":
torun.extend(range(len(session.items)))
self.log("items to run:", torun)
# only run if we have an item and a next item
while len(torun) >= 2:
self.run_one_test(torun)
if name == "shutdown":
if torun:
self.run_one_test(torun)
break
return True
def run_one_test(self, torun):
items = self.session.items
self.item_index = torun.pop(0)
item = items[self.item_index]
if torun:
nextitem = items[torun[0]]
else:
nextitem = None
start = time.time()
self.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
duration = time.time() - start
self.sendevent(
"runtest_protocol_complete", item_index=self.item_index, duration=duration
)
def pytest_collection_finish(self, session):
self.sendevent(
"collectionfinish",
topdir=str(session.fspath),
ids=[item.nodeid for item in session.items],
)
def pytest_runtest_logstart(self, nodeid, location):
self.sendevent("logstart", nodeid=nodeid, location=location)
# the pytest_runtest_logfinish hook was introduced in pytest 3.4
if hasattr(_pytest.hookspec, "pytest_runtest_logfinish"):
def pytest_runtest_logfinish(self, nodeid, location):
self.sendevent("logfinish", nodeid=nodeid, location=location)
def pytest_runtest_logreport(self, report):
data = self.config.hook.pytest_report_to_serializable(
config=self.config, report=report
)
data["item_index"] = self.item_index
data["worker_id"] = self.workerid
assert self.session.items[self.item_index].nodeid == report.nodeid
self.sendevent("testreport", data=data)
def pytest_collectreport(self, report):
# send only reports that have not passed to master as optimization (#330)
if not report.passed:
data = self.config.hook.pytest_report_to_serializable(
config=self.config, report=report
)
self.sendevent("collectreport", data=data)
# the pytest_logwarning hook was deprecated since pytest 4.0
if hasattr(
_pytest.hookspec, "pytest_logwarning"
) and not _pytest.hookspec.pytest_logwarning.pytest_spec.get("warn_on_impl"):
def pytest_logwarning(self, message, code, nodeid, fslocation):
self.sendevent(
"logwarning",
message=message,
code=code,
nodeid=nodeid,
fslocation=str(fslocation),
)
# the pytest_warning_captured hook was introduced in pytest 3.8
if hasattr(_pytest.hookspec, "pytest_warning_captured"):
def pytest_warning_captured(self, warning_message, when, item):
self.sendevent(
"warning_captured",
warning_message_data=serialize_warning_message(warning_message),
when=when,
# item cannot be serialized and will always be None when used with xdist
item=None,
)
def serialize_warning_message(warning_message):
if isinstance(warning_message.message, Warning):
message_module = type(warning_message.message).__module__
message_class_name = type(warning_message.message).__name__
message_str = str(warning_message.message)
# check now if we can serialize the warning arguments (#349)
# if not, we will just use the exception message on the master node
try:
dumps(warning_message.message.args)
except DumpError:
message_args = None
else:
message_args = warning_message.message.args
else:
message_str = warning_message.message
message_module = None
message_class_name = None
message_args = None
if warning_message.category:
category_module = warning_message.category.__module__
category_class_name = warning_message.category.__name__
else:
category_module = None
category_class_name = None
result = {
"message_str": message_str,
"message_module": message_module,
"message_class_name": message_class_name,
"message_args": message_args,
"category_module": category_module,
"category_class_name": category_class_name,
}
# access private _WARNING_DETAILS because the attributes vary between Python versions
for attr_name in warning_message._WARNING_DETAILS:
if attr_name in ("message", "category"):
continue
attr = getattr(warning_message, attr_name)
# Check if we can serialize the warning detail, marking `None` otherwise
# Note that we need to define the attr (even as `None`) to allow deserializing
try:
dumps(attr)
except DumpError:
result[attr_name] = repr(attr)
else:
result[attr_name] = attr
return result
def getinfodict():
import platform
return dict(
version=sys.version,
version_info=tuple(sys.version_info),
sysplatform=sys.platform,
platform=platform.platform(),
executable=sys.executable,
cwd=os.getcwd(),
)
def remote_initconfig(option_dict, args):
option_dict["plugins"].append("no:terminal")
return Config.fromdictargs(option_dict, args)
def setup_config(config, basetemp):
config.option.looponfail = False
config.option.usepdb = False
config.option.dist = "no"
config.option.distload = False
config.option.numprocesses = None
config.option.maxprocesses = None
config.option.basetemp = basetemp
if __name__ == "__channelexec__":
channel = channel # noqa
workerinput, args, option_dict, change_sys_path = channel.receive()
if change_sys_path:
importpath = os.getcwd()
sys.path.insert(0, importpath)
os.environ["PYTHONPATH"] = (
importpath + os.pathsep + os.environ.get("PYTHONPATH", "")
)
os.environ["PYTEST_XDIST_WORKER"] = workerinput["workerid"]
os.environ["PYTEST_XDIST_WORKER_COUNT"] = str(workerinput["workercount"])
if hasattr(Config, "InvocationParams"):
config = _prepareconfig(args, None)
else:
config = remote_initconfig(option_dict, args)
config.args = args
setup_config(config, option_dict.get("basetemp"))
config._parser.prog = os.path.basename(workerinput["mainargv"][0])
config.workerinput = workerinput
config.workeroutput = {}
# TODO: deprecated name, backward compatibility only. Remove it in future
config.slaveinput = config.workerinput
config.slaveoutput = config.workeroutput
interactor = WorkerInteractor(config, channel)
config.hook.pytest_cmdline_main(config=config)
| mit | 4,634,997,689,573,559,000 | 34.337209 | 89 | 0.62707 | false |
crf1111/Bio-Informatics-Learning | Bio-StrongHold/src/Character_Based_Phylogeny.py | 1 | 3663 | import newick_git
class Node:
def __init__(self, taxon = None):
self.adjs = set()
for adj in self.adjs:
adj.add_adj(self)
self.taxon = taxon
def add_adj(self, branch):
self.adjs.add(branch)
if not branch.is_adj(self):
branch.add_adj(self)
def prune(self, branchs):
self.adjs = self.adjs.difference(set(branchs))
for branch in branchs:
if branch.is_adj(self):
branch.prune([self])
def fold(self, par = None):
# Internal node
if self.taxon == None:
ret = ",".join([adj.fold(self) for adj in self.adjs if adj != par])
if ret != "":
ret = "(" + ret + ")"
else: # Taxon node
ret = self.taxon
return ret
def taxa(self, par = None, ret = None):
if ret == None:
ret = set()
if self.taxon != None:
ret.add(self.taxon)
for adj in self.adjs:
if adj != par:
ret = adj.taxa(self,ret)
return ret
def split(self,cat):
branches = set()
bcat = set()
for adj in self.adjs:
subtaxa = adj.taxa(self)
if subtaxa.issubset(cat):
branches.add(adj)
bcat = bcat.union(subtaxa)
elif len(subtaxa.intersection(cat)) > 0:
return False
if bcat == cat:
ni = Node()
internals.append(ni)
self.prune(branches)
self.add_adj(ni)
for b in branches:
ni.add_adj(b)
return True
return False
def is_internal(self):
return self.taxon == None
def is_adj(self, branch):
return branch in self.adjs
def __str__(self):
if self.is_internal():
return str([adj.taxon for adj in self.adjs])
return str(self.taxon)
def __repr__(self):
return "taxon:" + str(self)
def check_struct(self):
for adj in self.adjs:
assert(self in adj.adjs)
def initialize(taxa):
global internals
root = Node()
internals = []
internals.append(root)
for taxon in taxa:
root.add_adj(Node(taxon))
def split(char,taxa):
cat = set()
for i in range(len(taxa)):
if char[i] == "0":
cat.add(taxa[i])
success = False
for n in internals:
if n.split(cat):
success = True
break
return success
def invert(s):
ret = ""
for i in range(len(s)):
if s[i] == "0":
ret += "1"
else:
ret += "0"
return ret
def gen_char(cat,taxa):
ret = ""
for i in range(len(taxa)):
if taxa[i] in cat:
ret += "1"
else:
ret += "0"
return ret
def consistent(chars,taxa):
initialize(taxa)
for char in chars:
if not split(char,taxa):
return False
return True
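# Illustrative sketch with made-up data (hypothetical helper, not used by the script):
# each character string assigns a 0/1 state to the taxa in order, and consistent()
# reports whether all of the implied splits fit on a single tree.
def _example_consistency_check():
    example_taxa = ["cat", "dog", "eel", "fly"]
    example_chars = ["0011", "0111"]
    return consistent(example_chars, example_taxa)  # True for this toy input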
if __name__ == '__main__':
internals = []
with open('data/data.dat') as f:
line = f.readline()
line = line.strip()
taxa = line.split()
initialize(taxa)
chars = []
char = f.readline()
char = char.strip()
while char != "":
split(char,taxa)
chars.append(char)
char = f.readline()
char = char.strip()
r = internals.pop()
output = r.fold() + ";"
t = newick_git.newick_parse(output)
r = newick_git.edge_splits(t,taxa)
for char in chars:
assert(char in r or invert(char) in r)
print output | mit | 7,473,962,612,973,214,000 | 19.469274 | 79 | 0.487851 | false |
renskiy/fabricio | tests/__init__.py | 1 | 9002 | import argparse
import itertools
import shlex
import unittest2 as unittest
class FabricioTestCase(unittest.TestCase):
def command_checker(self, args_parsers=(), expected_args_set=(), side_effects=()):
def check_command_args(command, **kwargs):
try:
command_parser = next(args_parsers)
expected_command_args = next(expected_args_set)
side_effect = next(side_effects)
except StopIteration:
self.fail('unexpected command: {0}'.format(command))
args = shlex.split(command)
# removing \x00 necessary for Python 2.6
args = map(lambda arg: arg.replace('\x00', ''), args)
self.assertDictEqual(
expected_command_args,
vars(command_parser.parse_args(args)),
)
if isinstance(side_effect, Exception):
raise side_effect
return side_effect
if isinstance(args_parsers, argparse.ArgumentParser):
args_parsers = itertools.repeat(args_parsers)
else:
args_parsers = iter(args_parsers)
expected_args_set = iter(expected_args_set)
side_effects = iter(side_effects)
return check_command_args
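# Illustrative sketch (hypothetical usage, not exercised by these helpers): the checker
# returned by command_checker() is normally installed as the side_effect of a mocked
# run/sudo callable; `test_case` and `mocked_run` below are assumed stand-ins.
def _example_command_checker_usage(test_case, mocked_run):
    expected = vars(docker_run_args_parser.parse_args(['docker', 'run', 'image']))
    mocked_run.side_effect = test_case.command_checker(
        args_parsers=docker_run_args_parser,
        expected_args_set=[expected],
        side_effects=[SucceededResult()],
    )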
class SucceededResult(str):
succeeded = True
failed = False
class FailedResult(str):
succeeded = False
failed = True
docker_run_args_parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
docker_run_args_parser.add_argument('executable', nargs=1)
docker_run_args_parser.add_argument('run_or_create', nargs=1)
docker_run_args_parser.add_argument('--user')
docker_run_args_parser.add_argument('--publish', action='append')
docker_run_args_parser.add_argument('--env', action='append')
docker_run_args_parser.add_argument('--volume', action='append')
docker_run_args_parser.add_argument('--link', action='append')
docker_run_args_parser.add_argument('--label', action='append')
docker_run_args_parser.add_argument('--add-host', action='append', dest='add-host')
docker_run_args_parser.add_argument('--net')
docker_run_args_parser.add_argument('--network')
docker_run_args_parser.add_argument('--mount')
docker_run_args_parser.add_argument('--restart')
docker_run_args_parser.add_argument('--stop-signal', dest='stop-signal')
docker_run_args_parser.add_argument('--detach', action='store_true')
docker_run_args_parser.add_argument('--tty', action='store_true')
docker_run_args_parser.add_argument('--interactive', action='store_true')
docker_run_args_parser.add_argument('--rm', action='store_true')
docker_run_args_parser.add_argument('--name')
docker_run_args_parser.add_argument('--custom-option', dest='custom-option')
docker_run_args_parser.add_argument('image')
docker_run_args_parser.add_argument('command', nargs=argparse.REMAINDER)
args_parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
args_parser.add_argument('args', nargs=argparse.REMAINDER)
# TODO use args_parser instead
docker_inspect_args_parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
docker_inspect_args_parser.add_argument('executable', nargs=2)
docker_inspect_args_parser.add_argument('--type')
docker_inspect_args_parser.add_argument('image_or_container')
# TODO use args_parser instead
docker_entity_inspect_args_parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
docker_entity_inspect_args_parser.add_argument('executable', nargs=3)
docker_entity_inspect_args_parser.add_argument('--format')
docker_entity_inspect_args_parser.add_argument('service')
docker_service_update_args_parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
docker_service_update_args_parser.add_argument('executable', nargs=3)
docker_service_update_args_parser.add_argument('--env-add', dest='env-add', action='append')
docker_service_update_args_parser.add_argument('--env-rm', dest='env-rm', action='append')
docker_service_update_args_parser.add_argument('--image')
docker_service_update_args_parser.add_argument('--mount-add', dest='mount-add', action='append')
docker_service_update_args_parser.add_argument('--mount-rm', dest='mount-rm', action='append')
docker_service_update_args_parser.add_argument('--name')
docker_service_update_args_parser.add_argument('--publish-add', dest='publish-add', action='append')
docker_service_update_args_parser.add_argument('--publish-rm', dest='publish-rm', action='append')
docker_service_update_args_parser.add_argument('--label-add', dest='label-add', action='append')
docker_service_update_args_parser.add_argument('--label-rm', dest='label-rm', action='append')
docker_service_update_args_parser.add_argument('--constraint-add', dest='constraint-add', action='append')
docker_service_update_args_parser.add_argument('--constraint-rm', dest='constraint-rm', action='append')
docker_service_update_args_parser.add_argument('--container-label-add', dest='container-label-add', action='append')
docker_service_update_args_parser.add_argument('--container-label-rm', dest='container-label-rm', action='append')
docker_service_update_args_parser.add_argument('--network-add', dest='network-add', action='append')
docker_service_update_args_parser.add_argument('--network-rm', dest='network-rm', action='append')
docker_service_update_args_parser.add_argument('--secret-add', dest='secret-add', action='append')
docker_service_update_args_parser.add_argument('--secret-rm', dest='secret-rm', action='append')
docker_service_update_args_parser.add_argument('--replicas')
docker_service_update_args_parser.add_argument('--restart-condition', dest='restart-condition')
docker_service_update_args_parser.add_argument('--user')
docker_service_update_args_parser.add_argument('--stop-grace-period', dest='stop-grace-period')
docker_service_update_args_parser.add_argument('--args')
docker_service_update_args_parser.add_argument('--custom_option')
docker_service_update_args_parser.add_argument('service')
docker_service_create_args_parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
docker_service_create_args_parser.add_argument('executable', nargs=3)
docker_service_create_args_parser.add_argument('--env', action='append')
docker_service_create_args_parser.add_argument('--mount', action='append')
docker_service_create_args_parser.add_argument('--name')
docker_service_create_args_parser.add_argument('--publish', action='append')
docker_service_create_args_parser.add_argument('--label', action='append')
docker_service_create_args_parser.add_argument('--host', action='append')
docker_service_create_args_parser.add_argument('--secret', action='append')
docker_service_create_args_parser.add_argument('--config', action='append')
docker_service_create_args_parser.add_argument('--group', action='append')
docker_service_create_args_parser.add_argument('--dns', action='append')
docker_service_create_args_parser.add_argument('--constraint', action='append')
docker_service_create_args_parser.add_argument('--container-label', dest='container-label', action='append')
docker_service_create_args_parser.add_argument('--placement-pref', dest='placement-pref', action='append')
docker_service_create_args_parser.add_argument('--dns-option', dest='dns-option', action='append')
docker_service_create_args_parser.add_argument('--dns-search', dest='dns-search', action='append')
docker_service_create_args_parser.add_argument('--replicas')
docker_service_create_args_parser.add_argument('--restart-condition', dest='restart-condition')
docker_service_create_args_parser.add_argument('--user')
docker_service_create_args_parser.add_argument('--network')
docker_service_create_args_parser.add_argument('--mode')
docker_service_create_args_parser.add_argument('--stop-grace-period', dest='stop-grace-period')
docker_service_create_args_parser.add_argument('--custom_option')
docker_service_create_args_parser.add_argument('image', nargs=1)
docker_service_create_args_parser.add_argument('args', nargs=argparse.REMAINDER)
docker_build_args_parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
docker_build_args_parser.add_argument('executable', nargs=2)
docker_build_args_parser.add_argument('--tag')
docker_build_args_parser.add_argument('--no-cache', type=int, dest='no-cache')
docker_build_args_parser.add_argument('--pull', nargs='?', const=True, type=int)
docker_build_args_parser.add_argument('--force-rm', nargs='?', const=True, type=int, dest='force-rm')
docker_build_args_parser.add_argument('--custom')
docker_build_args_parser.add_argument('--custom-bool', nargs='?', const=True, type=int, dest='custom-bool')
docker_build_args_parser.add_argument('path')
class Command(object):
def __init__(self, parser, args):
self.parser = parser
self.args = args
def __eq__(self, command):
command_args = vars(self.parser.parse_args(shlex.split(command)))
return self.args == command_args
def __ne__(self, command):
return not self.__eq__(command)
| mit | -4,580,777,568,621,133,300 | 53.228916 | 116 | 0.733504 | false |
pulkitpagare/adagio | adagio/core/featureAnalysis.py | 1 | 6583 | #!/usr/bin/python
# ADAGIO Android Application Graph-based Classification
# featureAnalysis.py >> Analysis of features from SVM linear model
# Copyright (c) 2013 Hugo Gascon <[email protected]>
import os
import sys
import ml
import eval
import FCGextractor
import instructionSet
import random
import collections
import pz
import numpy as np
import scipy as sp
from random import shuffle
from progressbar import *
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
"""
Example:
import featureAnalysis as fa
w_binary = clf.best_estimator_.coef_[0]
w_agg = fa.aggregate_binary_svm_weights(w_binary, 13)
"""
def print_largest_weights(w_agg, n):
""" Print the largest weights
"""
idx = w_agg.argsort()[::-1][:n]
w_agg_highest = w_agg[idx]
labels = [np.binary_repr(i,15) for i in idx]
print zip(w_agg_highest, labels)
def aggregate_binary_svm_weights(w_binary, expansion_bits):
""" Return the aggregated version of the SVM weight vector considering
the binary representation length of the original non-binary feature.
Args:
w_binary: an array of SVM weights related to binary features.
expansion_bits: the number of bits used to represent each feature in the
original feature vector.
Returns:
w: the aggregated version of the SVM weight vector
"""
feature_idx = len(w_binary) / expansion_bits #should be a int
w = np.array([sum( w_binary[expansion_bits * i : expansion_bits * (i + 1)] ) for i in range(feature_idx)])
return w
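# Illustrative sketch with synthetic numbers (hypothetical helper, not used elsewhere):
# two original features expanded into 3 binary features each, so aggregation sums every
# group of 3 binary weights back into one weight per original feature.
def _example_aggregate_binary_svm_weights():
    w_binary = np.array([0.1, 0.2, 0.3, 1.0, -1.0, 0.5])
    return aggregate_binary_svm_weights(w_binary, 3)  # approximately array([0.6, 0.5])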
def compute_neighborhoods_per_weights(d, w, n_weights, n_files=300):
""" Write report with info about highed ranked neighborhoods in a samples
according to the weights learnt by the linear SVM model.
Args:
d: directory of the files to be processed
w: linear SVM weights
n_weights: number of weights to analyze
n_files: number of files to process from directory d
Returns:
Outputs the file feature_analysis.txt
"""
files = read_files(d, "fcgnx.pz", n_files)
sorted_weights_idx = w.argsort()[::-1]
f_out = "feature_analysis.txt".format(n_weights)
print "[*] Writing file {0}...".format(f_out)
fd = open(f_out, 'wb')
# fd.write("Total number of weights in SVM model: {0}\n".format(len(w)))
# fd.write("Selected number of highest weights per sample: {0}\n".format(n_weights))
for f in files:
fn = os.path.join(d,f)
        neighborhoods, n_nodes = get_high_ranked_neighborhoods(fn, w, sorted_weights_idx, weights=n_weights)
try:
if neighborhoods:
fd.write("\n\n#########################################\n\n")
fd.write(os.path.basename(f)+"\n\n")
fd.write("nodes: {0}\n\n".format(n_nodes))
fd.write("\n".join(neighborhoods))
except:
pass
fd.close()
print "[*] File written."
def get_high_ranked_neighborhoods(fcgnx_file, w, sorted_weights_idx, show_small=False, weights=1):
# g = FCGextractor.build_cfgnx(fcgnx_file)
g = pz.load(fcgnx_file)
g_hash = ml.neighborhood_hash(g)
neighborhoods = []
remaining_weights = weights
for idx in sorted_weights_idx:
if remaining_weights > 0:
label_bin = np.binary_repr( idx, 15 )
label = np.array( [ int(i) for i in label_bin ] )
matching_neighborhoods = []
for m, nh in g_hash.node.iteritems():
if np.array_equal( nh["label"], label ):
neighbors_l = g_hash.neighbors(m)
if neighbors_l:
neighbors = '\n'.join([str(i) for i in neighbors_l])
matching_neighborhoods.append("{0}\n{1}\n{2}\n".format(w[idx],
m, neighbors))
else:
if show_small:
matching_neighborhoods.append("{0}\n{1}\n".format(w[idx], m))
if matching_neighborhoods:
remaining_weights -= 1
neighborhoods += matching_neighborhoods
else:
n_nodes = g_hash.number_of_nodes()
del g
del g_hash
return neighborhoods, n_nodes
def add_weights_to_nodes(g, w, show_labels=True):
g_hash = ml.neighborhood_hash(g)
#initialize the weight for every node in g_hash
for n, nh in g_hash.node.iteritems():
idx = int("".join([str(i) for i in nh["label"]]), 2)
w_nh = w[idx]
g_hash.node[n]["label"] = w_nh
#create a copy of the weighted graph
g_hash_weighted = g_hash.copy()
#aggregate the weights of each node with the
#original weight of its caller
for n, nh in g_hash.node.iteritems():
for neighbor in g_hash.neighbors(n):
g_hash_weighted.node[neighbor]["label"] += g_hash.node[n]["label"]
#create array of the node weigths
g_weights = []
for n, nh in g_hash_weighted.node.iteritems():
g_weights.append(nh["label"])
#normalize weight between 0.5 and 1 to plot
g_weights = np.array(g_weights)
g_weights.sort()
g_weights_norm = normalize_weights(g_weights)
g_weights_norm = g_weights_norm[::-1]
d_w_norm = dict(zip(g_weights, g_weights_norm))
#add normalized weight as color to each node
for n, nh in g_hash_weighted.node.iteritems():
w = g_hash_weighted.node[n]["label"]
g_hash_weighted.node[n]["style"] = "filled"
g_hash_weighted.node[n]["fillcolor"] = "0.000 0.000 {0}".format(d_w_norm[w])
#write function name in the label of the node or remove label
if show_labels:
for n, nh in g_hash_weighted.node.iteritems():
node_text = n[0].split("/")[-1] + n[1] + "\n" + str(g_hash_weighted.node[n]["label"])
g_hash_weighted.node[n]["label"] = node_text
else:
for n, nh in g_hash_weighted.node.iteritems():
g_hash_weighted.node[n]["label"] = ""
return g_hash_weighted
def normalize_weights(a, imin=0.0, imax=1.0):
dmin = a.min()
dmax = a.max()
return imin + (imax - imin) * (a - dmin) / (dmax - dmin)
def read_files(d, file_extension, max_files=0):
files = []
for fn in os.listdir(d):
if fn.lower().endswith(file_extension):
files.append(os.path.join(d, fn))
shuffle(files)
#if max_files is 0, return all the files in dir
if max_files == 0:
max_files = len(files)
files = files[:max_files]
return files
| gpl-2.0 | 2,773,955,001,052,645,400 | 33.108808 | 110 | 0.598359 | false |
leanguru/teamforge-charts | teamforge_import/generate_mongoimport_files.py | 1 | 3993 | import json
from os import environ
from datetime import datetime
# Set the import timestamp
timestamp_string = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
# Set the path variable
path = environ["TMP_DIR"] + "/" + environ["TRACKER"]
# Generate Lookup Table for planning folders and import timestamp to planning folders
with open(path + '/planning_folders_processed.json') as data_file:
planning_folders = json.load(data_file)
planning_folder_lookup = {}
for i in range(len(planning_folders)):
id = planning_folders[i]['id']
title = planning_folders[i]['title']
planning_folder_lookup[id] = {'planningFolderTitle': title}
# Add Folder Paths to Planning Folder Lookup
with open(path + '/planning_folder_trace.txt') as f:
planning_folder_trace = f.readlines()
for line in planning_folder_trace:
folder_trace = line.rstrip().split(',')
# Trace the entire path, but ignore the last entry which is the 'PlanningApp...'
while len(folder_trace) > 1:
id = folder_trace[0]
if id not in [i for i in planning_folder_lookup.keys() if
'planningFolderTrace' in planning_folder_lookup[i].keys()]:
planning_folder_lookup[id]['planningFolderTrace'] = folder_trace
planning_folder_lookup[id]['planningFolderDepth'] = len(folder_trace)
planning_folder_lookup[id]['planningFolderPath'] = '>'.join(
map(lambda x: planning_folder_lookup[x]['planningFolderTitle'], reversed(folder_trace[:-1])))
folder_trace = folder_trace[1:]
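# Illustrative shape only (ids and titles below are made up, assuming 'plan1000' is a
# parent folder titled 'Release A'): after the two passes above, planning_folder_lookup
# maps a folder id to its title plus the derived trace data, roughly:
#   planning_folder_lookup['plan1234'] == {
#       'planningFolderTitle': 'Sprint 1',
#       'planningFolderTrace': ['plan1234', 'plan1000', 'PlanningApp...'],
#       'planningFolderDepth': 3,
#       'planningFolderPath': 'Release A>Sprint 1',
#   }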
# Add Planning Folder Trace and Planning Folder Path to planning folders
for i in range(len(planning_folders)):
id = planning_folders[i]['id']
if id in planning_folder_lookup:
planning_folders[i]['planningFolderTrace'] = planning_folder_lookup[id]['planningFolderTrace']
planning_folders[i]['planningFolderDepth'] = planning_folder_lookup[id]['planningFolderDepth']
planning_folders[i]['planningFolderPath'] = planning_folder_lookup[id]['planningFolderPath']
# Write Output File for planning Folders
with open(path + '/planning_folders_mongoimport.json', 'w') as outfile:
json.dump(planning_folders, outfile)
# For the deliverables:
# - Convert any number string into a real JSON number
# - Add Timestamp information
# - Add Planning Folder Path, Planning Folder Timestamp, Planning Folder Depth and Planning Folder Trace
with open(path + '/deliverables_processed.json') as data_file:
deliverables = json.load(data_file)
for i in range(len(deliverables)):
for key in deliverables[i]:
if deliverables[i][key] is not None and deliverables[i][key].isdigit():
deliverables[i][key] = int(deliverables[i][key])
elif deliverables[i][key] is not None and deliverables[i][key].replace('.', '', 1).isdigit():
deliverables[i][key] = float(deliverables[i][key])
planning_folder_id = deliverables[i]['planningFolderId']
if planning_folder_id:
deliverables[i]['planningFolderTitle'] = planning_folder_lookup[planning_folder_id]['planningFolderTitle']
deliverables[i]['planningFolderPath'] = planning_folder_lookup[planning_folder_id]['planningFolderPath']
deliverables[i]['planningFolderDepth'] = planning_folder_lookup[planning_folder_id]['planningFolderDepth']
deliverables[i]['planningFolderTrace'] = planning_folder_lookup[planning_folder_id]['planningFolderTrace']
# Remove any Variable Names with whitespaces since we can't process them
def removeWhiteSpaces(dictionary):
    # iterate over a snapshot of the keys because entries may be replaced below
    for key in list(dictionary.keys()):
        if isinstance(dictionary[key], dict):
            removeWhiteSpaces(dictionary[key])
if len(key.split()) > 1:
new_key = ''.join(key.title().split())
new_key = new_key[0].lower() + new_key[1:]
dictionary[new_key] = dictionary.pop(key)
for deliverable in deliverables:
removeWhiteSpaces(deliverable)
# Write Mongodb import file for deliverables
with open(path + '/deliverables_mongoimport.json', 'w') as outfile:
json.dump(deliverables, outfile)
| mit | 3,191,176,110,185,584,000 | 42.402174 | 112 | 0.70849 | false |
totem/yoda-discover | tests/unit/test_util.py | 1 | 3775 | import discover.util
from mock import patch
from nose.tools import eq_
from discover.util import convert_to_milliseconds, DEFAULT_TIMEOUT_MS
def test_convert_to_milliseconds_for_timeout_in_hours():
# When: I convert timeout to 'ms'
timeout_ms = convert_to_milliseconds('1h')
# Then: Expected timeout (int) is returned in ms
eq_(timeout_ms, 3600 * 1000)
def test_convert_to_milliseconds_for_timeout_in_minutes():
# When: I convert timeout to 'ms'
timeout_ms = convert_to_milliseconds('5m')
# Then: Expected timeout (int) is returned in ms
eq_(timeout_ms, 5 * 60 * 1000)
def test_convert_to_milliseconds_for_timeout_in_seconds():
# When: I convert timeout to 'ms'
timeout_ms = convert_to_milliseconds('5s')
# Then: Expected timeout (int) is returned in ms
eq_(timeout_ms, 5 * 1000)
def test_convert_to_milliseconds_for_timeout_in_milliseconds():
# When: I convert timeout to 'ms'
timeout_ms = convert_to_milliseconds('5ms')
# Then: Expected timeout (int) is returned in ms
eq_(timeout_ms, 5)
def test_convert_to_milliseconds_for_invalid_timeout():
# When: I convert timeout to 'ms'
timeout_ms = convert_to_milliseconds('5dms')
# Then: DEFAULT_TIMEOUT_MS is returned
eq_(timeout_ms, DEFAULT_TIMEOUT_MS)
@patch('discover.util.urlopen')
def test_health_when_uri_is_specified(murlopen):
# When: I perform health test with given uri
healthy = discover.util.health_test('8080', 'mockhost', uri='/test')
# Then: http health test is performed
eq_(healthy, True)
murlopen.assert_called_once_with('http://mockhost:8080/test', None, 2)
@patch('discover.util.urlopen')
def test_health_when_uri_and_timeout_is_specified(murlopen):
# When: I perform health test with given uri
healthy = discover.util.health_test(8080, 'mockhost', uri='/test',
timeout='1m')
# Then: http health test is performed
eq_(healthy, True)
murlopen.assert_called_once_with('http://mockhost:8080/test', None, 60)
@patch('discover.util.socket')
def test_health_when_uri_is_not_specified(msocket):
# When: I perform health test with given uri
healthy = discover.util.health_test(8080, 'mockhost')
# Then: tcp test returns healthy
eq_(healthy, True)
@patch('discover.util.urlopen')
def test_http_when_urlopen_fails(murlopen):
# Given: An invalid uri
murlopen.side_effect = Exception('Invalid uri')
# When: I perform http_test with given uri
healthy = discover.util.http_test(8080, 'mockhost')
# Then: http test returns false
eq_(healthy, False)
murlopen.assert_called_once_with('http://mockhost:8080/health', None, 2)
@patch('discover.util.socket')
def test_port_when_port_is_not_listening(msocket):
# Given: Invalid Server
msocket.socket().connect.side_effect = Exception('Invalid server')
# When: I perform port_test
healthy = discover.util.port_test('8080', 'mockhost')
# Then: Port Test returns False
eq_(healthy, False)
@patch('discover.util.get_instance_metadata')
def test_map_proxy_host_using_ec2_metadata(mock_get):
# Given: Existing ec2 instance with metadata
mock_get().__getitem__.return_value = 'testhost'
# When: I map proxy host using ec2-metadata
host = discover.util.map_proxy_host('ec2:meta-data:mock')
# Then: Ec2 metadata gets resolved successfully
eq_(host, 'testhost')
mock_get().__getitem__.assert_called_once_with('mock')
@patch('discover.util.get_instance_metadata')
def test_map_proxy_host_using_actualhost(mock_get):
# When: I map proxy host using actual host
host = discover.util.map_proxy_host('testhost')
# Then: The actual host value is returned.
eq_(host, 'testhost')
| mit | 5,562,059,840,405,288,000 | 28.038462 | 76 | 0.685033 | false |
samuxiii/prototypes | bot/main.py | 1 | 2611 | import os
import dtree as dt
import telebot
from telebot import types
import time
from optparse import OptionParser
import wikiquery as wq
#get token from command line
parser = OptionParser()
parser.add_option("-t", "--token")
(options, args) = parser.parse_args()
TOKEN = options.token
if not TOKEN:
#try from environment
TOKEN = os.environ.get('TOKEN')
if not TOKEN:
print("Required TOKEN was not present")
exit(0)
#init telebot
bot = telebot.TeleBot(TOKEN)
smart = 0
def listener(messages):
for m in messages:
if m.content_type == 'text':
cid = m.chat.id
print("[{}]:{}".format(str(cid), m.text))
bot.set_update_listener(listener)
#global variables
smart = 0
height = 0
weight = 0
foot = 0
# func #
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
bot.reply_to(message, "How are you doing?\nAvailable commands:\n/photo\n/say\n/gender")
@bot.message_handler(commands=['photo'])
def command_photo(m):
cid = m.chat.id
bot.send_photo( cid, open( 'l.jpg', 'rb'))
@bot.message_handler(commands=['say'])
def command_say(m):
cid = m.chat.id
bot.send_message( cid, 'fuck fuck.. XIII')
@bot.message_handler(func=lambda message: smart > 0)
def smart_actions(m):
global smart, weight, height, foot
cid = m.chat.id
if (smart == 1):
height = m.text
markup = types.ForceReply(selective=False)
bot.send_message( cid, 'How much do you weigh?', reply_markup=markup)
smart = 2
elif (smart == 2):
weight = m.text
markup = types.ForceReply(selective=False)
bot.send_message( cid, 'What size shoes do you wear?', reply_markup=markup)
smart = 3
else:
foot = m.text
smart = 0
prediction = dt.predictGender(height, weight, foot)
result_predicted = "According to your answer, you should be a " + str(prediction + "...")
bot.send_message(cid, result_predicted)
@bot.message_handler(commands=['gender'])
def command_smart(m):
global smart, height
cid = m.chat.id
    smart = 1
markup = types.ForceReply(selective=False)
bot.send_message( cid, 'How tall are you?', reply_markup=markup)
@bot.message_handler(commands=['x'])
def command_x(m):
cid = m.chat.id
string = ' '.join(m.text.split()[1:])
words = string.split('?')
topic = words[0]
text = ' '.join(words[1:])
if not '?' in string:
text = topic
topic = ""
print("{}:{}".format(topic, text))
bot.send_message( cid, wq.query(topic, text))
# end func #
bot.polling(none_stop=True)
| mit | -2,413,570,020,326,716,000 | 25.11 | 97 | 0.632325 | false |
LukasMosser/pmgan | main.py | 1 | 7590 | from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from dataset import HDF5Dataset, save_hdf5
import dcgan
import numpy as np
np.random.seed(43)
from multiprocessing import Pool
from dask_pipeline import run_analysis_pipeline
#Change workdir to where you want the files output
work_dir = os.path.expandvars('$PBS_O_WORKDIR/berea_test')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, help='3D')
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda' , action='store_true', help='enables cuda')
parser.add_argument('--ngpu' , type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
opt = parser.parse_args()
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
opt.manualSeed = 43 # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.dataset in ['3D']:
dataset = HDF5Dataset(opt.dataroot,
input_transform=transforms.Compose([
transforms.ToTensor()
]))
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 1
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
netG = dcgan.DCGAN3D_G(opt.imageSize, nz, nc, ngf, ngpu)
netG.apply(weights_init)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
netD = dcgan.DCGAN3D_D(opt.imageSize, nz, nc, ngf, ngpu)
netD.apply(weights_init)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCELoss()
input, noise, fixed_noise, fixed_noise_TI = None, None, None, None
input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1, 1)
fixed_noise = torch.FloatTensor(1, nz, 9, 9, 9).normal_(0, 1)
fixed_noise_TI = torch.FloatTensor(1, nz, 1, 1, 1).normal_(0, 1)
label = torch.FloatTensor(opt.batchSize)
real_label = 0.9
fake_label = 0
if opt.cuda:
netD.cuda()
netG.cuda()
criterion.cuda()
input, label = input.cuda(), label.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
fixed_noise_TI = fixed_noise_TI.cuda()
input = Variable(input)
label = Variable(label)
noise = Variable(noise)
fixed_noise = Variable(fixed_noise)
fixed_noise_TI = Variable(fixed_noise_TI)
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr = opt.lr, betas = (opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr = opt.lr, betas = (opt.beta1, 0.999))
gen_iterations = 0
for epoch in range(opt.niter):
for i, data in enumerate(dataloader, 0):
f = open(work_dir+"training_curve.csv", "a")
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
netD.zero_grad()
real_cpu = data
batch_size = real_cpu.size(0)
input.data.resize_(real_cpu.size()).copy_(real_cpu)
label.data.resize_(batch_size).fill_(real_label)
output = netD(input)
errD_real = criterion(output, label)
errD_real.backward()
D_x = output.data.mean()
# train with fake
noise.data.resize_(batch_size, nz, 1, 1, 1)
noise.data.normal_(0, 1)
fake = netG(noise).detach()
label.data.fill_(fake_label)
output = netD(fake)
errD_fake = criterion(output, label)
errD_fake.backward()
D_G_z1 = output.data.mean()
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
g_iter = 1
while g_iter != 0:
netG.zero_grad()
label.data.fill_(1.0) # fake labels are real for generator cost
noise.data.normal_(0, 1)
fake = netG(noise)
output = netD(fake)
errG = criterion(output, label)
errG.backward()
D_G_z2 = output.data.mean()
optimizerG.step()
g_iter -= 1
gen_iterations += 1
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, opt.niter, i, len(dataloader),
errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
f.write('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, opt.niter, i, len(dataloader),
errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
f.write('\n')
f.close()
delete_var = True
if epoch % 1 == 0:
fake = netG(fixed_noise)
fake_TI = netG(fixed_noise_TI)
save_hdf5(fake.data, os.path.join(work_dir, 'fake_samples_{0}.hdf5'.format(gen_iterations)))
save_hdf5(fake_TI.data, os.path.join(work_dir, 'fake_TI_{0}.hdf5'.format(gen_iterations)))
fdir = os.path.join(work_dir, "epoch_"+str(epoch))
odir = os.path.join(work_dir, "orig")
os.mkdir(fdir)
for i in range(10):
fixed_noise.data.normal_(0, 1)
fake = netG(fixed_noise)
save_hdf5(fake.data, os.path.join(fdir, 'fake_samples_{0}.hdf5'.format(i)))
delete_var = run_analysis_pipeline(fdir, odir)
# do checkpointing
if epoch % 5 == 0 or delete_var == False:
torch.save(netG.state_dict(), os.path.join(fdir, 'netG_epoch_%d.pth' % (epoch)))
torch.save(netD.state_dict(), os.path.join(fdir, 'netD_epoch_%d.pth' % (epoch)))
| mit | 3,921,008,595,127,081,500 | 34.302326 | 113 | 0.615415 | false |
asweigart/stickysticks | tests/basicTests.py | 1 | 2372 | import unittest
import sys
import os
sys.path.append(os.path.abspath('..'))
import stickysticks
class BasicTests(unittest.TestCase):
def test_stickDefaults(self):
st = stickysticks.Stick(0, 100, (0,0))
self.assertEqual(st.start, (0, 0))
self.assertEqual(st.end, (100, 0))
self.assertEqual(st.length, 100)
st2 = stickysticks.Stick(0, 100, (st, 1.0))
self.assertEqual(st2.start, (100, 0))
self.assertEqual(st2.end, (200, 0))
self.assertEqual(st2.length, 100)
def test_rotate(self):
st1 = stickysticks.Stick()
st2 = stickysticks.Stick(shoulder=st1)
st3 = stickysticks.Stick(shoulder=st2)
st1.rotate(90)
self.assertEqual(st1.start, (0, 0))
self.assertEqual(st1.end, (0, 100))
self.assertEqual(st2.start, (0, 100))
self.assertEqual(st2.end, (0, 200))
self.assertEqual(st3.start, (0, 200))
self.assertEqual(st3.end, (0, 300))
st1.rotate(-90)
self.assertEqual(st1.start, (0, 0))
self.assertEqual(st1.end, (100, 0))
self.assertEqual(st2.start, (100, 0))
self.assertEqual(st2.end, (200, 0))
self.assertEqual(st3.start, (200, 0))
self.assertEqual(st3.end, (300, 0))
st1.rotate(45)
roundedEnd = (round(st1.end[0], 5), round(st1.end[1], 5))
self.assertEqual(st1.start, (0, 0))
self.assertEqual(roundedEnd, (70.71068, 70.71068))
st1.rotate(-90)
roundedEnd = (round(st1.end[0], 5), round(st1.end[1], 5))
self.assertEqual(st1.start, (0, 0))
self.assertEqual(roundedEnd, (70.71068, -70.71068))
def test_rotateCommutative(self):
st1 = stickysticks.Stick()
st2 = stickysticks.Stick(shoulder=st1)
st3 = stickysticks.Stick(shoulder=st2)
for st1_i in range(5):
st1.rotate(17) # just rotate it to a weird angle
for st2_i in range(5):
st2.rotate(17) # just rotate it to a weird angle
lastPointStartsAt = st3.end
st3.rotate(23)
st3.rotate(31)
st3.rotate(36)
st3.rotate(-90) # rotate back to origin orientation
self.assertEqual((st1_i, st2_i, lastPointStartsAt), (st1_i, st2_i, st3.end))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 9,019,264,947,030,571,000 | 30.626667 | 92 | 0.577993 | false |
vision1983/pyipcalc | tests/test.py | 1 | 1818 | # PyIPCalc
#
# Copyright (c) 2017, Christiaan Frans Rademan.
# All rights reserved.
#
# LICENSE: (BSD3-Clause)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENTSHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
log = logging.getLogger(__name__)
if __name__ == "__main__":
alltests = unittest.TestLoader().discover('tests',pattern='*.py')
unittest.TextTestRunner(verbosity=2).run(alltests)
| bsd-3-clause | 7,228,426,390,637,662,000 | 45.615385 | 79 | 0.769527 | false |
pferreir/PySecretHandshake | secret_handshake/boxstream.py | 1 | 2543 | import struct
from asyncio import IncompleteReadError
from async_generator import async_generator, yield_
from nacl.secret import SecretBox
from .util import inc_nonce, split_chunks
HEADER_LENGTH = 2 + 16 + 16
MAX_SEGMENT_SIZE = 4 * 1024
TERMINATION_HEADER = (b'\x00' * 18)
def get_stream_pair(reader, writer, **kwargs):
"""Return a tuple with `(unbox_stream, box_stream)` (reader/writer).
:return: (:class:`secret_handshake.boxstream.UnboxStream`,
:class:`secret_handshake.boxstream.BoxStream`) """
box_args = {
'key': kwargs['encrypt_key'],
'nonce': kwargs['encrypt_nonce'],
}
unbox_args = {
'key': kwargs['decrypt_key'],
'nonce': kwargs['decrypt_nonce'],
}
return UnboxStream(reader, **unbox_args), BoxStream(writer, **box_args)
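# Illustrative sketch (hypothetical helper with placeholder key material, not used by
# this module): the four kwargs normally come from the completed secret-handshake
# state; the zero-filled keys and nonces below only show the expected call shape.
def _example_get_stream_pair(reader, writer):
    keys = {
        'encrypt_key': b'\x00' * 32,
        'encrypt_nonce': b'\x00' * 24,
        'decrypt_key': b'\x00' * 32,
        'decrypt_nonce': b'\x00' * 24,
    }
    unbox_stream, box_stream = get_stream_pair(reader, writer, **keys)
    return unbox_stream, box_stream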
class UnboxStream(object):
def __init__(self, reader, key, nonce):
self.reader = reader
self.key = key
self.nonce = nonce
self.closed = False
async def read(self):
try:
data = await self.reader.readexactly(HEADER_LENGTH)
except IncompleteReadError:
self.closed = True
return None
box = SecretBox(self.key)
header = box.decrypt(data, self.nonce)
if header == TERMINATION_HEADER:
self.closed = True
return None
length = struct.unpack('>H', header[:2])[0]
mac = header[2:]
data = await self.reader.readexactly(length)
body = box.decrypt(mac + data, inc_nonce(self.nonce))
self.nonce = inc_nonce(inc_nonce(self.nonce))
return body
@async_generator
async def __aiter__(self):
while True:
data = await self.read()
if data is None:
return
await yield_(data)
class BoxStream(object):
def __init__(self, writer, key, nonce):
self.writer = writer
self.key = key
self.box = SecretBox(self.key)
self.nonce = nonce
def write(self, data):
for chunk in split_chunks(data, MAX_SEGMENT_SIZE):
body = self.box.encrypt(chunk, inc_nonce(self.nonce))[24:]
header = struct.pack('>H', len(body) - 16) + body[:16]
hdrbox = self.box.encrypt(header, self.nonce)[24:]
self.writer.write(hdrbox)
self.nonce = inc_nonce(inc_nonce(self.nonce))
self.writer.write(body[16:])
def close(self):
self.writer.write(self.box.encrypt(b'\x00' * 18, self.nonce)[24:])
| mit | 436,206,435,234,890,000 | 27.255556 | 75 | 0.587102 | false |
fortes/mashpress | handlers.py | 1 | 4427 | from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.ext.webapp import template
import logging
import models
import os
class SlashRedirectHandler(webapp.RequestHandler):
"""Strip off slashes and permanent redirect to the slashless path"""
def get(self, path):
self.redirect(path, permanent=True)
class DateHandler(webapp.RequestHandler):
"""Redirect to the main archive page, with a hash for the year/month"""
def get(self, year, month=None):
url = '/archive#y-' + year
if month:
url += '-m-%s' % int(month)
self.redirect(url)
class BaseHandler(webapp.RequestHandler):
"""Base handler, provides render_template_to_response/string with default parameters"""
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
def render_text_to_response(self, text, content_type=None):
# App engine uses text/html and utf-8 by default
# http://code.google.com/appengine/docs/python/tools/webapp/buildingtheresponse.html
if content_type != None:
self.response.content_type = content_type
self.response.out.write(text)
def render_template_to_response(self, template_name='index', values={}, format='html'):
html, content_type = self.render_template(template_name, values, format)
self.render_text_to_response(html, content_type)
def render_template(self, template_name='index', values={}, format='html'):
values.update({
'settings': models.Setting.get_dictionary()
})
content_type = None
if format == 'feed':
content_type = 'application/atom+xml; charset=utf-8'
template_path = os.path.join(self.template_dir, template_name + '.' + format)
html = template.render(template_path, values)
return html, content_type
class SiteHandler(BaseHandler):
"""Handle the audience-facing side of the site"""
def get(self, slug):
item = models.Item.get_by_slug(slug)
if not item:
return self.redirect_or_404(slug)
self.render_template_to_response('item', {
'item': item,
'title': item.title
})
def redirect_or_404(self, slug):
"""Find out if the slug was previously used. If so, redirect. Otherwise, 404"""
alias = models.Alias.get_by_slug(slug)
if alias and alias.item.is_published:
self.redirect(alias.item.slug, permanent=True)
else:
self.error(404)
self.render_template_to_response('404', {
'path': slug,
'title': "Not Found"
})
class RootHandler(SiteHandler):
"""Handle the root element"""
def get(self):
html = memcache.get('root_html')
# Cache miss
if not html:
root, posts = self.root_and_posts()
html, _ = self.render_template('base', {
'item': root,
'posts': posts.fetch(10)
})
memcache.set('root_html', html)
self.render_text_to_response(html)
@classmethod
def root_and_posts(klass):
root = models.Item.get_by_slug('/')
posts = models.Item.all_published_posts()
return root, posts
class ArchiveHandler(RootHandler):
def get(self):
html = memcache.get('archive_html')
if not html:
root, posts = self.root_and_posts()
html, _ = self.render_template('archive', {
'item': root,
'posts': posts
})
memcache.set('archive_html', html)
self.render_text_to_response(html)
class FeedHandler(RootHandler):
def get(self):
# When feedburner is enabled, only give feedburner bot access
# to the feed, all others get redirected
feed_address = models.Setting.get_dictionary()['feedburner_address']
if feed_address:
userAgent = self.request.headers.get('User-Agent', '').lower()
if not 'feedburner' in userAgent:
return self.redirect(feed_address)
root, posts = self.root_and_posts()
# Render the feed
self.render_template_to_response('atom', {
'item': root,
'posts': posts.fetch(10)
}, 'feed')
| mit | 6,586,475,358,431,210,000 | 32.285714 | 92 | 0.605376 | false |
angr/angr | angr/analyses/decompiler/ailgraph_walker.py | 1 | 1603 |
import networkx # pylint:disable=unused-import
class RemoveNodeNotice(Exception):
pass
class AILGraphWalker:
"""
Walks an AIL graph and optionally replaces each node with a new node.
"""
def __init__(self, graph, handler, replace_nodes: bool=False):
self.graph = graph # type: networkx.DiGraph
self.handler = handler
self._replace_nodes = replace_nodes
def walk(self):
for node in list(self.graph.nodes()):
try:
r = self.handler(node)
remove = False
except RemoveNodeNotice:
# we need to remove this node
r = None
remove = True
if self._replace_nodes:
if remove:
self.graph.remove_node(node)
elif r is not None and r is not node:
in_edges = list(self.graph.in_edges(node, data=True))
out_edges = list(self.graph.out_edges(node, data=True))
self.graph.remove_node(node)
self.graph.add_node(r)
for src, _, data in in_edges:
if src is node:
self.graph.add_edge(r, r, **data)
else:
self.graph.add_edge(src, r, **data)
for _, dst, data in out_edges:
if dst is node:
self.graph.add_edge(r, r, **data)
else:
self.graph.add_edge(r, dst, **data)
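# Illustrative sketch (hypothetical helper, not used by this module): demonstrates the
# handler contract of AILGraphWalker, which may return a node to keep or substitute it,
# or raise RemoveNodeNotice to drop the node from the graph.
def _example_walk(graph):
    def handler(node):
        return node  # no-op replacement; a real pass would return a rewritten node
    AILGraphWalker(graph, handler, replace_nodes=True).walk()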
| bsd-2-clause | -3,466,230,535,631,935,000 | 31.06 | 75 | 0.470992 | false |
brocaar/flask-views | docs/conf.py | 1 | 7870 | # -*- coding: utf-8 -*-
#
# Flask-Views documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 1 20:28:05 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('_themes'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-Views'
copyright = u'2012, <a href="http://brocaar.com/">Orne Brocaar</a>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Viewsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-Views.tex', u'Flask-Views Documentation',
u'Orne Brocaar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-views', u'Flask-Views Documentation',
[u'Orne Brocaar'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Flask-Views', u'Flask-Views Documentation',
u'Orne Brocaar', 'Flask-Views', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause | -1,222,660,194,973,458,700 | 31.254098 | 80 | 0.703558 | false |
limbail/ceamon | pandorabox/ceamon/views.py | 1 | 5902 | from django.shortcuts import render
from django.views.generic import ListView
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, HttpResponseRedirect
# IMPORT REST
from rest_framework import status, generics, mixins, viewsets
from rest_framework.decorators import api_view
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest_framework import generics, filters
from rest_framework.response import Response
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import permissions
from ceamon.permissions import IsOwnerOrReadOnly
from django.contrib.auth.models import User, Group
from ceamon.serializers import UserSerializer, GroupSerializer
# IMPORT
from ceamon.serializers import sapnodeSerializer, StatusSerializer
from ceamon.models import sapnode, StatusModel
def detail(request, question_id):
    return HttpResponse("You are viewing server %s." % question_id)
class JSONResponse(HttpResponse):
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
@api_view(['GET', 'POST'])
def sapnode_list(request, format=None):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
content = {
'user': unicode(request.user), # `django.contrib.auth.User` instance.
'auth': unicode(request.auth), # None
}
if request.method == 'GET':
l_sapnode = sapnode.objects.all()
serializer = sapnodeSerializer(l_sapnode, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = sapnodeSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE', 'POST'])
def sapnode_detail(request, pk, format=None,):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
try:
l_sapnode = sapnode.objects.get(pk=pk)
except sapnode.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = sapnodeSerializer(l_sapnode)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = sapnodeSerializer(l_sapnode, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'POST':
serializer = sapnodeSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
l_sapnode.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
@api_view(['GET', 'PUT' , 'POST'])
def StatusViewSet(request, format=None):
#l_StatusModel = StatusModel.objects.all(pk=pk)
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
content = {
'user': unicode(request.user), # `django.contrib.auth.User` instance.
'auth': unicode(request.auth), # None
}
if request.method == 'GET':
l_StatusModel = StatusModel.objects.all()
serializer = StatusSerializer(l_StatusModel, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = StatusSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'PUT':
serializer = StatusSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE', 'POST'])
def status_detail(request, pk, format=None):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
try:
l_StatusModel = StatusModel.objects.get(pk=pk)
except StatusModel.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = StatusSerializer(l_StatusModel)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = StatusSerializer(l_StatusModel, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'POST':
serializer = StatusSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
        l_StatusModel.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| mit | 5,553,470,778,808,345,000 | 34.769697 | 84 | 0.699763 | false |
Brainiq7/Ananse | ananse_dl/extractor/streamcloud.py | 1 | 1923 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
class StreamcloudIE(InfoExtractor):
IE_NAME = 'streamcloud.eu'
_VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?'
_TEST = {
'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
'md5': '6bea4c7fa5daaacc2a946b7146286686',
'info_dict': {
'id': 'skp9j99s4bpz',
'ext': 'mp4',
'title': 'ananse test video \'/\\ ä ↭',
},
'skip': 'Only available from the EU'
}
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'http://streamcloud.eu/%s' % video_id
orig_webpage = self._download_webpage(url, video_id)
fields = re.findall(r'''(?x)<input\s+
type="(?:hidden|submit)"\s+
name="([^"]+)"\s+
(?:id="[^"]+"\s+)?
value="([^"]*)"
''', orig_webpage)
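        # Gather the (name, value) pairs of the hidden/submit form inputs; they are
        # re-posted below (after the enforced wait) to request the actual video page.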
post = compat_urllib_parse.urlencode(fields)
self._sleep(12, video_id)
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
req = compat_urllib_request.Request(url, post, headers)
webpage = self._download_webpage(
req, video_id, note='Downloading video page ...')
title = self._html_search_regex(
r'<h1[^>]*>([^<]+)<', webpage, 'title')
video_url = self._search_regex(
r'file:\s*"([^"]+)"', webpage, 'video URL')
thumbnail = self._search_regex(
r'image:\s*"([^"]+)"', webpage, 'thumbnail URL', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
}
| unlicense | -2,342,502,022,879,466,000 | 29.967742 | 108 | 0.5125 | false |
gencer/sentry | tests/sentry/api/endpoints/test_project_releases.py | 1 | 10374 | from __future__ import absolute_import
from datetime import datetime
from django.core.urlresolvers import reverse
from sentry.models import Release, ReleaseCommit, ReleaseProject
from sentry.testutils import APITestCase
class ProjectReleaseListTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(team=team, name='foo')
project2 = self.create_project(team=team, name='bar')
release1 = Release.objects.create(
organization_id=project1.organization_id,
version='1',
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386),
)
release1.add_project(project1)
ReleaseProject.objects.filter(project=project1, release=release1).update(new_groups=5)
release2 = Release.objects.create(
organization_id=project1.organization_id,
version='2',
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386),
)
release2.add_project(project1)
release3 = Release.objects.create(
organization_id=project1.organization_id,
version='3',
date_added=datetime(2013, 8, 12, 3, 8, 24, 880386),
date_released=datetime(2013, 8, 15, 3, 8, 24, 880386),
)
release3.add_project(project1)
release4 = Release.objects.create(
organization_id=project2.organization_id,
version='4',
)
release4.add_project(project2)
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
}
)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 3
assert response.data[0]['version'] == release3.version
assert response.data[1]['version'] == release2.version
assert response.data[2]['version'] == release1.version
assert response.data[2]['newGroups'] == 5
def test_query_filter(self):
self.login_as(user=self.user)
team = self.create_team()
project = self.create_project(team=team, name='foo')
release = Release.objects.create(
organization_id=project.organization_id,
version='foobar',
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386),
)
release.add_project(project)
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.get(url + '?query=foo', format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['version'] == release.version
response = self.client.get(url + '?query=bar', format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 0
class ProjectReleaseCreateTest(APITestCase):
def test_minimal(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1',
}
)
assert response.status_code == 201, response.content
assert response.data['version']
release = Release.objects.get(
version=response.data['version'],
)
assert not release.owner
assert release.organization == project.organization
assert release.projects.first() == project
def test_ios_release(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1 (123)',
}
)
assert response.status_code == 201, response.content
assert response.data['version']
release = Release.objects.get(
version=response.data['version'],
)
assert not release.owner
assert release.organization == project.organization
assert release.projects.first() == project
def test_duplicate(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
release = Release.objects.create(version='1.2.1', organization_id=project.organization_id)
release.add_project(project)
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1',
}
)
assert response.status_code == 208, response.content
    def test_duplicate_across_org(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
release = Release.objects.create(version='1.2.1', organization_id=project.organization_id)
release.add_project(project)
project2 = self.create_project(name='bar', organization=project.organization)
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project2.organization.slug,
'project_slug': project2.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1',
}
)
# since project2 was added, should be 201
assert response.status_code == 201, response.content
assert Release.objects.filter(
version='1.2.1', organization_id=project.organization_id
).count() == 1
assert ReleaseProject.objects.get(release=release, project=project)
assert ReleaseProject.objects.get(release=release, project=project2)
def test_version_whitespace(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.3\n',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '\n1.2.3',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '1.\n2.3',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '1.2.3\f',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '1.2.3\t',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '1.2.3',
}
)
assert response.status_code == 201, response.content
assert response.data['version'] == '1.2.3'
release = Release.objects.get(
organization_id=project.organization_id,
version=response.data['version'],
)
assert not release.owner
def test_features(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1',
'owner': self.user.email,
}
)
assert response.status_code == 201, response.content
assert response.data['version']
release = Release.objects.get(
organization_id=project.organization_id,
version=response.data['version'],
)
assert release.owner == self.user
def test_commits(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={'version': '1.2.1',
'commits': [
{
'id': 'a' * 40
},
{
'id': 'b' * 40
},
]}
)
assert response.status_code == 201, (response.status_code, response.content)
assert response.data['version']
release = Release.objects.get(
organization_id=project.organization_id,
version=response.data['version'],
)
rc_list = list(
ReleaseCommit.objects.filter(
release=release,
).select_related('commit', 'commit__author').order_by('order')
)
assert len(rc_list) == 2
for rc in rc_list:
assert rc.organization_id
| bsd-3-clause | 7,579,397,420,073,799,000 | 29.783383 | 98 | 0.538076 | false |
waelkht/Onto.KOM | ontokom/preprocessing.py | 1 | 4861 | import re
import os
from glob import glob
from textblob import TextBlob
from nltk.corpus import stopwords
from nltk import download
from tqdm import tqdm
def download_preprocessing_prerequisites():
"""Downloads the NLTK prerequisites needed for other functions"""
download("averaged_perceptron_tagger") # POS Tags
download("stopwords") # Stop words
download("brown") # Noun phrases
download("punkt") # Noun phrases
def text_blob_from_file(file_path):
"""Loads a `TextBlob` from `file_path`"""
with open(file_path, "r", encoding="utf-8") as text_file:
return TextBlob(text_file.read())
def remove_stop_words(text_blob):
"""Removes all stop words from `text_blob` and returns the resulting `TextBlob`"""
# Get words from original text, remove the stop words and combine the
# words again
words = text_blob.words
stop_words = [stop_word.lower()
for stop_word in stopwords.words("english")]
words = filter(lambda word: not word.lower() in stop_words, words)
return TextBlob(" ".join(words))
def find_noun_phrases(text_blob):
"""Returns all noun phrases found in `text_blob`"""
tags = text_blob.tags
noun_phrases = []
current_noun_phrase = []
current_noun_phrase_pos = []
# Find the noun phrases sequentially based on the POS tags
for (word, pos) in tags:
if re.match("^[a-zA-Z]*$", word):
if current_noun_phrase == [] or current_noun_phrase_pos[-1] == "JJ":
if pos in ["NN", "NNS", "NP", "NPS", "JJ"]:
current_noun_phrase.append(word)
current_noun_phrase_pos.append(pos)
else:
if pos in ["NN", "NNS", "NP", "NPS"]:
current_noun_phrase.append(word)
current_noun_phrase_pos.append(pos)
else:
if ((len(current_noun_phrase) == 1 and not current_noun_phrase_pos[0] == "JJ")
or len(current_noun_phrase) > 1):
noun_phrases.append(" ".join(current_noun_phrase))
current_noun_phrase = []
current_noun_phrase_pos = []
return noun_phrases
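# Illustrative behaviour (hypothetical example): for "a quick brown fox ran away", the
# adjective/noun run "quick brown fox" is collected and emitted as a single noun phrase
# once the non-noun token "ran" is reached.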
def link_noun_phrases(text_blob):
"""Returns a `TextBlob` with all noun phrases in `text_blob` linked by underscores"""
#noun_phrases = text_blob.noun_phrases
noun_phrases = find_noun_phrases(text_blob)
# Sort the noun phrases by occurences of spaces so we replace those first
noun_phrases = sorted(noun_phrases, reverse=True,
key=lambda np: np.count(" "))
# Select only noun phrases that don't consist of single words (ie. at least a space or hyphen)
# Replace all spaces with underscores and remove hyphens
replacements = [(np, np.replace(" ", "_").replace("-", "")) for np in
filter(lambda word: word.count(" ") > 0 or word.count("-") > 0, noun_phrases)]
text_blob_str = str(text_blob)
for noun_phrase, joined_noun_phrase in replacements:
text_blob_str = text_blob_str.replace(noun_phrase, joined_noun_phrase)
return TextBlob(text_blob_str)
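# Illustrative behaviour (hypothetical example): an occurrence of "machine learning" in the
# text would be rewritten as "machine_learning", and hyphenated noun phrases lose their hyphens.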
def convert_wiki_dump(wiki_dump_path, out_path, wiki_extractor_path):
"""Converts a wikipedia dump at `wiki_dump_path` to multiple text files
saved to `out_path` using the WikiExtractor.py script at `wiki_extractor_path`"""
print("Extracting data from wikidump")
#os.system("python %s %s -b 1000M -q -o %s" %
# (wiki_extractor_path, wiki_dump_path, out_path))
print("Converting xml to text files")
_split_wiki_articles(out_path, out_path)
def _get_wiki_article_title(article):
"""This function finds the article name for an Wikipedia article"""
title = re.findall(r"(title=\")(.+?)(\")", article)
if len(title) == 0 or len(title[0]) <= 1:
return None
return title[0][1]
def _split_wiki_articles(raw_article_file_path, article_out_path):
"""This script is used to split Wikipedia articles extracted from a Wikipedia
dump into seperate files for every article"""
wiki_files = glob(os.path.join(raw_article_file_path, "AA", "wiki_*"))
print("Found", len(wiki_files), "files to process")
for raw_file_path in wiki_files:
print("Processing", raw_file_path)
with open(raw_file_path, "r") as raw_file:
articles = re.split("<doc", raw_file.read())[2:]
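            # WikiExtractor wraps every article in a <doc ...> element, so splitting on
            # "<doc" yields roughly one chunk per article; the leading chunks are skipped.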
for article in tqdm(articles):
title = _get_wiki_article_title(article)
if title is not None and "/" not in title:
article_path = os.path.join(article_out_path, title + ".txt")
with open(article_path, "w") as out_file:
out_file.writelines(
"\n".join(article.split("\n")[3:-3]).lower())
| mit | 887,392,361,918,140,200 | 37.888 | 98 | 0.608517 | false |
joke2k/faker | faker/providers/color/uk_UA/__init__.py | 1 | 10523 | from collections import OrderedDict
from .. import Provider as ColorProvider
class Provider(ColorProvider):
"""Implement color provider for ``uk_UA`` locale.
Sources:
- https://uk.wikipedia.org/wiki/Список_кольорів
"""
all_colors = OrderedDict((
('Абрикосовий', '#FBCEB1'),
('Аквамариновий', '#7FFFD4'),
('Алізариновий червоний', '#E32636'),
('Амарантовий', '#E52B50'),
('Амарантово-рожевий', '#F19CBB'),
('Аметистовий', '#9966CC'),
('Андроїдний зелений', '#A4C639'),
('Арсеновий', '#3B444B'),
('Атомний мандаріновий', '#FF9966'),
('Багряний', '#FF2400'),
('Баклажановий', '#990066'),
('Барвінковий', '#CCCCFF'),
('Бежевий', '#F5F5DC'),
('Берлінська лазур', '#003153'),
('Блаватний', '#6495ED'),
('Блакитний', '#AFEEEE'),
('Блакитний Брандейса', '#0070FF'),
('Блакитно-зелений', '#00DDDD'),
('Блакитно-фіолетовий', '#8A2BE2'),
('Блідий рожево-ліловий', '#996666'),
('Блідо-брунатний', '#987654'),
('Блідо-волошковий', '#ABCDEF'),
('Блідо-карміновий', '#AF4035'),
('Блідо-каштановий', '#DDADAF'),
('Блідо-пурпуровий', '#F984E5'),
('Блідо-пісочний', '#DABDAB'),
('Блідо-рожевий', '#FADADD'),
('Болотний', '#ACB78E'),
('Бронзовий', '#CD7F32'),
('Брунатний', '#964B00'),
('Брунато-малиновий', '#800000'),
('Будяковий', '#D8BFD8'),
('Бузковий', '#C8A2C8'),
('Бургундський', '#900020'),
('Бурий', '#755A57'),
('Бурштиновий', '#FFBF00'),
('Білий', '#FFFFFF'),
('Білий навахо', '#FFDEAD'),
('Бірюзовий', '#30D5C8'),
('Бістр', '#3D2B1F'),
('Вода пляжа Бонді', '#0095B6'),
('Вохра', '#CC7722'),
('Відбірний жовтий', '#FFBA00'),
('Візантійський', '#702963'),
('Гарбуз', '#FF7518'),
('Гарячо-рожевий', '#FC0FC0'),
('Геліотроп', '#DF73FF'),
('Глибокий фіолетовий', '#423189'),
('Глицінія', '#C9A0DC'),
('Грушевий', '#D1E231'),
('Гумігут', '#E49B0F'),
('Гірчичний', '#FFDB58'),
('Дерева', '#79443B'),
('Джинсовий', '#1560BD'),
('Діамантово-рожевий', '#FF55A3'),
('Жовтий', '#FFFF00'),
('Жовто-зелений', '#ADFF2F'),
('Жовто-персиковий', '#FADFAD'),
('Захисний синій', '#1E90FF'),
('Зелена весна', '#00FF7F'),
('Зелена мʼята', '#98FF98'),
('Зелена сосна', '#01796F'),
('Зелене море', '#2E8B57'),
('Зелений', '#00FF00'),
('Зелений армійський', '#4B5320'),
('Зелений мох', '#ADDFAD'),
('Зелений папороть', '#4F7942'),
('Зелений чай', '#D0F0C0'),
('Зелено-сірий чай', '#CADABA'),
('Зеленувато-блакитний', '#008080'),
('Золотаво-березовий', '#DAA520'),
('Золотий', '#FFD700'),
('Золотисто-каштановий', '#6D351A'),
('Індиго', '#4B0082'),
('Іржавий', '#B7410E'),
('Кардинал (колір)', '#C41E3A'),
('Карміновий', '#960018'),
('Каштановий', '#CD5C5C'),
('Кобальтовий', '#0047AB'),
('Колір жовтого шкільного автобуса', '#FFD800'),
('Колір засмаги', '#D2B48C'),
('Колір морської піни', '#FFF5EE'),
('Колір морської хвилі', '#00FFFF'),
('Кораловий', '#FF7F50'),
('Королівський синій', '#4169E1'),
('Кремовий', '#FFFDD0'),
('Кукурудзяний', '#FBEC5D'),
('Кіновар', '#FF4D00'),
('Лавандний', '#E6E6FA'),
('Лазуровий', '#007BA7'),
('Лазурово-синій', '#2A52BE'),
('Лайм', '#CCFF00'),
('Латунний', '#B5A642'),
('Лимонний', '#FDE910'),
('Лимонно-кремовий', '#FFFACD'),
('Лляний', '#EEDC82'),
('Лляний', '#FAF0E6'),
('Лососевий', '#FF8C69'),
('Ліловий', '#DB7093'),
('Малахітовий', '#0BDA51'),
('Малиновий', '#DC143C'),
('Мандариновий', '#FFCC00'),
('Мисливський', '#004225'),
('Морквяний', '#ED9121'),
('Мідний', '#B87333'),
('Міжнародний помаранчевий', '#FF4F00'),
('Нефритовий', '#00A86B'),
('Ніжно-блакитний', '#E0FFFF'),
('Ніжно-оливковий', '#6B8E23'),
('Ніжно-рожевий', '#FB607F'),
('Оливковий', '#808000'),
('Опівнічно-синій', '#003366'),
('Орхідея', '#DA70D6'),
('Палена сіена', '#E97451'),
('Палений оранжевий', '#CC5500'),
('Панг', '#C7FCEC'),
('Паросток папаї', '#FFEFD5'),
('Пастельно-зелений', '#77DD77'),
('Пастельно-рожевий', '#FFD1DC'),
('Персиковий', '#FFE5B4'),
('Перський синій', '#6600FF'),
('Помаранчевий', '#FFA500'),
('Помаранчево-персиковий', '#FFCC99'),
('Помаранчево-рожевий', '#FF9966'),
('Пурпурний', '#FF00FF'),
('Пурпуровий', '#660099'),
('Пшеничний', '#F5DEB3'),
('Пісочний колір', '#F4A460'),
('Рожевий', '#FFC0CB'),
('Рожевий Маунтбеттена', '#997A8D'),
('Рожево-лавандний', '#FFF0F5'),
('Рожево-ліловий', '#993366'),
('Салатовий', '#7FFF00'),
('Сангрія', '#92000A'),
('Сапфіровий', '#082567'),
('Світло-синій', '#007DFF'),
('Сепія', '#704214'),
('Сиваво-зелений', '#ACE1AF'),
('Сигнально-помаранчевий', '#FF9900'),
('Синя пил', '#003399'),
('Синя сталь', '#4682B4'),
('Сині яйця малинівки', '#00CCCC'),
('Синій', '#0000FF'),
('Синій (RYB)', '#0247FE'),
('Синій (пігмент)', '#333399'),
('Синій ВПС', '#5D8AA8'),
('Синій Клейна', '#3A75C4'),
('Сливовий', '#660066'),
('Смарагдовий', '#50C878'),
('Спаржевий', '#7BA05B'),
('Срібний', '#C0C0C0'),
('Старе золото', '#CFB53B'),
('Сіра спаржа', '#465945'),
('Сірий', '#808080'),
('Сірий шифер', '#708090'),
('Темний весняно-зелений', '#177245'),
('Темний жовто-брунатний', '#918151'),
('Темний зелений чай', '#BADBAD'),
('Темний пастельно-зелений', '#03C03C'),
('Темний хакі', '#BDB76B'),
('Темний індиго', '#310062'),
('Темно-аспідний сірий', '#2F4F4F'),
('Темно-брунатний', '#654321'),
('Темно-бірюзовий', '#116062'),
('Темно-зелений', '#013220'),
('Темно-зелений хакі', '#78866B'),
('Темно-золотий', '#B8860B'),
('Темно-карміновий', '#560319'),
('Темно-каштановий', '#986960'),
('Темно-кораловий', '#CD5B45'),
('Темно-лазурний', '#08457E'),
('Темно-лососевий', '#E9967A'),
('Темно-мандариновий', '#FFA812'),
('Темно-оливковий', '#556832'),
('Темно-персиковий', '#FFDAB9'),
('Темно-рожевий', '#E75480'),
('Темно-синій', '#000080'),
('Ультрамариновий', '#120A8F'),
('Умбра', '#734A12'),
('Умбра палена', '#8A3324'),
('Фуксія', '#FF00FF'),
('Фіолетовий', '#8B00FF'),
('Фіолетово-баклажановий', '#991199'),
('Фіолетово-червоний', '#C71585'),
('Хакі', '#C3B091'),
('Цинамоновий', '#7B3F00'),
('Циннвальдит', '#EBC2AF'),
('Ціан (колір)', '#00FFFF'),
('Ціано-блакитний', '#F0F8FF'),
('Червоний', '#FF0000'),
('Червоно-буро-помаранчевий', '#CD5700'),
('Червоновато-брунатний', '#CC8899'),
('Чорний', '#000000'),
('Шафрановий', '#F4C430'),
('Шкіра буйвола', '#F0DC82'),
('Шоколадний', '#D2691E'),
('Яскраво-бурштиновий', '#FF7E00'),
('Яскраво-бірюзовий', '#08E8DE'),
('Яскраво-зелений', '#66FF00'),
('Яскраво-зелений', '#40826D'),
('Яскраво-рожевий', '#FF007F'),
('Яскраво-фіолетовий', '#CD00CD'),
('Ясно-брунатний', '#CD853F'),
('Ясно-вишневий', '#DE3163'),
('Ясно-лазуровий', '#007FFF'),
('Ясно-лазуровий (веб)', '#F0FFFF'),
))
| mit | 5,913,424,033,789,453,000 | 35.55 | 56 | 0.484641 | false |
pyfa-org/Pyfa | gui/builtinContextMenus/additionsExportSelection.py | 1 | 1609 | import wx
import gui.mainFrame
from gui.contextMenu import ContextMenuSelection
from gui.utils.clipboard import toClipboard
from service.fit import Fit
from service.port.eft import exportBoosters, exportCargo, exportDrones, exportFighters, exportImplants
_t = wx.GetTranslation
class AdditionsExportAll(ContextMenuSelection):
visibilitySetting = 'additionsCopyPaste'
def __init__(self):
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
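        # Maps a source view's context name to a (menu label, EFT export function) pair;
        # only selections originating from one of these contexts get a "Copy Selected" entry.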
self.viewSpecMap = {
'droneItemMisc': (_t('Drones'), exportDrones),
'fighterItemMisc': (_t('Fighters'), exportFighters),
'cargoItemMisc': (_t('Cargo Items'), exportCargo),
'implantItemMisc': (_t('Implants'), exportImplants),
'implantItemMiscChar': (_t('Implants'), exportImplants),
'boosterItemMisc': (_t('Boosters'), exportBoosters)
}
def display(self, callingWindow, srcContext, selection):
if srcContext not in self.viewSpecMap:
return False
if not selection:
return False
fit = Fit.getInstance().getFit(self.mainFrame.getActiveFit())
if fit is None:
return False
self.srcContext = srcContext
return True
def getText(self, callingWindow, itmContext, selection):
return _t('Copy Selected {}').format(self.viewSpecMap[self.srcContext][0])
def activate(self, callingWindow, fullContext, selection, i):
export = self.viewSpecMap[self.srcContext][1](selection)
if export:
toClipboard(export)
AdditionsExportAll.register()
| gpl-3.0 | -5,329,103,880,391,081,000 | 33.234043 | 102 | 0.666874 | false |
JohnReid/biopsy | Python/biopsy/gene_locations.py | 1 | 2058 |
import os, sys, cookbook
from env import *
_ensembl_data_dir = os.path.join(get_data_dir(), 'ensembl')
def gene_location_filename( genome ):
return os.path.normpath(
os.path.join(
_ensembl_data_dir,
'gene_locations-%s.txt' % genome
)
)
def ensembl_src_dir( version ):
return 'C:\\Dev\\ThirdParty\\perl\\ensembl\\ensembl-%d' % version
def set_perl_lib_path( version ):
perl5lib = ';'.join(
[
'C:\\Dev\\ThirdParty\\perl\\ensembl\\src\\bioperl-live',
'%s\\ensembl\\modules' % ensembl_src_dir( version ),
'%s\\ensembl-compara\\modules' % ensembl_src_dir( version ),
'%s\\ensembl-variation\\modules' % ensembl_src_dir( version ),
]
)
os.putenv( 'PERL5LIB', perl5lib )
def call_gene_locations_perl( genome ):
"Must be in directory where gene_locations.pl is to call this"
filename = 'gene_locations-%s.txt' % genome
command = 'perl gene_locations.pl %s >%s' % ( genome, filename )
print "Command: '%s'" % command
return os.system( command )
default_versions = [
( 28, 'mus_musculus_core_28_33d' ),
( 31, 'mus_musculus_core_31_33g' ),
( 32, 'mus_musculus_core_32_34' ),
]
def get_gene_locations_from_ensembl( versions ):
for version, genome in versions:
set_perl_lib_path( version )
call_gene_locations_perl( genome )
def gene_locations( genome ):
"Returns a dict mapping gene ids to bunch(chr, start, end, strand)"
result = { }
for l in open( gene_location_filename( genome ), 'r' ):
fields = l.strip().split(':')
if len( fields ) != 5:
print 'Bad record: %s' % l.strip()
else:
result[fields[0]] = cookbook.Bunch(
chromosome = fields[1],
start = int( fields[2] ),
end = int( fields[3] ),
positive_sense = int( fields[4] ) == 1
)
return result
| mit | -5,305,037,074,829,856,000 | 32.737705 | 82 | 0.543732 | false |
TecnoSalta/bg | mezzanine/galleries/models.py | 1 | 5827 | from __future__ import unicode_literals
from future.builtins import str
from future.utils import native, PY2
from io import BytesIO
import os
from string import punctuation
from zipfile import ZipFile
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
try:
from django.utils.encoding import force_text
except ImportError:
# Django < 1.5
from django.utils.encoding import force_unicode as force_text
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.fields import FileField
from mezzanine.core.models import Orderable, RichText
from mezzanine.pages.models import Page
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.models import upload_to
# Set the directory where gallery images are uploaded to,
# either MEDIA_ROOT + 'galleries', or filebrowser's upload
# directory if being used.
GALLERIES_UPLOAD_DIR = "galleries"
if settings.PACKAGE_NAME_FILEBROWSER in settings.INSTALLED_APPS:
fb_settings = "%s.settings" % settings.PACKAGE_NAME_FILEBROWSER
try:
GALLERIES_UPLOAD_DIR = import_dotted_path(fb_settings).DIRECTORY
except ImportError:
pass
class Gallery(Page, RichText):
"""
Page bucket for gallery photos.
"""
zip_import = models.FileField(verbose_name=_("Zip import"), blank=True,
upload_to=upload_to("galleries.Gallery.zip_import", "galleries"),
help_text=_("Upload a zip file containing images, and "
"they'll be imported into this gallery."))
class Meta:
verbose_name = _("Gallery")
verbose_name_plural = _("Galleries")
def save(self, delete_zip_import=True, *args, **kwargs):
"""
If a zip file is uploaded, extract any images from it and add
them to the gallery, before removing the zip file.
"""
super(Gallery, self).save(*args, **kwargs)
if self.zip_import:
zip_file = ZipFile(self.zip_import)
for name in zip_file.namelist():
data = zip_file.read(name)
try:
from PIL import Image
image = Image.open(BytesIO(data))
image.load()
image = Image.open(BytesIO(data))
image.verify()
except ImportError:
pass
except:
continue
name = os.path.split(name)[1]
# This is a way of getting around the broken nature of
# os.path.join on Python 2.x. See also the comment below.
if PY2:
tempname = name.decode('utf-8')
else:
tempname = name
# A gallery with a slug of "/" tries to extract files
# to / on disk; see os.path.join docs.
slug = self.slug if self.slug != "/" else ""
path = os.path.join(GALLERIES_UPLOAD_DIR, slug, tempname)
try:
saved_path = default_storage.save(path, ContentFile(data))
except UnicodeEncodeError:
from warnings import warn
warn("A file was saved that contains unicode "
"characters in its path, but somehow the current "
"locale does not support utf-8. You may need to set "
"'LC_ALL' to a correct value, eg: 'en_US.UTF-8'.")
# The native() call is needed here around str because
# os.path.join() in Python 2.x (in posixpath.py)
# mixes byte-strings with unicode strings without
# explicit conversion, which raises a TypeError as it
# would on Python 3.
path = os.path.join(GALLERIES_UPLOAD_DIR, slug,
native(str(name, errors="ignore")))
saved_path = default_storage.save(path, ContentFile(data))
self.images.add(GalleryImage(file=saved_path))
if delete_zip_import:
zip_file.close()
self.zip_import.delete(save=True)
@python_2_unicode_compatible
class GalleryImage(Orderable):
gallery = models.ForeignKey("Gallery", related_name="images")
file = FileField(_("File"), max_length=200, format="Image",
upload_to=upload_to("galleries.GalleryImage.file", "galleries"))
description = models.CharField(_("Description"), max_length=1000,
blank=True)
class Meta:
verbose_name = _("Image")
verbose_name_plural = _("Images")
def __str__(self):
return self.description
def save(self, *args, **kwargs):
"""
If no description is given when created, create one from the
file name.
"""
if not self.id and not self.description:
name = force_text(self.file.name)
name = name.rsplit("/", 1)[-1].rsplit(".", 1)[0]
name = name.replace("'", "")
name = "".join([c if c not in punctuation else " " for c in name])
# str.title() doesn't deal with unicode very well.
# http://bugs.python.org/issue6412
name = "".join([s.upper() if i == 0 or name[i - 1] == " " else s
for i, s in enumerate(name)])
self.description = name
super(GalleryImage, self).save(*args, **kwargs)
| bsd-2-clause | 197,956,677,122,037,630 | 39.621429 | 78 | 0.565471 | false |
VCG/gp | slurm/patchgen/patchgen3.py | 1 | 4341 | import sys
sys.path.append('../../')
import gp
import glob
import mahotas as mh
import numpy as np
import os
import time
def read_section(path, z, verbose=True, withNP=True):
'''
'''
image = sorted(glob.glob(os.path.join(path, 'image', '*_'+str(z).zfill(9)+'_image.png')))
mask = sorted(glob.glob(os.path.join(path, 'mask', 'z'+str(z).zfill(8)+'.png')))
gold = sorted(glob.glob(os.path.join(path, 'gold', 'z'+str(z).zfill(8)+'.png')))
if withNP:
# load from neuroproof
rhoana = sorted(glob.glob(os.path.join(path, 'neuroproof', '*_'+str(z).zfill(9)+'_neuroproof.png')))
else:
# load from segmentation
rhoana = sorted(glob.glob(os.path.join(path, 'segmentation', '*_'+str(z).zfill(9)+'_segmentation.png')))
prob = sorted(glob.glob(os.path.join(path, 'prob', str(z).zfill(4)+'.png')))
if verbose:
print 'Loading', os.path.basename(image[0])
image = mh.imread(image[0])
mask = mh.imread(mask[0]).astype(np.bool)
gold = mh.imread(gold[0])
rhoana = mh.imread(rhoana[0])
prob = mh.imread(prob[0])
#convert ids from rgb to single channel
rhoana_single = np.zeros((rhoana.shape[0], rhoana.shape[1]), dtype=np.uint64)
rhoana_single[:, :] = rhoana[:,:,0]*256*256 + rhoana[:,:,1]*256 + rhoana[:,:,2]
gold_single = np.zeros((gold.shape[0], gold.shape[1]), dtype=np.uint64)
gold_single[:, :] = gold[:,:,0]*256*256 + gold[:,:,1]*256 + gold[:,:,2]
# # relabel the segmentations
# gold_single = Util.relabel(gold_single)
# rhoana_single = Util.relabel(rhoana_single)
#mask the rhoana output
rhoana_single[mask==0] = 0
return image, prob, mask, gold_single, rhoana_single
def generate_patches(start_slice, end_slice, withNP):
patch_index = 0
all_patches_count = 0
patch_list = []
all_error_patches = []
all_correct_patches = []
for z in range(start_slice, end_slice):
t0 = time.time()
print 'working on slice', z
input_image, input_prob, input_mask, input_gold, input_rhoana = read_section('/n/regal/pfister_lab/haehn/FINAL/cylinder/',z, False, withNP)
error_patches, patches = gp.Patch.patchify_maxoverlap(input_image, input_prob, input_mask, input_rhoana, input_gold, sample_rate=1)
print 'Generated', len(error_patches), 'split error patches and', len(patches), ' correct patches in', time.time()-t0, 'seconds.'
patch_list.append(patches)
all_error_patches += error_patches
all_correct_patches += patches
NO_PATCHES = len(all_error_patches) + len(all_correct_patches)
print 'We have a total of',NO_PATCHES,'patches.'
print 'Errors:',len(all_error_patches)
print 'Correct:',len(all_correct_patches)
PATCH_BYTES = 75*75
P_SIZE = (NO_PATCHES, 4, 75,75) # rather than raveled right now
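    # The 4 channels per 75x75 patch, filled below, are: raw image, inverted membrane
    # probability, merged-segment mask, and the larger-border-overlap mask.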
p_rgba = np.zeros(P_SIZE, dtype=np.float32)
p_target = np.zeros(NO_PATCHES)
i = 0
for p in all_error_patches:
p_rgba[i][0] = p['image']
p_rgba[i][1] = 1. - p['prob'] ### INVERT PROB
p_rgba[i][2] = p['merged_array']
p_rgba[i][3] = p['larger_border_overlap']
p_target[i] = 1 # <--- important
i += 1
for p in all_correct_patches:
p_rgba[i][0] = p['image']
p_rgba[i][1] = 1. - p['prob'] ### INVERT PROB
p_rgba[i][2] = p['merged_array']
p_rgba[i][3] = p['larger_border_overlap']
p_target[i] = 0 # <--- important
i+=1
return p_rgba, p_target
def shuffle_in_unison_inplace(a, b):
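    # Apply one shared random permutation to both arrays so each patch stays aligned
    # with its target label.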
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
def run(PATCH_PATH, start_slice, end_slice, filename, withNP):
if not os.path.exists(PATCH_PATH):
os.makedirs(PATCH_PATH)
p = generate_patches(start_slice, end_slice, withNP)
shuffled = shuffle_in_unison_inplace(p[0],
p[1]
)
print 'saving..'
np.save(PATCH_PATH+filename+'.npz', shuffled[0])
np.save(PATCH_PATH+filename+'_targets.npz', shuffled[1])
print 'Done!'
# run('/n/regal/pfister_lab/haehn/FINAL/IPMLB', 10, 20, 'train', True)
run('/n/regal/pfister_lab/haehn/FINAL/IPMLB_before_NP/', 100, 150, 'train3b', False)
| mit | 7,463,076,509,551,324,000 | 28.331081 | 147 | 0.589265 | false |
ESOedX/edx-platform | cms/djangoapps/contentstore/tests/test_clone_course.py | 1 | 6390 | """
Unit tests for cloning a course between the same and different module stores.
"""
from __future__ import absolute_import
import json
import six
from django.conf import settings
from mock import Mock, patch
from opaque_keys.edx.locator import CourseLocator
from contentstore.tasks import rerun_course
from contentstore.tests.utils import CourseTestCase
from course_action_state.managers import CourseRerunUIStateManager
from course_action_state.models import CourseRerunState
from student.auth import has_course_author_access
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import EdxJSONEncoder, ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
class CloneCourseTest(CourseTestCase):
"""
Unit tests for cloning a course
"""
def test_clone_course(self):
"""Tests cloning of a course as follows: XML -> Mongo (+ data) -> Mongo -> Split -> Split"""
# 1. import and populate test toy course
mongo_course1_id = self.import_and_populate_course()
mongo_course2_id = mongo_course1_id
# 3. clone course (mongo -> split)
with self.store.default_store(ModuleStoreEnum.Type.split):
split_course3_id = CourseLocator(
org="edx3", course="split3", run="2013_Fall"
)
self.store.clone_course(mongo_course2_id, split_course3_id, self.user.id)
self.assertCoursesEqual(mongo_course2_id, split_course3_id)
# 4. clone course (split -> split)
split_course4_id = CourseLocator(
org="edx4", course="split4", run="2013_Fall"
)
self.store.clone_course(split_course3_id, split_course4_id, self.user.id)
self.assertCoursesEqual(split_course3_id, split_course4_id)
def test_space_in_asset_name_for_rerun_course(self):
"""
Tests check the scenario where one course which has an asset with percentage(%) in its
name, it should re-run successfully.
"""
org = 'edX'
course_number = 'CS101'
course_run = '2015_Q1'
display_name = 'rerun'
fields = {'display_name': display_name}
course_assets = set([u'subs_Introduction%20To%20New.srt.sjson'], )
# Create a course using split modulestore
course = CourseFactory.create(
org=org,
number=course_number,
run=course_run,
display_name=display_name,
default_store=ModuleStoreEnum.Type.split
)
# add an asset
asset_key = course.id.make_asset_key('asset', 'subs_Introduction%20To%20New.srt.sjson')
content = StaticContent(
asset_key, 'Dummy assert', 'application/json', 'dummy data',
)
contentstore().save(content)
# Get & verify all assets of the course
assets, count = contentstore().get_all_content_for_course(course.id)
self.assertEqual(count, 1)
self.assertEqual(set([asset['asset_key'].block_id for asset in assets]), course_assets)
# rerun from split into split
split_rerun_id = CourseLocator(org=org, course=course_number, run="2012_Q2")
CourseRerunState.objects.initiated(course.id, split_rerun_id, self.user, fields['display_name'])
result = rerun_course.delay(
six.text_type(course.id),
six.text_type(split_rerun_id),
self.user.id,
json.dumps(fields, cls=EdxJSONEncoder)
)
# Check if re-run was successful
self.assertEqual(result.get(), "succeeded")
rerun_state = CourseRerunState.objects.find_first(course_key=split_rerun_id)
self.assertEqual(rerun_state.state, CourseRerunUIStateManager.State.SUCCEEDED)
def test_rerun_course(self):
"""
Unit tests for :meth: `contentstore.tasks.rerun_course`
"""
mongo_course1_id = self.import_and_populate_course()
# rerun from mongo into split
split_course3_id = CourseLocator(
org="edx3", course="split3", run="rerun_test"
)
# Mark the action as initiated
fields = {'display_name': 'rerun'}
CourseRerunState.objects.initiated(mongo_course1_id, split_course3_id, self.user, fields['display_name'])
result = rerun_course.delay(six.text_type(mongo_course1_id), six.text_type(split_course3_id), self.user.id,
json.dumps(fields, cls=EdxJSONEncoder))
self.assertEqual(result.get(), "succeeded")
self.assertTrue(has_course_author_access(self.user, split_course3_id), "Didn't grant access")
rerun_state = CourseRerunState.objects.find_first(course_key=split_course3_id)
self.assertEqual(rerun_state.state, CourseRerunUIStateManager.State.SUCCEEDED)
# try creating rerunning again to same name and ensure it generates error
result = rerun_course.delay(six.text_type(mongo_course1_id), six.text_type(split_course3_id), self.user.id)
self.assertEqual(result.get(), "duplicate course")
# the below will raise an exception if the record doesn't exist
CourseRerunState.objects.find_first(
course_key=split_course3_id,
state=CourseRerunUIStateManager.State.FAILED
)
# try to hit the generic exception catch
with patch('xmodule.modulestore.split_mongo.mongo_connection.MongoConnection.insert_course_index', Mock(side_effect=Exception)):
split_course4_id = CourseLocator(org="edx3", course="split3", run="rerun_fail")
fields = {'display_name': 'total failure'}
CourseRerunState.objects.initiated(split_course3_id, split_course4_id, self.user, fields['display_name'])
result = rerun_course.delay(six.text_type(split_course3_id), six.text_type(split_course4_id), self.user.id,
json.dumps(fields, cls=EdxJSONEncoder))
self.assertIn("exception: ", result.get())
self.assertIsNone(self.store.get_course(split_course4_id), "Didn't delete course after error")
CourseRerunState.objects.find_first(
course_key=split_course4_id,
state=CourseRerunUIStateManager.State.FAILED
)
| agpl-3.0 | -2,465,608,985,981,198,300 | 44.642857 | 136 | 0.654617 | false |
DanielTakeshi/rl_algorithms | trpo/trpo.py | 1 | 19601 | """
This contains the TRPO class. Following John's code, this will contain the bulk
of the Tensorflow construction and related code. Call this from the `main.py`
script.
(c) April 2017 by Daniel Seita, based upon `starter code` by John Schulman, who
used a Theano version.
"""
import gym
import numpy as np
import tensorflow as tf
import time
import utils_trpo
from collections import defaultdict
from fxn_approx import *
np.set_printoptions(suppress=True, precision=5, edgeitems=10)
import sys
if "../" not in sys.path:
sys.path.append("../")
from utils import utils_pg as utils
from utils import logz
class TRPO:
""" A TRPO agent. The constructor builds its computational graph. """
def __init__(self, args, sess, env, vf_params):
""" Initializes the TRPO agent. For now, assume continuous control, so
we'll be outputting the mean of Gaussian policies.
It's similar to John Schulman's code. Here, `args` plays roughly the
role of his `usercfg`, and we also initialize the computational graph
here, this time in Tensorflow and not Theano. In his code, agents are
already outfitted with value functions and policy functions, among other
things. We do something similar by supplying the value function as
input. For symbolic variables, I try to be consistent with the naming
conventions at the end with `n`, `o`, and/or `a` to describe dimensions.
"""
self.args = args
self.sess = sess
self.env = env
self.ob_dim = ob_dim = env.observation_space.shape[0]
self.ac_dim = ac_dim = env.action_space.shape[0]
if args.vf_type == 'linear':
self.vf = LinearValueFunction(**vf_params)
elif args.vf_type == 'nn':
self.vf = NnValueFunction(session=sess, ob_dim=ob_dim, **vf_params)
# Placeholders for the feed_dicts, i.e. the "beginning" of the graph.
self.ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
self.ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
self.adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
# Constructing the policy network, mapping from states -> mean vector.
self.h1 = utils.lrelu(utils.dense(self.ob_no, 64, "h1", weight_init=utils.normc_initializer(1.0)))
self.h2 = utils.lrelu(utils.dense(self.h1, 64, "h2", weight_init=utils.normc_initializer(1.0)))
# Last layer of the network to get the mean, plus also an `old` version.
self.mean_na = utils.dense(self.h2, ac_dim, "mean", weight_init=utils.normc_initializer(0.05))
self.oldmean_na = tf.placeholder(shape=[None, ac_dim], name='oldmean', dtype=tf.float32)
# The log standard deviation *vector*, to be concatenated with the mean vector.
self.logstd_a = tf.get_variable("logstd", [ac_dim], initializer=tf.zeros_initializer())
self.oldlogstd_a = tf.placeholder(shape=[ac_dim], name="oldlogstd", dtype=tf.float32)
# In VPG, use logprob in surrogate loss. In TRPO, we also need the old one.
self.logprob_n = utils.gauss_log_prob_1(mu=self.mean_na, logstd=self.logstd_a, x=self.ac_na)
self.oldlogprob_n = utils.gauss_log_prob_1(mu=self.oldmean_na, logstd=self.oldlogstd_a, x=self.ac_na)
self.surr = - tf.reduce_mean(self.adv_n * tf.exp(self.logprob_n - self.oldlogprob_n))
# Sample the action. Here, self.mean_na should be of shape (1,a).
self.sampled_ac = (tf.random_normal(tf.shape(self.mean_na)) * tf.exp(self.logstd_a) + self.mean_na)[0]
# Diagnostics, KL divergence, entropy.
self.kl = tf.reduce_mean(utils.gauss_KL_1(self.mean_na, self.logstd_a, self.oldmean_na, self.oldlogstd_a))
self.ent = 0.5 * ac_dim * tf.log(2.*np.pi*np.e) + 0.5 * tf.reduce_sum(self.logstd_a)
# Do we need these?
## self.nbatch = tf.shape(self.ob_no)[0] (maybe)
## self.stepsize = tf.placeholder(shape=[], dtype=tf.float32) (maybe)
## self.update_op = tf.train.AdamOptimizer(sy_stepsize).minimize(sy_surr) (almost surely delete)
# Policy gradient vector. Only weights for the policy net, NOT value function.
if args.vf_type == 'linear':
self.params = tf.trainable_variables()
elif args.vf_type == 'nn':
self.params = [x for x in tf.trainable_variables() if 'nnvf' not in x.name]
self.pg = self._flatgrad(self.surr, self.params)
assert len((self.pg).get_shape()) == 1
# Prepare the Fisher-Vector product computation. I _think_ this is how
# to do it, stopping gradients from the _current_ policy (not the old
# one) so that the KL divegence is computed with a fixed first argument.
# It seems to make sense from John Schulman's slides. Also, the
# reduce_mean here should be the mean KL approximation to the max KL.
kl_firstfixed = tf.reduce_mean(utils.gauss_KL_1(
tf.stop_gradient(self.mean_na),
tf.stop_gradient(self.logstd_a),
self.mean_na,
self.logstd_a
))
grads = tf.gradients(kl_firstfixed, self.params)
# Here, `flat_tangent` is a placeholder vector of size equal to #of (PG)
# params. Then `tangents` contains various subsets of that vector.
self.flat_tangent = tf.placeholder(tf.float32, shape=[None], name="flat_tangent")
shapes = [var.get_shape().as_list() for var in self.params]
start = 0
tangents = []
for shape in shapes:
size = np.prod(shape)
tangents.append(tf.reshape(self.flat_tangent[start:start+size], shape))
start += size
self.num_params = start
# Do elementwise g*tangent then sum components, then add everything at the end.
# John Schulman used T.add(*[...]). The TF equivalent seems to be tf.add_n.
assert len(grads) == len(tangents)
self.gradient_vector_product = tf.add_n(inputs=
[tf.reduce_sum(g*tangent) for (g, tangent) in zip(grads, tangents)]
)
# The actual Fisher-vector product operation, where the gradients are
# taken w.r.t. the "loss" function `gvp`. I _think_ the `grads` from
# above computes the first derivatives, and then the `gvp` is computing
# the second derivatives. But what about hessian_vector_product?
self.fisher_vector_product = self._flatgrad(self.gradient_vector_product, self.params)
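        # Differentiating the gradient-vector product w.r.t. the parameters yields the
        # Hessian-vector product of the (mean) KL divergence, i.e. a Fisher-vector product,
        # without ever materializing the full Fisher matrix.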
# Deal with logic about *getting* parameters (as a flat vector).
self.get_params_flat_op = tf.concat([tf.reshape(v, [-1]) for v in self.params], axis=0)
# Finally, deal with logic about *setting* parameters.
self.theta = tf.placeholder(tf.float32, shape=[self.num_params], name="theta")
start = 0
updates = []
for v in self.params:
shape = v.get_shape()
size = tf.reduce_prod(shape)
# Note that tf.assign(ref, value) assigns `value` to `ref`.
updates.append(
tf.assign(v, tf.reshape(self.theta[start:start+size], shape))
)
start += size
self.set_params_flat_op = tf.group(*updates) # Performs all updates together.
print("In TRPO init, shapes:\n{}\nstart={}".format(shapes, start))
print("self.pg: {}\ngvp: {}\nfvp: {}".format(self.pg,
self.gradient_vector_product, self.fisher_vector_product))
print("Finished with the TRPO agent initialization.")
self.start_time = time.time()
def update_policy(self, paths, infodict):
""" Performs the TRPO policy update based on a minibach of data.
Note: this is mostly where the differences between TRPO and VPG become
apparent. We do a conjugate gradient step followed by a line search. I'm
not sure if we should be adjusting the step size based on the KL
divergence, as we did in VPG. Right now we don't. This is where we do a
lot of session calls, FYI.
Params:
paths: A LIST of defaultdicts with information from the rollouts.
infodict: A dictionary with statistics for logging later.
"""
prob_np = np.concatenate([path["prob"] for path in paths])
ob_no = np.concatenate([path["observation"] for path in paths])
action_na = np.concatenate([path["action"] for path in paths])
adv_n = np.concatenate([path["advantage"] for path in paths])
assert prob_np.shape[0] == ob_no.shape[0] == action_na.shape[0] == adv_n.shape[0]
assert len(prob_np.shape) == len(ob_no.shape) == len(action_na.shape) == 2
assert len(adv_n.shape) == 1
# Daniel: simply gets a flat vector of the parameters.
thprev = self.sess.run(self.get_params_flat_op)
# Make a feed to avoid clutter later. Note, our code differs slightly
# from John Schulman as we have to explicitly provide the old means and
# old logstds, which we concatenated together into the `prob` keyword.
# The mean is the first half and the logstd is the second half.
k = self.ac_dim
feed = {self.ob_no: ob_no,
self.ac_na: action_na,
self.adv_n: adv_n,
self.oldmean_na: prob_np[:,:k],
self.oldlogstd_a: prob_np[0,k:]} # Use 0 because all logstd are same.
# Had to add the extra flat_tangent to the feed, otherwise I'd get errors.
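        # The cg_damping term below adds a small multiple of the identity to the Fisher
        # matrix (Tikhonov damping), keeping the conjugate-gradient solve well conditioned.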
def fisher_vector_product(p):
feed[self.flat_tangent] = p
fvp = self.sess.run(self.fisher_vector_product, feed_dict=feed)
return fvp + self.args.cg_damping*p
# Get the policy gradient. Also the losses, for debugging.
g = self.sess.run(self.pg, feed_dict=feed)
surrloss_before, kl_before, ent_before = self.sess.run(
[self.surr, self.kl, self.ent], feed_dict=feed)
assert kl_before == 0
if np.allclose(g, 0):
print("\tGot zero gradient, not updating ...")
else:
stepdir = utils_trpo.cg(fisher_vector_product, -g)
shs = 0.5*stepdir.dot(fisher_vector_product(stepdir))
lm = np.sqrt(shs / self.args.max_kl)
infodict["LagrangeM"] = lm
fullstep = stepdir / lm
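            # With lm = sqrt(0.5 * s^T H s / max_kl) for the CG direction s, dividing by lm
            # gives a step whose quadratic KL estimate 0.5 * step^T H step equals max_kl.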
neggdotstepdir = -g.dot(stepdir)
# Returns the current self.surr (surrogate loss).
def loss(th):
self.sess.run(self.set_params_flat_op, feed_dict={self.theta: th})
return self.sess.run(self.surr, feed_dict=feed)
# Update the weights using `self.set_params_flat_op`.
success, theta = utils_trpo.backtracking_line_search(loss,
thprev, fullstep, neggdotstepdir/lm)
self.sess.run(self.set_params_flat_op, feed_dict={self.theta: theta})
surrloss_after, kl_after, ent_after = self.sess.run(
[self.surr, self.kl, self.ent], feed_dict=feed)
logstd_new = self.sess.run(self.logstd_a, feed_dict=feed)
print("logstd new = {}".format(logstd_new))
# For logging later.
infodict["gNorm"] = np.linalg.norm(g)
infodict["Success"] = success
infodict["LagrangeM"] = lm
infodict["pol_surr_before"] = surrloss_before
infodict["pol_surr_after"] = surrloss_after
infodict["pol_kl_before"] = kl_before
infodict["pol_kl_after"] = kl_after
infodict["pol_ent_before"] = ent_before
infodict["pol_ent_after"] = ent_after
def _flatgrad(self, loss, var_list):
""" A Tensorflow version of John Schulman's `flatgrad` function. It
computes the gradients but does NOT apply them (for now).
This is only called during the `init` of the TRPO graph, so I think it's
OK. Otherwise, wouldn't it be constantly rebuilding the computational
graph? Or doing something else? Eh, for now I think it's OK.
Params:
loss: The loss function we're optimizing, which I assume is always
scalar-valued.
var_list: The list of variables (from `tf.trainable_variables()`) to
take gradients. This should only be for the policynets.
Returns:
A single flat vector with all gradients concatenated.
"""
grads = tf.gradients(loss, var_list)
return tf.concat([tf.reshape(g, [-1]) for g in grads], axis=0)
def _act(self, ob):
""" A "private" method for the TRPO agent so that it acts and then can
provide extra information.
Note that the mean and logstd here are for the current policy. There is
no updating done here; that's done _afterwards_. The agentinfo is a
vector of shape (2a,) where a is the action dimension.
"""
action, mean, logstd = self.sess.run(
[self.sampled_ac, self.mean_na, self.logstd_a],
feed_dict={self.ob_no : ob[None]}
)
agentinfo = dict()
agentinfo["prob"] = np.concatenate((mean.flatten(), logstd.flatten()))
return (action, agentinfo)
def get_paths(self, seed_iter, env):
""" Computes the paths, which contains all the information from the
rollouts that we need for the TRPO update.
We run enough times (which may be many episodes) as desired from our
user-provided parameters, storing relevant material into `paths` for
future use. The main difference from VPG is that we have to get extra
information about the current log probabilities (which will later be the
_old_ log probs) when calling self.act(ob).
Equivalent to John Schulman's `do_rollouts_serial` and `do_rollouts`.
It's easy to put all lists inside a single defaultdict.
Params:
seed_iter: Itertools for getting new random seeds via incrementing.
env: The current OpenAI gym environment.
Returns:
paths: A _list_ where each element is a _dictionary_ corresponding
to statistics from ONE episode.
"""
paths = []
timesteps_sofar = 0
while True:
np.random.seed(seed_iter.next())
ob = env.reset()
data = defaultdict(list)
# Run one episode and put the data inside `data`, then in `paths`.
while True:
data["observation"].append(ob)
action, agentinfo = self._act(ob)
data["action"].append(action)
for (k,v) in agentinfo.iteritems():
data[k].append(v)
ob, rew, done, _ = env.step(action)
data["reward"].append(rew)
if done:
break
data = {k:np.array(v) for (k,v) in data.iteritems()}
paths.append(data)
timesteps_sofar += utils.pathlength(data)
if (timesteps_sofar >= self.args.min_timesteps_per_batch):
break
return paths
def compute_advantages(self, paths):
""" Computes standardized advantages from data collected during the most
recent set of rollouts.
No need to return anything, because advantages can be stored in `paths`.
        Also, self.vf is used to estimate the baseline to reduce variance, and
        the value function is later refit against the discounted returns stored
        in `path["reward"]`. Finally, note that the iteration over `paths` means each
`path` item is a dictionary, corresponding to the statistics garnered
over ONE episode. This makes computing the discount easy since we don't
have to worry about crossing over different episodes.
Params:
paths: A LIST of defaultdicts with information from the rollouts.
Each defaultdict element contains information about ONE episode.
"""
for path in paths:
path["reward"] = utils.discount(path["reward"], self.args.gamma)
path["baseline"] = self.vf.predict(path["observation"])
path["advantage"] = path["reward"] - path["baseline"]
adv_n = np.concatenate([path["advantage"] for path in paths])
for path in paths:
path["advantage"] = (path["advantage"] - adv_n.mean()) / (adv_n.std() + 1e-8)
def fit_value_function(self, paths, vfdict):
""" Fits the TRPO's value function with the current minibatch of data.
Also takes in another dictionary, `vfdict`, for relevant statistics
related to the value function.
"""
ob_no = np.concatenate([path["observation"] for path in paths])
vtarg_n = np.concatenate([path["reward"] for path in paths])
assert ob_no.shape[0] == vtarg_n.shape[0]
out = self.vf.fit(ob_no, vtarg_n)
for key in out:
vfdict[key] = out[key]
def log_diagnostics(self, paths, infodict, vfdict):
""" Just logging using the `logz` functionality. """
ob_no = np.concatenate([path["observation"] for path in paths])
vpred_n = np.concatenate([path["baseline"] for path in paths])
vtarg_n = np.concatenate([path["reward"] for path in paths])
elapsed_time = (time.time() - self.start_time) # In seconds
episode_rewards = np.array([path["reward"].sum() for path in paths])
episode_lengths = np.array([utils.pathlength(path) for path in paths])
# These are *not* logged in John Schulman's code.
#logz.log_tabular("Success", infodict["Success"])
#logz.log_tabular("LagrangeM", infodict["LagrangeM"])
#logz.log_tabular("gNorm", infodict["gNorm"])
# These *are* logged in John Schulman's code. First, rewards:
logz.log_tabular("NumEpBatch", len(paths))
logz.log_tabular("EpRewMean", episode_rewards.mean())
logz.log_tabular("EpRewMax", episode_rewards.max())
logz.log_tabular("EpRewSEM", episode_rewards.std()/np.sqrt(len(paths)))
logz.log_tabular("EpLenMean", episode_lengths.mean())
logz.log_tabular("EpLenMax", episode_lengths.max())
logz.log_tabular("RewPerStep", episode_rewards.sum()/episode_lengths.sum())
logz.log_tabular("vf_mse_before", vfdict["MSEBefore"])
logz.log_tabular("vf_mse_after", vfdict["MSEAfter"])
logz.log_tabular("vf_PredStdevBefore", vfdict["PredStdevBefore"])
logz.log_tabular("vf_PredStdevAfter", vfdict["PredStdevAfter"])
logz.log_tabular("vf_TargStdev", vfdict["TargStdev"])
logz.log_tabular("vf_EV_before", utils.explained_variance_1d(vpred_n, vtarg_n))
logz.log_tabular("vf_EV_after", utils.explained_variance_1d(self.vf.predict(ob_no), vtarg_n))
# If overfitting, EVAfter >> EVBefore. Also, we fit the value function
# _after_ using it to compute the baseline to avoid introducing bias.
logz.log_tabular("pol_surr_before", infodict["pol_surr_before"])
logz.log_tabular("pol_surr_after", infodict["pol_surr_after"])
logz.log_tabular("pol_kl_before", infodict["pol_kl_before"])
logz.log_tabular("pol_kl_after", infodict["pol_kl_after"])
logz.log_tabular("pol_ent_before", infodict["pol_ent_before"])
logz.log_tabular("pol_ent_after", infodict["pol_ent_after"])
logz.log_tabular("TimeElapsed", elapsed_time)
logz.dump_tabular()
| mit | 8,762,082,859,905,008,000 | 47.880299 | 115 | 0.617366 | false |
SecuredByTHEM/ndr | ndr/cert_request.py | 1 | 2219 | # This file is part of NDR.
#
# Copyright (C) 2017 - Secured By THEM
# Original Author: Michael Casadevall <[email protected]>
#
# NDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NDR. If not, see <http://www.gnu.org/licenses/>.
'''Certificate requests are used for both creating the initial SSL certificates
for S/MIME signing/Client auth, and renewal requests.'''
import os
from enum import Enum
import ndr
class CertificateRequest(ndr.IngestMessage):
'''Certificate Requests'''
def __init__(self, config=None):
self.config = config
self.csr = None
self.certificate = None
self.certificate_chain = None
self.root_certificate = None
ndr.IngestMessage.__init__(
self, config, ndr.IngestMessageTypes.CERTIFICATE_REQUEST)
def from_message(self, ingest_msg: ndr.IngestMessage):
'''Converts an ingest message to a Certificate Request record'''
super().from_message(ingest_msg)
self.csr = self.headers['csr']
self.certificate = self.headers['certificate']
self.certificate_chain = self.headers['certificate_chain']
self.root_certificate = self.headers['root_certificate']
return self
def create_report(self):
self.add_header('csr', self.csr)
self.add_header('certificate', self.certificate)
self.add_header('certificate_chain', self.certificate_chain)
self.add_header('root_certificate', self.root_certificate)
super().create_report()
class CertificateRequestTypes(Enum):
'''Indicates the type for certificate enlistment'''
NEW_CERTIFICATE = "new_certificate"
SIGNED_CERTIFICATE = "signed_certificate"
RENEWAL = "renew"
| gpl-3.0 | -5,842,498,683,836,158,000 | 35.983333 | 79 | 0.701667 | false |
aswarren/GOGranny | GOGranny/semGODiGraph.py | 1 | 8815 | #class with two way transitive closure (ancestors and descendants) computed for nodes for the purpose of calculating the Graph Entropy
#author: Andrew Warren [email protected]
#NOTE: There are obsolete terms. Not in the graph itself and not in the nodes or node iterator but in the term lookup table (node_lookup)
from GOGranny import GODiGraph
from GOGranny import Aspects
from GOGranny import GONode
from math import log
import shelve, cPickle
import sys
#this class computes the transitive closure on all nodes
#and is used for computing the IC and term similarity for all nodes
class semGODiGraph(GODiGraph):
def __init__(self, storage, aspect=Aspects.BP, storage_path="none"):
GODiGraph.__init__(self, storage, species="none", aspect=aspect)#no species
#for retreiving the node by GOID
self.storage_path=storage_path
self.leaves=set()
self.tranState=False #transitive closure has not been calc.
#adds prob, suprisal, and gIC to each node
self.totalSuprisal=0
self.avgSuprisal=0
self.maxGraphIC=0
self.num_nodes=0
def semMakeGraph(self):
self.check_closure()
self.num_nodes=self.number_of_nodes()
def makeGraph(self, annotatedTermsOnly=False):
self.node_lookup=GODiGraph.makeGraph(self, annotatedTermsOnly)
self.semMakeGraph()
##Compute the transitive closure of the graph so that the graph based information content can be calculated
#also determines which nodes are leaves
#Stores a set of ancestors and descendants at each node: each set is made up of node pointers
#DFS traversal
def trans_closure(self, currentNode, seenabove):
if hasattr(currentNode, 'ancestors'):
currentNode.ancestors=currentNode.ancestors.union(seenabove)
else:
currentNode.ancestors=seenabove.copy()
seenabove.add(currentNode)
seenbelow=set()
for c in self.predecessors(currentNode):
seenbelow.add(c)
seenbelow=seenbelow.union(self.trans_closure(c,seenabove.copy()))
currentNode.descendants=seenbelow.copy()
if len(currentNode.descendants)==0:
self.leaves.add(currentNode)
currentNode.leaf = True
return seenbelow.copy()
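    # Illustrative sketch (not part of the original): assuming edges point from a
    # child term to its parent, a simple chain root <- A <- B is left with
    #   B.ancestors == {root, A}, B.descendants == set()   (B lands in self.leaves)
    #   A.ancestors == {root},    A.descendants == {B}
    # check_closure() below is the entry point that runs this traversal exactly once.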
##Check if the trans_closure has been computed and if not do it
def check_closure(self):
if(not self.tranState):
root_ancestor=set()
self.trans_closure(self.root, root_ancestor)
self.tranState=True
##converts the aspect type of the ontology into a string
def getAspect(self):
if(self.aspect==Aspects.BP):
return "BP"
elif(self.aspect==Aspects.CC):
return "CC"
elif(self.aspect==Aspects.MF):
return "MF"
##Determine the uncertainty of the graph when a term is asserted as true for a gene
##If no ID is given then the uncertainty of the entire graph is determined
##Parameters: the node for GO term asserted to be true
##whether to exclude the descendants of the node
##WARNING: this function can also be affected by the obsolete term issue
##setting annot_node excludes ancestors
##setting annot_node and exclude_dec=True excludes ancestors and descendants
##setting reroot calculates the entropy of the graph rooted at that node
def calc_graph_uncert(self, annot_node=None, exclude_dec=False, reroot=None, invroot=None):
num_induced=0
excluded=set()
to_subtract=0
num_desc=0
result=0
contribution=0
dep_uncert=0#the dependent uncertainty
        #be careful getting references to sets Ancestors and Descendants!
if reroot:
sub_graph=self.getDecCopy(reroot)
sub_graph.add(reroot)
anc_ignore=self.getAnc(reroot)
cur_num_nodes=len(sub_graph)
init_prob=1/float(cur_num_nodes)
init_uncert=-(log(init_prob)/log(2))
for j in sub_graph:
induced_nodes=self.getAnc(j).intersection(sub_graph)
j_num_induce=len(induced_nodes)
j_num_desc=len(self.getDec(j))
if len(sub_graph)==(j_num_induce+j_num_desc):
j_probability=1
else: j_probability=1/float(len(sub_graph)-j_num_induce-j_num_desc)
contribution+= -log(j_probability)/log(2)
dep_uncert=contribution*init_prob #probability that a block is active * the conditional entropy when it is active
result=dep_uncert+init_uncert
elif invroot:
sub_graph=self.getAncCopy(invroot)
sub_graph.add(invroot)
cur_num_nodes=len(sub_graph)
init_prob=1/float(cur_num_nodes)
init_uncert=-(log(init_prob)/log(2))
for k in sub_graph:
induced_nodes=self.getAnc(k)#.intersection(sub_graph) no intersect needed since truepath
k_num_induce=len(induced_nodes)
k_num_desc=len(self.getDec(k).intersection(sub_graph))
if len(sub_graph)==(k_num_induce+k_num_desc):
k_probability=1
else: k_probability=1/float(len(sub_graph)-k_num_induce-k_num_desc)
contribution+= -log(k_probability)/log(2)
dep_uncert=contribution*init_prob #probability that a block is active * the conditional entropy when it is active
result=dep_uncert+init_uncert
else:
if(annot_node != None):
excluded=self.getAnc(annot_node)#get the ancestors of the node
if exclude_dec: excluded=excluded.union(self.getDec(annot_node))
num_induced=len(excluded)#not +1 because though +1 for itself -1 for the root node which should always be present
#num_desc+=len(self.getDec(annot_node)) #do not need to get the number of descendants for the previous annotation
cur_num_nodes=self.num_nodes-num_induced#the number of nodes currently in the graph given a previous annotation
init_prob=1/float(cur_num_nodes)#initial probability is 1/number of nodes left in ontology
init_uncert=-(log(init_prob)/log(2))# since all the nodes have equal probability the average ends up being -log P
#for every node in the ontology get its contribution to the annotation uncertainty
#this part skips the inner loop because the summation will just be the -log(j_prob)
num_root=0
for j in self.nodes_iter():
if (not j in excluded) and (not j == annot_node):#if this term is in the ancestors induced by a node it has a probability of 1 and uncertainty of 0
induced_nodes=self.getAnc(j).union(excluded).union(set([j]))#get the number of nodes that cannot follow this one in an annotation
if annot_node != None: induced_nodes.add(annot_node)
j_num_induce=len(induced_nodes)
j_num_desc=len(self.getDec(j))
if (j_num_induce == 0):
num_root+=1
assert num_root <= 1
if(self.num_nodes==j_num_induce+j_num_desc):
j_probability=1
else: j_probability=1/float(self.num_nodes-j_num_induce-j_num_desc)
contribution+= -log(j_probability)/log(2)
# result+=(1/j_num_induce)
dep_uncert=contribution*init_prob #probability that a block is active * the conditional entropy when it is active
result=dep_uncert+init_uncert
return result
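    # Usage sketch (not part of the original); `g` and `term_node` are hypothetical
    # names for a semGODiGraph instance and one of its GOTermNode objects:
    #   g.check_closure()
    #   whole_graph_bits = g.calc_graph_uncert()
    #   remaining_bits = g.calc_graph_uncert(annot_node=term_node)
    # Roughly, the difference between the two is the information gained by the term.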
##For a set of nodes get all their ancestors
#include the actual nodes themselves
def nodeGetAllAnc(self, nodeset):
result=set()
for t in nodeset:
result.add(t)
result=result.union(t.ancestors)
return result
##For a set of nodes get all their descendants
#include the actual nodes themselves
def nodeGetAllDes(self, nodeset):
result=set()
for t in nodeset:
result.add(t)
result=result.union(t.descendants)
return result
    ##For a node retrieve ancestors
##WARNING: returns a reference to the ancestors set
def getAnc(self, tnode):
if tnode!= None and hasattr(tnode, 'ancestors'):
return tnode.ancestors #this is a reference!
else: return set()
    ##For a node retrieve descendants
##WARNING: returns a reference to the descendants set
def getDec(self, tnode):
if tnode!= None and hasattr(tnode, 'descendants'):
return tnode.descendants #this is a reference!
else: return set()
    ##For a node retrieve a copy of its ancestors set
    def getAncCopy(self, tnode):
        return self.getAnc(tnode).copy()
    ##For a node retrieve a copy of its descendants set
    def getDecCopy(self, tnode):
        return self.getDec(tnode).copy()
##For a set of terms get all their ancestors
#include the actual terms themselves
#WARNING: The code here should be changed to account for obsolete terms being called here
#apparently nodes are still created for obsolete nodes
#if the TERM IS OBSOLETE THEN AN EMPTY SET WILL BE RETURNED
def getAllAnc(self, terms, include_self=True):
result=set()
for t in terms:
if type(t) == GONode.GOTermNode:
n = t
else:
n=self.idGetNode(t)
if n!= None:
if not hasattr(n, 'ancestors'):
sys.stderr.write(str(n.dbid)+" does not have ancestors\n")
return result
if include_self:
result.add(n)
result=result.union(n.ancestors)
return result
##Get Node according to term id
#storage structure tracks obsolete terms and auto_converts to suggested alternatives
def idGetNode(self, term_id):
if term_id == None:
return None
result=self.node_lookup.get(term_id, None)
if term_id in self.aliases:
sys.stderr.write("WARNING: Old GOID autoconverted from "+term_id+" to "+result.dbid+"\n")
return result
| gpl-2.0 | -7,691,478,473,109,416,000 | 36.510638 | 151 | 0.732048 | false |
blueyed/pipsi | pipsi.py | 1 | 12835 | import os
import sys
import shutil
import glob
from os.path import join, realpath, dirname, normpath, normcase
from operator import methodcaller
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import click
from pkg_resources import Requirement
try:
WindowsError
except NameError:
IS_WIN = False
BIN_DIR = 'bin'
else:
IS_WIN = True
BIN_DIR = 'Scripts'
FIND_SCRIPTS_SCRIPT = r'''if 1:
import os
import sys
import pkg_resources
pkg = sys.argv[1]
prefix = sys.argv[2]
dist = pkg_resources.get_distribution(pkg)
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
print(os.path.join(dist.location, line.split(',')[0]))
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
print(os.path.join(dist.egg_info, line.split(',')[0]))
elif dist.has_metadata('entry_points.txt'):
try:
from ConfigParser import SafeConfigParser
from StringIO import StringIO
except ImportError:
from configparser import SafeConfigParser
from io import StringIO
parser = SafeConfigParser()
parser.readfp(StringIO(
'\n'.join(dist.get_metadata_lines('entry_points.txt'))))
if parser.has_section('console_scripts'):
for name, _ in parser.items('console_scripts'):
print(os.path.join(prefix, name))
'''
# The `click` custom context settings
CONTEXT_SETTINGS = dict(
help_option_names=['-h', '--help'],
)
def normalize_package(value):
# Strips the version and normalizes name
requirement = Requirement.parse(value)
return requirement.project_name.lower()
def normalize(path):
return normcase(normpath(realpath(path)))
def real_readlink(filename):
try:
target = os.readlink(filename)
except (OSError, IOError, AttributeError):
return None
return normpath(realpath(join(dirname(filename), target)))
def statusoutput(argv, **kw):
from subprocess import Popen, PIPE
p = Popen(
argv, stdout=PIPE, stderr=PIPE, **kw)
output = p.communicate()[0].strip()
if not isinstance(output, str):
output = output.decode('utf-8', 'replace')
return p.returncode, output
def publish_script(src, dst):
if IS_WIN:
# always copy new exe on windows
shutil.copy(src, dst)
click.echo(' Copied Executable ' + dst)
return True
else:
old_target = real_readlink(dst)
if old_target == src:
return True
try:
os.remove(dst)
except OSError:
pass
try:
os.symlink(src, dst)
except OSError:
pass
else:
click.echo(' Linked script ' + dst)
return True
def find_scripts(virtualenv, package):
prefix = normalize(join(virtualenv, BIN_DIR, ''))
files = statusoutput([
join(prefix, 'python'), '-c', FIND_SCRIPTS_SCRIPT,
package, prefix
])[1].splitlines()
files = map(normalize, files)
files = filter(
methodcaller('startswith', prefix),
files,
)
    def valid(filename):
        # keep regular files; on Windows skip the executable-bit check
        return os.path.isfile(filename) and (
            IS_WIN or os.access(filename, os.X_OK))
result = list(filter(valid, files))
if IS_WIN:
for filename in files:
globed = glob.glob(filename + '*')
result.extend(filter(valid, globed))
return result
class UninstallInfo(object):
def __init__(self, package, paths=None, installed=True):
self.package = package
self.paths = paths or []
self.installed = installed
def perform(self):
for path in self.paths:
try:
os.remove(path)
except OSError:
shutil.rmtree(path)
class Repo(object):
def __init__(self, home, bin_dir):
self.home = home
self.bin_dir = bin_dir
def resolve_package(self, spec, python=None):
url = urlparse(spec)
if url.netloc == 'file':
location = url.path
elif url.netloc != '':
if not url.fragment.startswith('egg='):
raise click.UsageError('When installing from URLs you need '
'to add an egg at the end. For '
'instance git+https://.../#egg=Foo')
return url.fragment[4:], [spec]
elif os.path.isdir(spec):
location = spec
else:
return spec, [spec]
error, name = statusoutput(
[python or sys.executable, 'setup.py', '--name'],
cwd=location)
if error:
raise click.UsageError('%s does not appear to be a local '
'Python package.' % spec)
return name, [location]
def get_package_path(self, package):
return join(self.home, normalize_package(package))
def find_installed_executables(self, path):
prefix = join(realpath(normpath(path)), '')
try:
for filename in os.listdir(self.bin_dir):
exe = os.path.join(self.bin_dir, filename)
target = real_readlink(exe)
if target is None:
continue
if target.startswith(prefix):
yield exe
except OSError:
pass
def link_scripts(self, scripts):
rv = []
for script in scripts:
script_dst = os.path.join(
self.bin_dir, os.path.basename(script))
if publish_script(script, script_dst):
rv.append((script, script_dst))
return rv
def install(self, package, python=None, editable=False):
package, install_args = self.resolve_package(package, python)
venv_path = self.get_package_path(package)
if os.path.isdir(venv_path):
click.echo('%s is already installed' % package)
return
if not os.path.exists(self.bin_dir):
os.makedirs(self.bin_dir)
from subprocess import Popen
def _cleanup():
try:
shutil.rmtree(venv_path)
except (OSError, IOError):
pass
return False
# Install virtualenv, use the pipsi used python version by default
args = ['virtualenv', '-p', python or sys.executable, venv_path]
try:
if Popen(args).wait() != 0:
click.echo('Failed to create virtualenv. Aborting.')
return _cleanup()
args = [os.path.join(venv_path, BIN_DIR, 'pip'), 'install']
if editable:
args.append('--editable')
if Popen(args + install_args).wait() != 0:
click.echo('Failed to pip install. Aborting.')
return _cleanup()
except Exception:
_cleanup()
raise
# Find all the scripts
scripts = find_scripts(venv_path, package)
# And link them
linked_scripts = self.link_scripts(scripts)
# We did not link any, rollback.
if not linked_scripts:
click.echo('Did not find any scripts. Uninstalling.')
return _cleanup()
return True
def uninstall(self, package):
path = self.get_package_path(package)
if not os.path.isdir(path):
return UninstallInfo(package, installed=False)
paths = [path]
paths.extend(self.find_installed_executables(path))
return UninstallInfo(package, paths)
def upgrade(self, package, editable=False):
package, install_args = self.resolve_package(package)
venv_path = self.get_package_path(package)
if not os.path.isdir(venv_path):
click.echo('%s is not installed' % package)
return
from subprocess import Popen
old_scripts = set(find_scripts(venv_path, package))
args = [os.path.join(venv_path, BIN_DIR, 'pip'), 'install',
'--upgrade']
if editable:
args.append('--editable')
if Popen(args + install_args).wait() != 0:
click.echo('Failed to upgrade through pip. Aborting.')
return
scripts = find_scripts(venv_path, package)
linked_scripts = self.link_scripts(scripts)
to_delete = old_scripts - set(x[0] for x in linked_scripts)
for script_src, script_link in linked_scripts:
if script_src in to_delete:
try:
click.echo(' Removing old script %s' % script_src)
os.remove(script_link)
except (IOError, OSError):
pass
def list_everything(self):
venvs = {}
python = '/Scripts/python.exe' if IS_WIN else '/bin/python'
for venv in os.listdir(self.home):
venv_path = os.path.join(self.home, venv)
if os.path.isdir(venv_path) and \
os.path.isfile(venv_path + python):
venvs[venv] = []
def _find_venv(target):
for venv in venvs:
if target.startswith(join(self.home, venv, '')):
return venv
for script in os.listdir(self.bin_dir):
exe = os.path.join(self.bin_dir, script)
target = real_readlink(exe)
if target is None:
continue
venv = _find_venv(target)
if venv is not None:
venvs[venv].append(script)
return sorted(venvs.items())
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option(
    '--home', type=click.Path(), envvar='PIPSI_HOME',
default=os.path.expanduser('~/.local/venvs'),
help='The folder that contains the virtualenvs.')
@click.option(
'--bin-dir', type=click.Path(),
envvar='PIPSI_BIN_DIR',
default=os.path.expanduser('~/.local/bin'),
help='The path where the scripts are symlinked to.')
@click.version_option(
message='%(prog)s, version %(version)s, python ' + str(sys.executable))
@click.pass_context
def cli(ctx, home, bin_dir):
"""pipsi is a tool that uses virtualenv and pip to install shell
tools that are separated from each other.
"""
ctx.obj = Repo(home, bin_dir)
@cli.command()
@click.argument('package')
@click.option('--python', default=None,
help='The python interpreter to use.')
@click.option('--editable', is_flag=True,
help='Enable editable installation. This only works for '
'locally installed packages.')
@click.pass_obj
def install(repo, package, python, editable):
"""Installs scripts from a Python package.
Given a package this will install all the scripts and their dependencies
    of the given Python package into a new virtualenv and symlink the
discovered scripts into BIN_DIR (defaults to ~/.local/bin).
"""
if repo.install(package, python, editable):
click.echo('Done.')
else:
sys.exit(1)
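# Illustrative shell usage (not part of the original module); the package and the
# resulting paths are examples only:
#   pipsi install Pygments     # builds ~/.local/venvs/pygments and links its scripts
#   pipsi upgrade Pygments
#   pipsi uninstall Pygments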
@cli.command()
@click.argument('package')
@click.option('--editable', is_flag=True,
help='Enable editable installation. This only works for '
'locally installed packages.')
@click.pass_obj
def upgrade(repo, package, editable):
"""Upgrades an already installed package."""
if repo.upgrade(package, editable):
click.echo('Done.')
else:
sys.exit(1)
@cli.command(short_help='Uninstalls scripts of a package.')
@click.argument('package')
@click.option('--yes', is_flag=True, help='Skips all prompts.')
@click.pass_obj
def uninstall(repo, package, yes):
"""Uninstalls all scripts of a Python package and cleans up the
virtualenv.
"""
uinfo = repo.uninstall(package)
if not uinfo.installed:
click.echo('%s is not installed' % package)
else:
click.echo('The following paths will be removed:')
for path in uinfo.paths:
click.echo(' %s' % click.format_filename(path))
click.echo()
if yes or click.confirm('Do you want to uninstall %s?' % package):
uinfo.perform()
click.echo('Done!')
else:
click.echo('Aborted!')
sys.exit(1)
@cli.command('list')
@click.pass_obj
def list_cmd(repo):
"""Lists all scripts installed through pipsi."""
click.echo('Packages and scripts installed through pipsi:')
for venv, scripts in repo.list_everything():
if not scripts:
continue
click.echo(' Package "%s":' % venv)
for script in scripts:
click.echo(' ' + script)
if __name__ == '__main__':
cli()
| bsd-3-clause | 5,156,685,130,349,038,000 | 29.271226 | 76 | 0.579275 | false |
MerleLK/StudentSystem | teacher/migrations/0001_initial.py | 1 | 1390 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-12 10:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CourseInfo',
fields=[
('course_id', models.BigIntegerField(primary_key=True, serialize=False)),
('course_name', models.CharField(max_length=100)),
('description', models.CharField(max_length=200)),
('for_grade', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='TeacherMessage',
fields=[
('id', models.BigIntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=30)),
('age', models.IntegerField()),
('sex', models.CharField(default='MAN', max_length=10)),
('college', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='courseinfo',
name='teacher_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='course_teacher', to='teacher.TeacherMessage'),
),
]
| gpl-3.0 | 3,426,778,220,125,253,600 | 32.902439 | 141 | 0.564748 | false |
mp2893/retain | process_mimic.py | 1 | 5376 | # This script processes MIMIC-III dataset and builds longitudinal diagnosis records for patients with at least two visits.
# The output data are cPickled, and suitable for training Doctor AI or RETAIN
# Written by Edward Choi ([email protected])
# Usage: Put this script to the foler where MIMIC-III CSV files are located. Then execute the below command.
# python process_mimic.py ADMISSIONS.csv DIAGNOSES_ICD.csv PATIENTS.csv <output file>
# Output files
# <output file>.pids: List of unique Patient IDs. Used for intermediate processing
# <output file>.morts: List of binary values indicating the mortality of each patient
# <output file>.dates: List of List of Python datetime objects. The outer List is for each patient. The inner List is for each visit made by each patient
# <output file>.seqs: List of List of List of integer diagnosis codes. The outer List is for each patient. The middle List contains visits made by each patient. The inner List contains the integer diagnosis codes that occurred in each visit
# <output file>.types: Python dictionary that maps string diagnosis codes to integer diagnosis codes.
import sys
import cPickle as pickle
from datetime import datetime
def convert_to_icd9(dxStr):
if dxStr.startswith('E'):
if len(dxStr) > 4: return dxStr[:4] + '.' + dxStr[4:]
else: return dxStr
else:
if len(dxStr) > 3: return dxStr[:3] + '.' + dxStr[3:]
else: return dxStr
def convert_to_3digit_icd9(dxStr):
if dxStr.startswith('E'):
if len(dxStr) > 4: return dxStr[:4]
else: return dxStr
else:
if len(dxStr) > 3: return dxStr[:3]
else: return dxStr
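# Illustrative examples (not part of the original script) of the two helpers above:
#   convert_to_icd9('4019')          -> '401.9'
#   convert_to_icd9('E8889')         -> 'E888.9'
#   convert_to_3digit_icd9('4019')   -> '401'
#   convert_to_3digit_icd9('E8889')  -> 'E888'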
if __name__ == '__main__':
admissionFile = sys.argv[1]
diagnosisFile = sys.argv[2]
patientsFile = sys.argv[3]
outFile = sys.argv[4]
print 'Collecting mortality information'
pidDodMap = {}
infd = open(patientsFile, 'r')
infd.readline()
for line in infd:
tokens = line.strip().split(',')
pid = int(tokens[1])
dod_hosp = tokens[5]
if len(dod_hosp) > 0:
pidDodMap[pid] = 1
else:
pidDodMap[pid] = 0
infd.close()
print 'Building pid-admission mapping, admission-date mapping'
pidAdmMap = {}
admDateMap = {}
infd = open(admissionFile, 'r')
infd.readline()
for line in infd:
tokens = line.strip().split(',')
pid = int(tokens[1])
admId = int(tokens[2])
admTime = datetime.strptime(tokens[3], '%Y-%m-%d %H:%M:%S')
admDateMap[admId] = admTime
if pid in pidAdmMap: pidAdmMap[pid].append(admId)
else: pidAdmMap[pid] = [admId]
infd.close()
print 'Building admission-dxList mapping'
admDxMap = {}
admDxMap_3digit = {}
infd = open(diagnosisFile, 'r')
infd.readline()
for line in infd:
tokens = line.strip().split(',')
admId = int(tokens[2])
dxStr = 'D_' + convert_to_icd9(tokens[4][1:-1]) ############## Uncomment this line and comment the line below, if you want to use the entire ICD9 digits.
dxStr_3digit = 'D_' + convert_to_3digit_icd9(tokens[4][1:-1])
if admId in admDxMap:
admDxMap[admId].append(dxStr)
else:
admDxMap[admId] = [dxStr]
if admId in admDxMap_3digit:
admDxMap_3digit[admId].append(dxStr_3digit)
else:
admDxMap_3digit[admId] = [dxStr_3digit]
infd.close()
print 'Building pid-sortedVisits mapping'
pidSeqMap = {}
pidSeqMap_3digit = {}
for pid, admIdList in pidAdmMap.iteritems():
if len(admIdList) < 2: continue
sortedList = sorted([(admDateMap[admId], admDxMap[admId]) for admId in admIdList])
pidSeqMap[pid] = sortedList
sortedList_3digit = sorted([(admDateMap[admId], admDxMap_3digit[admId]) for admId in admIdList])
pidSeqMap_3digit[pid] = sortedList_3digit
print 'Building pids, dates, mortality_labels, strSeqs'
pids = []
dates = []
seqs = []
morts = []
for pid, visits in pidSeqMap.iteritems():
pids.append(pid)
morts.append(pidDodMap[pid])
seq = []
date = []
for visit in visits:
date.append(visit[0])
seq.append(visit[1])
dates.append(date)
seqs.append(seq)
print 'Building pids, dates, strSeqs for 3digit ICD9 code'
seqs_3digit = []
for pid, visits in pidSeqMap_3digit.iteritems():
seq = []
for visit in visits:
seq.append(visit[1])
seqs_3digit.append(seq)
print 'Converting strSeqs to intSeqs, and making types'
types = {}
newSeqs = []
for patient in seqs:
newPatient = []
for visit in patient:
newVisit = []
for code in visit:
if code in types:
newVisit.append(types[code])
else:
types[code] = len(types)
newVisit.append(types[code])
newPatient.append(newVisit)
newSeqs.append(newPatient)
print 'Converting strSeqs to intSeqs, and making types for 3digit ICD9 code'
types_3digit = {}
newSeqs_3digit = []
for patient in seqs_3digit:
newPatient = []
for visit in patient:
newVisit = []
for code in set(visit):
if code in types_3digit:
newVisit.append(types_3digit[code])
else:
types_3digit[code] = len(types_3digit)
newVisit.append(types_3digit[code])
newPatient.append(newVisit)
newSeqs_3digit.append(newPatient)
pickle.dump(pids, open(outFile+'.pids', 'wb'), -1)
pickle.dump(dates, open(outFile+'.dates', 'wb'), -1)
pickle.dump(morts, open(outFile+'.morts', 'wb'), -1)
pickle.dump(newSeqs, open(outFile+'.seqs', 'wb'), -1)
pickle.dump(types, open(outFile+'.types', 'wb'), -1)
pickle.dump(newSeqs_3digit, open(outFile+'.3digitICD9.seqs', 'wb'), -1)
pickle.dump(types_3digit, open(outFile+'.3digitICD9.types', 'wb'), -1)
| bsd-3-clause | -4,906,818,398,973,014,000 | 31.581818 | 240 | 0.69308 | false |
cytomine/Cytomine-python-datamining | cytomine-applications/classification_validation/add_and_run_job.py | 1 | 16322 | # -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2016. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Stévens Benjamin <[email protected]>"
__contributors__ = ["Marée Raphael <[email protected]>"]
__copyright__ = "Copyright 2010-2016 University of Liège, Belgium, http://www.cytomine.be/"
import sys
import cytomine
import os
from cytomine.models import *
from cytomine.utils import parameters_values_to_argv
from pyxit import pyxitstandalone
import argparse, optparse
import time
#Examples (default) of parameter values through command-line args
parameters = {
'cytomine_host' : None,
'cytomine_public_key' : None,
'cytomine_private_key' : None,
'cytomine_base_path' : None,
'cytomine_working_path' : '$HOME/tmp/cytomine/annotations/',
'cytomine_id_software' : 816476, #id of the pyxit validation software
'cytomine_id_project' : 716498, #id of the project to which annotation classifications will be uploaded
'cytomine_zoom_level' : 1,
'cytomine_dump_type' : 1, # type of the crop of the annotation (1=full crop, 2=crop with mask)
    'cytomine_annotation_projects' : [716498], #id of projects from which we dump annotations to build the training dataset
'cytomine_excluded_terms' : [676131,676210,676176,], #do not use these cytomine terms
'cytomine_reviewed': True,
'cytomine_fixed_tile':False,
'cytomine_n_shifts':0,
}
#Examples (default) of parameter values to be set through command-line args
pyxit_parameters = {
'dir_ls' : "/",
'forest_shared_mem' : False,
#processing
'pyxit_n_jobs' : 10,
#subwindows extraction
'pyxit_n_subwindows' : 100,
'pyxit_min_size' : 0.1,
'pyxit_max_size' : 1.0,
    'pyxit_target_width' : 16, #24x24 at zoom 3 on agar/pgp
'pyxit_target_height' : 16,
'pyxit_interpolation' : 1,
'pyxit_transpose' : 1, #do we apply rotation/mirroring to subwindows (to enrich training set)
'pyxit_colorspace' : 2, # which colorspace do we use ?
'pyxit_fixed_size' : False, #do we extracted fixed sizes or random sizes (false)
#classifier parameters
'forest_n_estimators' : 10, #number of trees
'forest_max_features' : 28, #number of attributes considered at each node
'forest_min_samples_split' : 1, #nmin
'svm' : 0,
'svm_c': 1.0,
#evaluation protocol
'cv_k_folds' : 10,
'cv_shuffle' : False,
'cv_shuffle_test_fraction' : 0.3,
}
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def publish_predict(conn, annotations, y_hat, y_hat_proba):
i = 0
for annotation in annotations.data():
for term in annotation.term:
annotation_term = AnnotationTerm()
annotation_term._callback_identifier = 'algoannotationterm'
annotation_term.annotation = annotation.id
annotation_term.expectedTerm = term
annotation_term.term = int(y_hat[i])
annotation_term.rate = y_hat_proba[i]
conn.save(annotation_term)
i += 1
def main(argv):
# Define command line options
p = optparse.OptionParser(description='Pyxit/Cytomine Classification model Builder',
prog='PyXit Classification Model Builder (PYthon piXiT)')
p.add_option("--cytomine_host", type="string", default = '', dest="cytomine_host", help="The Cytomine host (eg: beta.cytomine.be, localhost:8080)")
p.add_option('--cytomine_public_key', type="string", default = '', dest="cytomine_public_key", help="Cytomine public key")
p.add_option('--cytomine_private_key',type="string", default = '', dest="cytomine_private_key", help="Cytomine private key")
p.add_option('--cytomine_base_path', type="string", default = '/api/', dest="cytomine_base_path", help="Cytomine base path")
p.add_option('--cytomine_id_software', type="int", dest="cytomine_id_software", help="The Cytomine software identifier")
p.add_option('--cytomine_working_path', default="/tmp/", type="string", dest="cytomine_working_path", help="The working directory (eg: /tmp)")
p.add_option('--cytomine_id_project', type="int", dest="cytomine_id_project", help="The Cytomine project identifier")
p.add_option('-z', '--cytomine_zoom_level', type='int', dest='cytomine_zoom_level', help="working zoom level")
p.add_option('--cytomine_dump_type', type='int', dest='cytomine_dump_type', help="annotation type (1=crop, 2=alphamask)")
#p.add_option('--cytomine_fixed_tile', default=False,action="store_true", dest="cytomine_fixed_tile", help="Force fixed tile size crop around annotations")
p.add_option('--cytomine_fixed_tile', type="string", default="False",dest="cytomine_fixed_tile", help="Force fixed tile size crop around annotations")
p.add_option('--cytomine_n_shifts',type='int', dest='cytomine_n_shifts',help="number of translated (shifted) crops extracted for each annotation")
p.add_option('--cytomine_annotation_projects', type="string", dest="cytomine_annotation_projects", help="Projects from which annotations are extracted")
p.add_option('--cytomine_excluded_terms', type='string', default='5735', dest='cytomine_excluded_terms', help="term ids of excluded terms")
#p.add_option('--cytomine_reviewed', default=False, action="store_true", dest="cytomine_reviewed", help="Get reviewed annotations only")
p.add_option('--cytomine_reviewed', type='string', default="False", dest="cytomine_reviewed", help="Get reviewed annotations only")
p.add_option('--pyxit_target_width', type='int', dest='pyxit_target_width', help="pyxit subwindows width")
p.add_option('--pyxit_target_height', type='int', dest='pyxit_target_height', help="pyxit subwindows height")
p.add_option('--pyxit_colorspace', type='int', dest='pyxit_colorspace', help="pyxit colorspace encoding") #future: get it from server db
p.add_option('--pyxit_n_jobs', type='int', dest='pyxit_n_jobs', help="pyxit number of jobs for trees") #future: get it from server db
p.add_option('--pyxit_n_subwindows', default=10, type="int", dest="pyxit_n_subwindows", help="number of subwindows")
p.add_option('--pyxit_min_size', default=0.5, type="float", dest="pyxit_min_size", help="min size")
p.add_option('--pyxit_max_size', default=1.0, type="float", dest="pyxit_max_size", help="max size")
p.add_option('--pyxit_interpolation', default=2, type="int", dest="pyxit_interpolation", help="interpolation method 1,2,3,4")
#p.add_option('--pyxit_transpose', default=False, action="store_true", dest="pyxit_transpose", help="transpose subwindows")
p.add_option('--pyxit_transpose', type="string", default="False", dest="pyxit_transpose", help="transpose subwindows")
#p.add_option('--pyxit_fixed_size', default=False, action="store_true", dest="pyxit_fixed_size", help="extract fixed size subwindows")
p.add_option('--pyxit_fixed_size', type="string", default="False", dest="pyxit_fixed_size", help="extract fixed size subwindows")
p.add_option('--forest_n_estimators', default=10, type="int", dest="forest_n_estimators", help="number of base estimators (T)")
p.add_option('--forest_max_features' , default=1, type="int", dest="forest_max_features", help="max features at test node (k)")
p.add_option('--forest_min_samples_split', default=1, type="int", dest="forest_min_samples_split", help="minimum node sample size (nmin)")
p.add_option('--svm', default=0, dest="svm", help="final svm classifier: 0=nosvm, 1=libsvm, 2=liblinear, 3=lr-l1, 4=lr-l2", type="int")
p.add_option('--svm_c', default=1.0, type="float", dest="svm_c", help="svm C")
p.add_option('--cv_k_folds', default=False, type="int", dest="cv_k_folds", help="number of cross validation folds")
#p.add_option('--cv_shuffle', default=False, action="store_true", dest="cv_shuffle", help="shuffle splits in cross validation")
p.add_option('--cv_shuffle', type="string", default="False", dest="cv_shuffle", help="shuffle splits in cross validation")
p.add_option('--cv_shuffle_test_fraction', default=0.3, type="float", dest="cv_shuffle_test_fraction", help="shuffle fraction in cross validation")
p.add_option('--verbose', type="string", default="0", dest="verbose", help="Turn on (1) or off (0) verbose mode")
options, arguments = p.parse_args( args = argv)
parameters['cytomine_host'] = options.cytomine_host
parameters['cytomine_public_key'] = options.cytomine_public_key
parameters['cytomine_private_key'] = options.cytomine_private_key
parameters['cytomine_base_path'] = options.cytomine_base_path
parameters['cytomine_working_path'] = options.cytomine_working_path
parameters['cytomine_base_path'] = options.cytomine_base_path
parameters['cytomine_id_project'] = options.cytomine_id_project
parameters['cytomine_id_software'] = options.cytomine_id_software
parameters['cytomine_annotation_projects'] = map(int,options.cytomine_annotation_projects.split(','))
parameters['cytomine_excluded_terms'] = map(int,options.cytomine_excluded_terms.split(','))
parameters['cytomine_zoom_level'] = options.cytomine_zoom_level
parameters['cytomine_dump_type'] = options.cytomine_dump_type
parameters['cytomine_fixed_tile'] = str2bool(options.cytomine_fixed_tile)
parameters['cytomine_n_shifts'] = options.cytomine_n_shifts
parameters['cytomine_reviewed'] = str2bool(options.cytomine_reviewed)
pyxit_parameters['pyxit_target_width'] = options.pyxit_target_width
pyxit_parameters['pyxit_target_height'] = options.pyxit_target_height
pyxit_parameters['pyxit_n_subwindows'] = options.pyxit_n_subwindows
pyxit_parameters['pyxit_min_size'] = options.pyxit_min_size
pyxit_parameters['pyxit_max_size'] = options.pyxit_max_size
pyxit_parameters['pyxit_colorspace'] = options.pyxit_colorspace
pyxit_parameters['pyxit_interpolation'] = options.pyxit_interpolation
pyxit_parameters['pyxit_transpose'] = str2bool(options.pyxit_transpose)
pyxit_parameters['pyxit_fixed_size'] = str2bool(options.pyxit_fixed_size)
pyxit_parameters['forest_n_estimators'] = options.forest_n_estimators
pyxit_parameters['forest_max_features'] = options.forest_max_features
pyxit_parameters['forest_min_samples_split'] = options.forest_min_samples_split
pyxit_parameters['svm'] = options.svm
pyxit_parameters['svm_c'] = options.svm_c
pyxit_parameters['cv_k_folds'] = options.cv_k_folds
pyxit_parameters['cv_shuffle'] = str2bool(options.cv_shuffle)
pyxit_parameters['cv_shuffle_test_fraction'] = options.cv_shuffle_test_fraction
pyxit_parameters['pyxit_n_jobs'] = options.pyxit_n_jobs
# Check for errors in the options
if options.verbose:
print "[pyxit.main] Options = ", options
# Create JOB/USER/JOB
conn = cytomine.Cytomine(parameters["cytomine_host"],
parameters["cytomine_public_key"],
parameters["cytomine_private_key"] ,
base_path = parameters['cytomine_base_path'],
working_path = parameters['cytomine_working_path'],
verbose= str2bool(options.verbose))
#Create a new userjob if connected as human user
current_user = conn.get_current_user()
run_by_user_job = False
if current_user.algo==False:
print "adduserJob..."
user_job = conn.add_user_job(parameters['cytomine_id_software'], parameters['cytomine_id_project'])
print "set_credentials..."
conn.set_credentials(str(user_job.publicKey), str(user_job.privateKey))
print "done"
else:
user_job = current_user
print "Already running as userjob"
run_by_user_job = True
job = conn.get_job(user_job.job)
#get annotation collection from Cytomine
annotations = conn.get_annotations(id_project = parameters['cytomine_id_project'], reviewed_only = parameters['cytomine_reviewed'])
#set output dir parameters
pyxit_parameters['dir_ls'] = os.path.join(parameters["cytomine_working_path"], str(parameters['cytomine_annotation_projects']).replace(',','-').replace('[','').replace(']','').replace(' ',''), "zoom_level", str(parameters['cytomine_zoom_level']),"dump_type",str(parameters['cytomine_dump_type']))
if not os.path.exists(pyxit_parameters['dir_ls']):
print "Creating annotation directory: %s" %pyxit_parameters['dir_ls']
os.makedirs(pyxit_parameters['dir_ls'])
time.sleep(2)
#image dump type
if (parameters['cytomine_dump_type']==1):
annotation_get_func = Annotation.get_annotation_crop_url
elif (parameters['cytomine_dump_type']==2):
annotation_get_func = Annotation.get_annotation_alpha_crop_url
else:
print "default annotation type crop"
annotation_get_func = Annotation.get_annotation_crop_url
#dump annotation crops
job = conn.update_job_status(job, status = job.RUNNING, status_comment = "Dump annotations...", progress = 50)
annotations=conn.dump_annotations(annotations = annotations, get_image_url_func = annotation_get_func, dest_path = pyxit_parameters['dir_ls'], desired_zoom = parameters['cytomine_zoom_level'],excluded_terms=parameters['cytomine_excluded_terms'], tile_size = parameters['cytomine_fixed_tile'], translate = parameters['cytomine_n_shifts'])
#build pyxit model(s) and evaluate them (according to cross-validation parameters)
print "Create software parameters values..."
if run_by_user_job==False:
parameters_values = conn.add_job_parameters(user_job.job, conn.get_software(parameters['cytomine_id_software']), pyxit_parameters)
argv = parameters_values_to_argv(pyxit_parameters, parameters_values)
else:
argv = []
for key in pyxit_parameters:
value = pyxit_parameters[key]
if type(value) is bool or value == 'True':
if bool(value):
argv.append("--%s" % key)
elif not value == 'False':
argv.append("--%s" % key)
argv.append("%s" % value)
print "Run PyXiT..."
print argv
job = conn.update_job_status(job, status = job.RUNNING, status_comment = "Build models...", progress = 75)
predict = pyxitstandalone.main(argv)
print "------------------- Publishing results to Cytomine Core... ----------------------"
print annotations.data()
job = conn.update_job_status(job, status = job.RUNNING, status_comment = "Publishing results...", progress = 90)
for annotation in annotations.data():
#print "ANNOTATION: %s" %annotation
#print "ANNOTATION TERM: %s" %annotation.term
for term in annotation.term:
#annot_descr = conn.get_annotation(annotation.id)
#if hasattr(annotation, "filename"):
# print "filename: %s" %annotation.filename
# time.sleep(1)
if hasattr(annotation, "filename") and (annotation.filename in predict) :
print "PUBLISH annotation %s prediction" %annotation
p = predict[annotation.filename]
annotation_term = AlgoAnnotationTerm()
annotation_term.annotation = annotation.id
annotation_term.expectedTerm = term
annotation_term.term = p[0]
if (pyxit_parameters['svm'] == 1): #libsvm does not return proba
annotation_term.rate = 1.0
else:
annotation_term.rate = p[1]
conn.add_annotation_term(annotation.id, p[0], term, annotation_term.rate, annotation_term_model = cytomine.models.AlgoAnnotationTerm)
job = conn.update_job_status(job, status = job.TERMINATED, status_comment = "Finish", progress = 100)
print "END."
if __name__ == "__main__":
import sys
main(sys.argv[1:])
| apache-2.0 | 1,978,683,460,693,328,600 | 53.761745 | 341 | 0.675164 | false |
ksmit799/Toontown-Source | toontown/coghq/GameSprite3D.py | 1 | 13284 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
import math
import random
class GameSprite:
colorRed = Vec4(1, 0.2, 0.2, 1)
colorBlue = Vec4(0.7, 0.8, 1, 1)
colorGreen = Vec4(0, 1, 0, 1)
colorGhostRed = Vec4(1, 0.2, 0.2, 0.5)
colorGhostBlue = Vec4(0.7, 0.8, 1, 0.5)
colorGhostGreen = Vec4(0, 1, 0, 0.5)
colorDisolveRed = Vec4(1, 0.2, 0.2, 0.0)
colorDisolveBlue = Vec4(0.7, 0.8, 1, 0.0)
colorDisolveGreen = Vec4(0, 1, 0, 0.0)
colorWhite = Vec4(1, 1, 1, 1)
colorBlack = Vec4(0, 0, 0, 1.0)
colorDisolveWhite = Vec4(1, 1, 1, 0.0)
colorDisolveBlack = Vec4(0, 0, 0, 0.0)
colorShadow = Vec4(0, 0, 0, 0.5)
colorPurple = Vec4(1.0, 0, 1.0, 1.0)
colorDisolvePurple = Vec4(1.0, 0, 1.0, 0.0)
colorYellow = Vec4(1.0, 1.0, 0.0, 1.0)
colorDisolveYellow = Vec4(1.0, 1.0, 0.0, 0.0)
colorOrange = Vec4(1.0, 0.5, 0.0, 1.0)
colorDisolveOrange = Vec4(1.0, 0.5, 0.0, 0.0)
colorAqua = Vec4(0.0, 1.0, 1.0, 1.0)
colorDisolveAqua = Vec4(0.0, 1.0, 1.0, 0.0)
colorSteel = Vec4(0.5, 0.5, 0.5, 1.0)
colorSteelDissolve = Vec4(0.5, 0.5, 0.5, 0.0)
colorList = (colorRed,
colorBlue,
colorGreen,
colorWhite,
colorBlack,
colorPurple,
colorYellow,
colorOrange,
colorAqua,
colorSteel)
disolveList = (colorDisolveRed,
colorDisolveBlue,
colorDisolveGreen,
colorDisolveWhite,
colorDisolveBlack,
colorDisolvePurple,
colorDisolveYellow,
colorDisolveOrange,
colorDisolveAqua,
colorSteelDissolve)
def __init__(self, spriteBase, size, colorType = 0, foundation = 0, facing = 0):
self.colorType = colorType
self.spriteBase = spriteBase
self.frame = self.spriteBase.getParent()
self.foundation = foundation
self.sizeMult = 1.4
self.velX = 0
self.velZ = 0
self.prevX = 0
self.prevZ = 0
self.isActive = 0
self.canCollide = 1
self.accX = None
self.accZ = None
self.delayRemove = 0
self.giftId = None
self.holdType = None
self.multiColor = 0
self.multiColorList = [0,
1,
2,
6]
self.multiColorIndex = 0
self.multiColorNext = 1
self.multiColorLevel = 0.0
self.multiColorStep = 0.025
self.facing = facing
self.breakable = 1
self.deleteFlag = 0
self.nodeObj = None
self.inputSize = size
myColor = GameSprite.colorWhite
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_white'
self.setBallType(colorType)
self.size = 0.4 * self.sizeMult
self.isQue = 0
self.nodeObj.setTransparency(TransparencyAttrib.MAlpha)
self.markedForDeath = 0
self.gridPosX = None
self.gridPosZ = None
return
def setBallType(self, type, solidOverride = 0):
if not self.nodeObj or self.nodeObj.isEmpty():
self.nodeObj = None
else:
self.nodeObj.remove()
colorType = type
self.multiColor = 0
self.breakable = 1
solid = self.foundation
if solidOverride:
solid = 1
myColor = GameSprite.colorWhite
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_white'
if not solid or colorType > 9:
if colorType == 0:
myColor = GameSprite.colorGhostRed
elif colorType == 1:
myColor = GameSprite.colorGhostBlue
elif colorType == 2:
myColor = GameSprite.colorGhostGreen
elif colorType == 3:
myColor = GameSprite.colorWhite
elif colorType == 4:
myColor = GameSprite.colorBlack
elif colorType == 5:
myColor = GameSprite.colorPurple
elif colorType == 6:
myColor = GameSprite.colorYellow
elif colorType == 7:
myColor = GameSprite.colorOrange
self.multiColor = 1
self.multiColorList = [7, 4]
self.multiColorIndex = 0
self.multiColorNext = 1
self.multiColorLevel = 0.0
self.multiColorStep = 0.1
elif colorType == 8:
myColor = GameSprite.colorAqua
self.multiColor = 1
self.multiColorList = [0,
1,
2,
6]
self.multiColorIndex = 0
self.multiColorNext = 1
self.multiColorLevel = 0.0
self.multiColorStep = 0.025
elif colorType == 9:
myColor = GameSprite.colorSteel
self.breakable = 0
elif colorType == 10:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_fire'
self.giftId = 7
self.colorType = 0
elif colorType == 11:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_unknown'
self.giftId = 8
self.colorType = 1
elif colorType == 0:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_red'
elif colorType == 1:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_blue'
elif colorType == 2:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_green'
elif colorType == 3:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_cog'
elif colorType == 4:
myColor = GameSprite.colorBlack
elif colorType == 5:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_purple'
elif colorType == 6:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_yello'
elif colorType == 7:
myColor = GameSprite.colorOrange
self.multiColor = 1
self.multiColorList = [7, 4]
self.multiColorIndex = 0
self.multiColorNext = 1
self.multiColorLevel = 0.0
self.multiColorStep = 0.15
elif colorType == 8:
myColor = GameSprite.colorAqua
self.multiColor = 1
self.multiColorList = [0,
1,
2,
6]
self.multiColorIndex = 0
self.multiColorNext = 1
self.multiColorLevel = 0.0
self.multiColorStep = 0.1
elif colorType == 9:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_steel'
if not myModel:
import pdb
pdb.set_trace()
self.breakable = 0
elif colorType == 10:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_fire'
self.giftId = 7
self.colorType = 0
elif colorType == 11:
myModel = 'phase_12/models/bossbotHQ/bust_a_cog_ball_unknown'
self.giftId = 8
self.colorType = 1
self.nodeObj = loader.loadModel(myModel)
self.nodeObj.setScale(self.inputSize)
self.nodeObj.reparentTo(self.spriteBase)
self.setColor(myColor)
return
def removeDelay(self):
self.delayRemove = 0
def delete(self):
if not self.delayRemove:
self.spriteBase.removeNode()
self.deleteFlag = 1
def face(self):
frameZ = self.frame.getZ()
tilt = -95.0 + (self.getZ() + frameZ) * 2.0
self.nodeObj.setP(-tilt)
def runColor(self):
if self.multiColor:
c1 = GameSprite.colorList[self.multiColorList[self.multiColorIndex]]
c2 = GameSprite.colorList[self.multiColorList[self.multiColorNext]]
iLevel = 1.0 - self.multiColorLevel
mixColor = c1 * iLevel + c2 * self.multiColorLevel
self.nodeObj.setColorScale(mixColor)
self.multiColorLevel += self.multiColorStep
if self.multiColorLevel > 1.0:
self.multiColorLevel = 0.0
self.multiColorIndex += 1
if self.multiColorIndex >= len(self.multiColorList):
self.multiColorIndex = 0
self.multiColorNext = self.multiColorIndex + 1
if self.multiColorNext >= len(self.multiColorList):
self.multiColorNext = 0
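    # Illustrative note (not in the original): runColor cross-fades linearly between
    # consecutive entries of multiColorList; halfway (multiColorLevel == 0.5) between
    # colorRed and colorBlue the applied scale would be
    #   0.5 * Vec4(1, 0.2, 0.2, 1) + 0.5 * Vec4(0.7, 0.8, 1, 1) == Vec4(0.85, 0.5, 0.6, 1)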
def run(self, timeDelta):
if self.facing:
self.face()
self.runColor()
if self.isActive and not self.isQue:
self.prevX = self.spriteBase.getX()
self.prevZ = self.spriteBase.getZ()
self.setX(self.getX() + self.velX * timeDelta)
self.setZ(self.getZ() + self.velZ * timeDelta)
self.velX = self.velX * (1 - timeDelta * 4)
self.velZ = self.velZ * (1 - timeDelta * 4)
if self.accX != None:
self.velX = self.accX
self.velZ = self.accZ
if self.nodeObj.isEmpty():
self.markedForDeath = 1
return
def reflectX(self):
self.velX = -self.velX
if self.accX != None:
self.accX = -self.accX
return
def reflectZ(self):
self.velZ = -self.velZ
if self.accZ != None:
self.accZ = -self.accZ
return
def warningBump(self):
num1 = random.random() * 2.0
num2 = random.random() * 2.0
num3 = random.random() * 2.0
curr = self.nodeObj.getPos()
dest = Point3(0 + curr[0], 0 + curr[1], 1.0 + curr[2])
track = Sequence(Wait(num1 * 0.1), LerpPosInterval(self.nodeObj, num2 * 0.1, Point3(0.0, 0.0, 0.5)), LerpPosInterval(self.nodeObj, num3 * 0.1, Point3(0.0, 0.0, 0.0)), LerpPosInterval(self.nodeObj, num2 * 0.1, Point3(0.0, 0.0, 0.5)), LerpPosInterval(self.nodeObj, num1 * 0.1, Point3(0.0, 0.0, 0.0)))
track.start()
def shake(self):
num1 = random.random() * 1.0
num2 = random.random() * 1.0
curr = self.nodeObj.getPos()
dest = Point3(0 + curr[0], 0 + curr[1], 1.0 + curr[2])
track = Sequence(LerpPosInterval(self.nodeObj, num2 * 0.1, Point3(0.0, 0.0, 0.25)), LerpPosInterval(self.nodeObj, num1 * 0.1, Point3(0.0, 0.0, 0.0)))
track.start()
def deathEffect(self):
if self.spriteBase.isEmpty():
return
self.spriteBase.wrtReparentTo(render)
num1 = (random.random() - 0.5) * 1.0
num2 = random.random() * 1.0
num3 = random.random() * 1.0
notNum3 = 1.0 - num3
curr = self.spriteBase.getPos()
self.delayRemove = 1
self.canCollide = 0
track = Sequence(Parallel(ProjectileInterval(self.spriteBase, startVel=Vec3(-20.0 + notNum3 * 40.0, -20.0 + num3 * 40.0, 30), duration=0.5 + num2 * 1.0, gravityMult=2.0), LerpColorScaleInterval(self.spriteBase, duration=0.5 + num2 * 1.0, startColorScale=GameSprite.colorList[self.colorType], colorScale=GameSprite.disolveList[self.colorType])), Func(self.removeDelay), Func(self.delete))
track.start()
def wildEffect(self):
if self.spriteBase.isEmpty():
return
num1 = (random.random() - 0.5) * 1.0
num2 = random.random() * 1.0
num3 = random.random() * 1.0
notNum3 = 1.0 - num3
curr = self.spriteBase.getPos()
self.delayRemove = 1
self.canCollide = 0
track = Sequence(Parallel(LerpScaleInterval(self.spriteBase, 1.0, 1.5, startScale=1.0), LerpColorScaleInterval(self.spriteBase, duration=1.0, startColorScale=GameSprite.colorList[self.colorType], colorScale=Vec4(0, 0, 0, 0.0))), Func(self.removeDelay), Func(self.delete))
track.start()
def setActive(self, active):
if active:
self.isActive = 1
else:
self.isActive = 0
self.velX = 0
self.velZ = 0
self.accX = None
self.accZ = None
return
def getX(self):
if self.nodeObj.isEmpty():
return None
return self.spriteBase.getX()
def getZ(self):
if self.nodeObj.isEmpty():
return None
return self.spriteBase.getZ()
def setX(self, x):
if self.nodeObj.isEmpty():
return None
self.prevX = self.spriteBase.getX()
self.spriteBase.setX(x)
return None
def setZ(self, z):
if self.nodeObj.isEmpty():
return None
self.prevZ = self.spriteBase.getZ()
self.spriteBase.setZ(z)
return None
def addForce(self, force, direction):
if self.isActive:
forceX = math.cos(direction) * force
forceZ = math.sin(direction) * force
self.velX += forceX
self.velZ += forceZ
def setAccel(self, accel, direction):
accelX = math.cos(direction) * accel
accelZ = math.sin(direction) * accel
self.accX = accelX
self.accZ = accelZ
def setColorType(self, typeIndex):
self.colorType = typeIndex
self.setColor(GameSprite.colorList[typeIndex])
def setColor(self, trip):
self.nodeObj.setColorScale(trip[0], trip[1], trip[2], trip[3])
def collide(self):
if self.isActive:
self.setX(self.prevX)
self.setZ(self.prevZ)
| mit | -3,959,399,585,208,441,000 | 35.196185 | 395 | 0.559696 | false |
MediaKraken/MediaKraken_Deployment | source/common/common_pagination_bootstrap.py | 1 | 3264 | """
Copyright (C) 2020 Quinn D Granfor <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import math
from common import common_internationalization
def com_pagination_page_calc(request):
page = int(request.args.get('page', 1))
offset = (page * int(request.ctx.session['per_page'])) - int(request.ctx.session['per_page'])
return page, offset
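# Worked example (illustrative, not from the original source): with
# request.ctx.session['per_page'] == 30 and ?page=2 in the query string,
# com_pagination_page_calc returns (2, 30), i.e. skip the first 30 rows and
# fetch the next 30.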
def com_pagination_boot_html(page, url, item_count=0,
client_items_per_page=30, format_number=True):
"""
    Build the Bootstrap pagination markup for the given page, URL and item
    count; returns None when everything fits on a single page.
"""
# if everything fits on one page, don't paginate.
if item_count < client_items_per_page:
return None
# return '', 0
# start pagination calculations
pages = math.ceil(item_count / client_items_per_page)
pagination_links = '<ul class="pagination">'
# only do previous if not on first page
if page > 1:
link_number = str(page - 1)
pagination_links += '<li class="page-item">' \
'<a class="page-link" href="' + url + '?page=' + link_number \
+ '" aria-label="Previous">' \
'<span aria-hidden="true">«</span>' \
'<span class="sr-only">Previous</span>' \
'</a>' \
'</li>'
# if less than ten pages, just display all the pages
if pages < 10:
build_start = 1
build_stop = pages
else:
build_start = page
build_stop = page + 10
if build_stop > pages:
build_stop = pages
for ndx in range(build_start, build_stop):
link_number = str(ndx)
if format_number:
page_number = common_internationalization.com_inter_number_format(ndx)
else:
page_number = str(ndx)
pagination_links += '<li class="page-item"><a class="page-link"' \
' href="' + url + '?page=' + link_number + '">' \
+ page_number + '</a></li>'
# only do next if not on last page
if page < pages:
link_number = str(page + 1)
pagination_links += '<li class="page-item">' \
'<a class="page-link" href="' + url + '?page=' + link_number \
+ '" aria-label="Next">' \
'<span aria-hidden="true">»</span>' \
'<span class="sr-only">Next</span>' \
'</a>' \
'</li>'
pagination_links += '</ul>'
return pagination_links
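# Usage sketch (not part of the original module): rendering page 2 of 100 items with
#   com_pagination_boot_html(2, '/media/list', item_count=100)
# yields a '<ul class="pagination">...' string with Previous/Next arrows, while an
# item_count below client_items_per_page returns None so callers can skip the bar.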
| gpl-3.0 | -2,698,689,213,716,076,500 | 38.804878 | 97 | 0.551164 | false |
drinkertea/pywinauto | pywinauto/unittests/test_handleprops.py | 1 | 13319 | # GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handleprops.py"""
import unittest
import six
import os
import sys
import warnings
sys.path.append(".")
from pywinauto import win32structures
from pywinauto.handleprops import children, classname, clientrect, contexthelpid, \
controlid, dumpwindow, exstyle, font, has_exstyle, has_style, is64bitprocess, \
is_toplevel_window, isenabled, isunicode, isvisible, iswindow, parent, processid, \
rectangle, style, text, userdata, is64bitbinary
from pywinauto.application import Application
from pywinauto.sysinfo import is_x64_OS
from pywinauto.sysinfo import is_x64_Python
from pywinauto.timings import Timings
class HandlepropsTestCases(unittest.TestCase):
"""Unit tests for the handleprops module"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application().start("notepad")
self.dlghandle = self.app.UntitledNotepad.handle
self.edit_handle = self.app.UntitledNotepad.Edit.handle
def tearDown(self):
"""Close the application after tests"""
# close the application
#self.dlg.SendMessage(win32defines.WM_CLOSE)
#self.app.UntitledNotepad.menu_select("File->Exit")
self.app.kill()
def test_text(self):
"""Make sure the text method returns correct result"""
self.assertEqual("Untitled - Notepad", text(self.dlghandle))
self.assertEqual("", text(self.edit_handle))
self.assertEqual(None, text(sys.maxsize))
self.assertEqual(None, text(None))
def test_classname(self):
"""Make sure the classname method returns correct result"""
self.assertEqual("Notepad", classname(self.dlghandle))
self.assertEqual("Edit", classname(self.edit_handle))
self.assertEqual("", classname(sys.maxsize))
self.assertEqual(None, classname(None))
def test_parent(self):
"""Make sure the parent method returns correct result"""
self.assertEqual(0, parent(self.dlghandle))
self.assertEqual(self.dlghandle, parent(self.edit_handle))
def test_style(self):
"""Make sure the style method returns correct result"""
self.assertEqual(0x14cf0000, style(self.dlghandle))
# will be 0x50300104 if wordwrap is on and 0x50200104 if off
        self.assertIn(
            style(self.edit_handle),
            (0x50200104, 0x50300104))
def test_exstyle(self):
"""Make sure the exstyle method returns correct result"""
self.assertEqual(0x110, exstyle(self.dlghandle))
self.assertEqual(0x200, exstyle(self.edit_handle))
def test_controlid(self):
"""Make sure the controlid method returns correct result"""
#self.assertEqual(0, controlid(self.dlghandle))
self.assertEqual(15, controlid(self.edit_handle))
def test_userdata(self):
"""Make sure the userdata method returns correct result"""
self.assertEqual(0, userdata(self.dlghandle))
self.assertEqual(0, userdata(self.edit_handle))
def test_contexthelpid(self):
"""Make sure the contexthelpid method returns correct result"""
self.assertEqual(0, contexthelpid(self.dlghandle))
self.assertEqual(0, contexthelpid(self.edit_handle))
def test_iswindow(self):
"""Make sure the iswindow method returns correct result"""
self.assertEqual(True, iswindow(self.dlghandle))
self.assertEqual(True, iswindow(self.edit_handle))
self.assertEqual(False, iswindow(1))
self.assertEqual(False, iswindow(sys.maxsize))
self.assertEqual(False, iswindow(None))
def test_isvisible(self):
"""Make sure the isvisible method returns correct result"""
self.assertEqual(True, isvisible(self.dlghandle))
self.assertEqual(True, isvisible(self.edit_handle))
self.assertEqual(False, isvisible(sys.maxsize))
self.assertEqual(False, isvisible(None))
# need to check something invisible
#self.assertEqual(False, isvisible(self.edit_handle))
def test_isunicode(self):
"""Make sure the isunicode method returns correct result"""
self.assertEqual(True, isunicode(self.dlghandle))
self.assertEqual(True, isunicode(self.edit_handle))
self.assertEqual(False, isunicode(sys.maxsize))
self.assertEqual(False, isunicode(None))
# need to check something not unicode
#self.assertEqual(False, isunicode(self.edit_handle))
def test_isenabled(self):
"""Make sure the isenabled method returns correct result"""
self.assertEqual(False, isenabled(sys.maxsize))
self.assertEqual(False, isenabled(None))
self.assertEqual(True, isenabled(self.dlghandle))
self.assertEqual(True, isenabled(self.edit_handle))
self.app.UntitledNotepad.menu_select("Help->About Notepad")
self.app.AboutNotepad.wait('ready')
self.assertEqual(False, isenabled(self.dlghandle))
self.app.AboutNotepad.OK.close_click()
self.app.UntitledNotepad.menu_select("Edit->Replace")
self.assertEqual(
False,
isenabled(
self.app.Replace.child_window(
title_re = "Replace.*",
class_name = "Button",
enabled_only = False).handle))
self.app.Replace.Cancel.click()
def test_clientrect(self):
"""Make sure clientrect() function works"""
self.assertEqual(0, clientrect(self.dlghandle).left)
self.assertEqual(0, clientrect(self.edit_handle).left)
self.assertEqual(0, clientrect(self.dlghandle).top)
self.assertEqual(0, clientrect(self.edit_handle).top)
self.assertEqual(True,
rectangle(self.dlghandle).right > clientrect(self.dlghandle).right)
self.assertEqual(True,
rectangle(self.edit_handle).right > clientrect(self.edit_handle).right)
self.assertEqual(True,
rectangle(self.dlghandle).bottom > clientrect(self.dlghandle).bottom)
self.assertEqual(True,
rectangle(self.edit_handle).bottom > clientrect(self.edit_handle).bottom)
def test_rectangle(self):
"""Make sure rectangle() function works"""
dlgrect = rectangle(self.dlghandle)
self.assertEqual(True, dlgrect.left < dlgrect.right)
self.assertEqual(True, dlgrect.top < dlgrect.bottom)
editrect = rectangle(self.edit_handle)
self.assertEqual(True, editrect.left < editrect.right)
self.assertEqual(True, editrect.top < editrect.bottom)
def test_font(self):
"""Make sure font() function works"""
dlgfont = font(self.dlghandle)
self.assertEqual(True, isinstance(dlgfont.lfFaceName, six.string_types))
editfont = font(self.edit_handle)
self.assertEqual(True, isinstance(editfont.lfFaceName, six.string_types))
# handle.props font should return DEFAULT font for an invalid handle
# Check only for a returned type as the default font can vary
expected = win32structures.LOGFONTW()
self.assertEqual(type(expected), type(font(sys.maxsize)))
self.assertEqual(type(expected), type(font(None)))
def test_processid(self):
"""Make sure processid() function works"""
self.assertEqual(self.app.process, processid(self.dlghandle))
self.assertEqual(self.app.process, processid(self.edit_handle))
def test_children(self):
"""Make sure the children method returns correct result"""
self.assertEqual(2, len(children(self.dlghandle)))
self.assertEqual([], children(self.edit_handle))
def test_has_style(self):
"""Make sure the has_style method returns correct result"""
self.assertEqual(True, has_style(self.dlghandle, 0xf0000))
self.assertEqual(True, has_style(self.edit_handle, 0x4))
self.assertEqual(False, has_style(self.dlghandle, 4))
self.assertEqual(False, has_style(self.edit_handle, 1))
def test_has_exstyle(self):
"""Make sure the has_exstyle method returns correct result"""
self.assertEqual(True, has_exstyle(self.dlghandle, 0x10))
self.assertEqual(True, has_exstyle(self.edit_handle, 0x200))
self.assertEqual(False, has_exstyle(self.dlghandle, 4))
self.assertEqual(False, has_exstyle(self.edit_handle, 0x10))
def test_is_toplevel_window(self):
"""Make sure is_toplevel_window() function works"""
self.assertEqual(True, is_toplevel_window(self.dlghandle))
self.assertEqual(False, is_toplevel_window(self.edit_handle))
self.app.UntitledNotepad.menu_select("Edit->Replace")
self.assertEqual(True, is_toplevel_window(self.app.Replace.handle))
self.assertEqual(False, is_toplevel_window(self.app.Replace.Cancel.handle))
self.app.Replace.Cancel.click()
def test_is64bitprocess(self):
"""Make sure a 64-bit process detection returns correct results"""
if is_x64_OS():
# Test a 32-bit app running on x64
expected_is64bit = False
if is_x64_Python():
exe32bit = os.path.join(os.path.dirname(__file__),
r"..\..\apps\MFC_samples\RowList.exe")
app = Application().start(exe32bit, timeout=20)
pid = app.RowListSampleApplication.process_id()
res_is64bit = is64bitprocess(pid)
try:
self.assertEqual(expected_is64bit, res_is64bit)
finally:
# make sure to close an additional app we have opened
app.kill()
# setup expected for a 64-bit app on x64
expected_is64bit = True
else:
# setup expected for a 32-bit app on x86
expected_is64bit = False
# test native Notepad app
res_is64bit = is64bitprocess(self.app.UntitledNotepad.process_id())
self.assertEqual(expected_is64bit, res_is64bit)
def test_is64bitbinary(self):
exe32bit = os.path.join(os.path.dirname(__file__),
r"..\..\apps\MFC_samples\RowList.exe")
dll32bit = os.path.join(os.path.dirname(__file__),
r"..\..\apps\MFC_samples\mfc100u.dll")
self.assertEqual(is64bitbinary(exe32bit), False)
self.assertEqual(is64bitbinary(dll32bit), None)
warnings.filterwarnings('always', category=RuntimeWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
is64bitbinary(dll32bit)
assert len(w) >= 1
assert issubclass(w[-1].category, RuntimeWarning)
assert "Cannot get binary type for file" in str(w[-1].message)
def test_dumpwindow(self):
"""Make sure dumpwindow() function works"""
dlgdump = dumpwindow(self.dlghandle)
for key, item in dlgdump.items():
self.assertEqual(item, globals()[key](self.dlghandle))
editdump = dumpwindow(self.edit_handle)
for key, item in editdump.items():
self.assertEqual(item, globals()[key](self.edit_handle))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -8,643,938,393,951,203,000 | 41.668852 | 87 | 0.656055 | false |
hoglet67/ElectronFpga | pcb/common/build_gerber_previews.py | 1 | 2124 | from __future__ import print_function
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gerber import load_layer
from gerber.render import GerberCairoContext, RenderSettings, theme
from glob import glob
import os
import sys
def generate_previews(fab_output_path, preview_output_path):
def read(pattern):
files = glob(os.path.join(fab_output_path, pattern))
if not files:
print("WARNING: Nothing found matching %s" % pattern)
return None
return load_layer(files[0])
def save(name):
path = os.path.join(preview_output_path, "%s.png" % name)
print("Saving preview to %s" % path)
ctx.dump(path)
def render(pattern, **kw):
layer = read(pattern)
if layer is None:
print("Not rendering %s" % pattern)
return
ctx.render_layer(layer, **kw)
# Rendering context
ctx = GerberCairoContext(scale=10)
ctx.color = (80./255, 80/255., 154/255.)
ctx.drill_color = ctx.color
# Edges
render("*.gm1")
# Copper
render("*.gtl")
# Mask
render("*.gts")
# Silk
render("*.gto", settings=RenderSettings(color=theme.COLORS['white'], alpha=0.85))
# Drills
render("*.drl")
save("pcb-front")
ctx.clear()
# Edges
render("*.gm1")
# Copper
render("*.gbl")
# Mask
render("*.gbs")
# Silk
render("*.gbo", settings=RenderSettings(color=theme.COLORS['white'], alpha=0.85))
# Drills
render("*.drl")
save("pcb-back")
if __name__ == '__main__':
generate_previews('gerber_tmp', '.')
| gpl-3.0 | 6,145,875,204,471,986,000 | 26.584416 | 85 | 0.636064 | false |
qxf2/qxf2-page-object-model | utils/gmail/utf.py | 1 | 3234 | # The contents of this file has been derived code from the Twisted project
# (http://twistedmatrix.com/). The original author is Jp Calderone.
# Twisted project license follows:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
text_type = unicode
binary_type = str
PRINTABLE = set(range(0x20, 0x26)) | set(range(0x27, 0x7f))
def encode(s):
"""Encode a folder name using IMAP modified UTF-7 encoding.
Despite the function's name, the output is still a unicode string.
"""
if not isinstance(s, text_type):
return s
r = []
_in = []
def extend_result_if_chars_buffered():
if _in:
r.extend(['&', modified_utf7(''.join(_in)), '-'])
del _in[:]
for c in s:
if ord(c) in PRINTABLE:
extend_result_if_chars_buffered()
r.append(c)
elif c == '&':
extend_result_if_chars_buffered()
r.append('&-')
else:
_in.append(c)
extend_result_if_chars_buffered()
return ''.join(r)
def decode(s):
"""Decode a folder name from IMAP modified UTF-7 encoding to unicode.
Despite the function's name, the input may still be a unicode
string. If the input is bytes, it's first decoded to unicode.
"""
if isinstance(s, binary_type):
s = s.decode('latin-1')
if not isinstance(s, text_type):
return s
r = []
_in = []
for c in s:
if c == '&' and not _in:
_in.append('&')
elif c == '-' and _in:
if len(_in) == 1:
r.append('&')
else:
r.append(modified_deutf7(''.join(_in[1:])))
_in = []
elif _in:
_in.append(c)
else:
r.append(c)
if _in:
r.append(modified_deutf7(''.join(_in[1:])))
return ''.join(r)
def modified_utf7(s):
# encode to utf-7: '\xff' => b'+AP8-', decode from latin-1 => '+AP8-'
s_utf7 = s.encode('utf-7').decode('latin-1')
return s_utf7[1:-1].replace('/', ',')
def modified_deutf7(s):
s_utf7 = '+' + s.replace(',', '/') + '-'
# encode to latin-1: '+AP8-' => b'+AP8-', decode from utf-7 => '\xff'
return s_utf7.encode('latin-1').decode('utf-7') | mit | -3,698,311,829,193,470,500 | 31.676768 | 74 | 0.611936 | false |
flennerhag/mlens | mlens/index/base.py | 1 | 9375 | """ML-ENSEMBLE
:author: Sebastian Flennerhag
:copyright: 2017-2018
:licence: MIT
Base classes for partitioning training data.
"""
from __future__ import division
from abc import abstractmethod
import numpy as np
from ..externals.sklearn.base import BaseEstimator
def prune_train(start_below, stop_below, start_above, stop_above):
"""Checks if indices above or below are empty and remove them.
A utility function for checking if the train indices below the a given
test set range are (0, 0), or if indices above the test set range is
(n, n). In this case, these will lead to an empty array and therefore
can safely be removed to create a single training set index range.
Parameters
----------
start_below : int
index number starting below the test set. Should always be the same
for all test sets.
stop_below : int
the index number at which the test set is starting on.
start_above : int
the index number at which the test set ends.
stop_above : int
The end of the data set (n). Should always be the same for all test
sets.
"""
if start_below == stop_below:
tri = ((start_above, stop_above),)
elif start_above == stop_above:
tri = ((start_below, stop_below),)
else:
tri = ((start_below, stop_below), (start_above, stop_above))
return tri
def partition(n, p):
"""Get partition sizes for a given number of samples and partitions.
This method will give an array containing the sizes of ``p`` partitions
given a total sample size of ``n``. If there is a remainder from the
split, the r first folds will be incremented by 1.
Parameters
----------
n : int
number of samples.
p : int
number of partitions.
Examples
--------
Return sample sizes of 2 partitions given a total of 4 samples
>>> from mlens.index.base import partition
>>> _partition(4, 2)
array([2, 2])
Return sample sizes of 3 partitions given a total of 8 samples
>>> from mlens.index.base import partition
>>> _partition(8, 3)
array([3, 3, 2])
"""
sizes = (n // p) * np.ones(p, dtype=np.int)
sizes[:n % p] += 1
return sizes
def make_tuple(arr):
"""Make a list of index tuples from array
Parameters
----------
arr : array
Returns
-------
out : list
Examples
--------
>>> import numpy as np
>>> from mlens.index.base import make_tuple
>>> _make_tuple(np.array([0, 1, 2, 5, 6, 8, 9, 10]))
[(0, 3), (5, 7), (8, 11)]
"""
out = list()
t1 = t0 = arr[0]
for i in arr[1:]:
if i - t1 <= 1:
t1 = i
continue
out.append((t0, t1 + 1))
t1 = t0 = i
out.append((t0, t1 + 1))
return out
class BaseIndex(BaseEstimator):
"""Base Index class.
Specification of indexer-wide methods and attributes that we can always
expect to find in any indexer. Helps to provide a uniform interface
during parallel estimation.
"""
def __init__(self):
self.folds = None
self.partitions = 1
self.n_samples = None
self.n_test_samples = None
self.__fitted__ = False
@abstractmethod
def fit(self, X, y=None, job=None):
"""Method for storing array data.
Parameters
----------
X : array-like of shape [n_samples, optional]
array to _collect dimension data from.
y : array-like, optional
label data
job : str, optional
optional job type data
Returns
-------
instance :
indexer with stores sample size data.
Notes
-----
Fitting an indexer stores nothing that points to the array
or memmap ``X``. Only the ``shape`` attribute of ``X`` is called.
"""
@abstractmethod
def _gen_indices(self):
"""Method for constructing the index generator.
This should be modified by each indexer class to build the desired
index. Currently, the Default is the standard K-Fold as this method
is returned by Subset-based indexer when number of subsets is ``1``.
Returns
-------
iterable :
a generator of ``train_index, test_index``.
"""
n_samples = self.n_samples
folds = self.folds
if folds == 1:
# Return the full index as both training and test set
yield ((0, n_samples),), (0, n_samples)
else:
# Get the length of the test sets
tei_len = partition(n_samples, folds)
last = 0
for size in tei_len:
# Test set
tei_start, tei_stop = last, last + size
tei = (tei_start, tei_stop)
# Train set
tri_start_below, tri_stop_below = 0, tei_start
tri_start_above, tri_stop_above = tei_stop, n_samples
tri = prune_train(tri_start_below, tri_stop_below,
tri_start_above, tri_stop_above)
yield tri, tei
last = tei_stop
# pylint: disable=unused-argument, no-self-use
def partition(self, X=None, as_array=False):
"""Partition generator method.
Default behavior is to yield ``None``
for fitting on full data. Overridden in
:class:`SubsetIndex` and :class:`ClusteredSubsetIndex`
to produce partition indexes.
"""
yield None
def generate(self, X=None, as_array=False):
r"""Front-end generator method.
Generator for training and test set indices based on the
generator specification in ``_gen_indicies``.
Parameters
----------
X : array-like, optional
If instance has not been fitted, the training set ``X`` must be
passed to the ``generate`` method, which will call ``fit`` before
proceeding. If already fitted, ``X`` can be omitted.
as_array : bool (default = False)
whether to return train and test indices as a pair of tuple(s)
or numpy arrays. If the returned tuples are singular they can be
used on an array X with standard slicing syntax
(``X[start:stop]``), but if a list of tuples is returned
slicing ``X`` properly requires first building a list or array
of index numbers from the list of tuples. This can be achieved
either by setting ``as_array`` to ``True``, or running ::
for train_tup, test_tup in indexer.generate():
train_idx = \
np.hstack([np.arange(t0, t1) for t0, t1 in train_tup])
when slicing is required.
"""
# Check that the instance have some array information to work with
if not self.__fitted__:
if X is None:
raise AttributeError("No array provided to indexer. Either "
"pass an array to the 'generate' method, "
"or call the 'fit' method first or "
"initiate the instance with an array X "
"as argument.")
# Need to call fit to continue
self.fit(X)
for tri, tei in self._gen_indices():
if as_array:
tri = self._build_range(tri)
tei = self._build_range(tei)
yield tri, tei
@staticmethod
def _build_range(idx):
"""Build an array of indexes from a list or tuple of index tuples.
Given an index object containing tuples of ``(start, stop)`` indexes
``_build_range`` will return an array that concatenate all elements
between each ``start`` and ``stop`` number.
Examples
--------
Single slice (convex slicing)
>>> from mlens.index.base import BaseIndex
>>> BaseIndex._build_range((0, 6))
array([0, 1, 2, 3, 4, 5])
Several slices (non-convex slicing)
>>> from mlens.index.base import BaseIndex
>>> BaseIndex._build_range([(0, 2), (4, 6)])
array([0, 1, 4, 5])
"""
if isinstance(idx[0], tuple):
return np.hstack([np.arange(t0, t1) for t0, t1 in idx])
return np.arange(idx[0], idx[1])
def set_params(self, **params):
self.__fitted__ = False
return super(BaseIndex, self).set_params(**params)
class FullIndex(BaseIndex):
"""Vacuous indexer to be used with final layers.
FullIndex is a compatibility class to be used with meta layers. It stores
the sample size to be predicted for use with the
:class:`ParallelProcessing` job manager, and yields a ``None, None``
index when `generate` is called.
"""
def __init__(self, X=None):
super(FullIndex, self).__init__()
if X is not None:
self.fit(X)
def fit(self, X, y=None, job=None):
"""Store dimensionality data about X."""
self.n_samples = X.shape[0]
self.n_test_samples = X.shape[0]
self.__fitted__ = True
def _gen_indices(self):
"""Vacuous generator to ensure training data is not sliced."""
yield None, None
| mit | 6,683,603,276,718,403,000 | 28.761905 | 79 | 0.568 | false |
kittiu/sale-workflow | sale_promotion_rule/models/sale_order_line.py | 1 | 1567 | # -*- coding: utf-8 -*-
# Copyright 2017 Akretion (http://www.akretion.com).
# @author Benoît GUILLOT <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
promotion_rule_ids = fields.Many2many(
'sale.promotion.rule',
string='Promotion rules',
domain=[('rule_type', '!=', 'coupon')]
)
coupon_promotion_rule_id = fields.Many2one(
'sale.promotion.rule',
string='Coupon promotion rule',
domain=[('rule_type', '=', 'coupon')]
)
coupon_code = fields.Char(
related='coupon_promotion_rule_id.code',
readonly=True,
store=True
)
applied_promotion_rule_ids = fields.Many2many(
'sale.promotion.rule',
string='Promotion rules',
compute='_compute_applied_promotion_rule_ids'
)
has_promotion_rules = fields.Boolean(
compute='_compute_has_promotion_rules'
)
@api.depends('promotion_rule_ids', 'coupon_promotion_rule_id')
def _compute_has_promotion_rules(self):
for rec in self:
rec.has_promotion_rules = (
rec.coupon_promotion_rule_id or
rec.promotion_rule_ids)
@api.depends('promotion_rule_ids', 'coupon_promotion_rule_id')
def _compute_applied_promotion_rule_ids(self):
for rec in self:
rec.applied_promotion_rule_ids = (
rec.coupon_promotion_rule_id | rec.promotion_rule_ids)
| agpl-3.0 | -253,570,785,712,559,520 | 30.32 | 70 | 0.616858 | false |
Zehaos/MobileNet | tools/time_benchmark.py | 1 | 4573 | import tensorflow as tf
import time
from datetime import datetime
import math
import argparse
import sys
from nets.mobilenet import mobilenet, mobilenet_arg_scope
import numpy as np
slim = tf.contrib.slim
def time_tensorflow_run(session, target, info_string):
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
for i in range(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target)
duration = time.time() - start_time
if i >= num_steps_burn_in:
if not i % 10:
print('%s: step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
def time_tensorflow_run_placeholder(session, target, feed_dict, info_string):
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
for i in range(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target,feed_dict=feed_dict)
duration = time.time() - start_time
if i >= num_steps_burn_in:
if not i % 10:
print('%s: step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
def run_benchmark():
if FLAGS.quantized:
graph_filename = FLAGS.quantized_graph
# Create a graph def object to read the graph
with tf.gfile.GFile(graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
with tf.device('/'+FLAGS.mode+':0'):
image_size = 224
if FLAGS.quantized:
inputs = np.random.random((FLAGS.batch_size, image_size, image_size, 3))
tf.import_graph_def(graph_def)
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=config)
# We define the input and output node we will feed in
input_node = graph.get_tensor_by_name('import/MobileNet/input_images:0')
output_node = graph.get_tensor_by_name('import/MobileNet/Predictions/Softmax:0')
time_tensorflow_run_placeholder(sess, output_node, {input_node: inputs}, "Forward")
else:
image_size = 224
inputs = tf.Variable(tf.random_normal([FLAGS.batch_size,
image_size,
image_size, 3],
dtype=tf.float32,
stddev=1e-1))
with slim.arg_scope(mobilenet_arg_scope()):
logits, end_points = mobilenet(inputs, is_training=False)
init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=config)
sess.run(init)
time_tensorflow_run(sess, logits, "Forward")
# Add a simple objective so we can calculate the backward pass.
objective = tf.nn.l2_loss(logits)
# Compute the gradient with respect to all the parameters.
grad = tf.gradients(objective, tf.trainable_variables())
# Run the backward benchmark.
time_tensorflow_run(sess, grad, "Forward-backward")
def main(_):
run_benchmark()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--batch_size',
type=int,
default=1,
help='Batch size.'
)
parser.add_argument(
'--num_batches',
type=int,
default=100,
help='Number of batches to run.'
)
parser.add_argument(
'--mode',
type=str,
default='cpu',
help='gpu/cpu mode.'
)
parser.add_argument(
'--quantized',
type=bool,
default=False,
help='Benchmark quantized graph.'
)
parser.add_argument(
'--quantized_graph',
type=str,
default='',
help='Path to quantized graph file.'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | -2,936,126,996,077,355,000 | 32.137681 | 119 | 0.615788 | false |
ScaleXY/speedby | speedby/transaction/views.py | 1 | 3241 | from django.shortcuts import render
from django.http import Http404
from django.http import HttpResponse, HttpResponseRedirect,Http404
import datetime
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.timezone import localtime
from transaction.models import Transaction
from property.models import Property
from vehicle.models import Vehicle
# Create your views here.
def index(request):
data = {}
data['title'] = "Transaction"
return render(request, 'transaction/index.html', data)
def punchin(request, car_id=0, property_id = None):
data = {}
if property_id:
property = get_object_or_404(Property, id=property_id);
transaction, created = Transaction.objects.get_or_create(
registration_number=car_id,
status=True
)
vehicle = Vehicle.objects.filter(registration_number=car_id)
if vehicle:
transaction.vehicle = vehicle[0]
if property_id:
transaction.property = property
transaction.save()
data['created'] = created
data['transaction'] = transaction
data['title'] = "Punch In"
return render(request,'transaction/punch_in.html', data)
def punchout(request, car_id=0):
data = {}
transaction = get_object_or_404(Transaction, registration_number=car_id, status=True);
transaction.checkout = datetime.datetime.utcnow()
transaction.status = False
time_difference = (transaction.checkout).replace(tzinfo=None) - (transaction.checkin).replace(tzinfo=None)
transaction.duration = time_difference
hour = time_difference.seconds//3600
transaction.hour = hour
price = 0
if transaction.property:
price = transaction.property.rate_min
if hour > 1:
for i in range(hour):
price += transaction.property.rate_hour
transaction.price = price
transaction.save()
title = 'Parked Out'
data ['transaction'] = transaction
data['title'] = title
return render(request,'transaction/punch_out.html', data)
def recipt(request, recipt_id):
data = {}
transaction = get_object_or_404(Transaction, id=recipt_id);
if transaction.payment_method == Transaction.ONLINE and transaction.payment_status == Transaction.NOTPAID and transaction.vehicle.user == request.user :
onlinepayment = True
else :
onlinepayment = False
data['onlinepayment'] = onlinepayment
data['transaction'] = transaction
data['title'] = 'Recipt #' + recipt_id
return render(request,'transaction/recipt.html', data)
def payment_method(request, recipt_id, method = None):
data = {}
transaction = get_object_or_404(Transaction, id=recipt_id,payment_status=Transaction.NOTPAID);
if method and transaction.vehicle :
transaction.payment_method = Transaction.ONLINE
transaction.payment_status = Transaction.NOTPAID
transaction.save()
else :
transaction.payment_method = Transaction.OFFLINE
transaction.payment_status = Transaction.PAID
transaction.save()
return HttpResponseRedirect(reverse('transaction.recipt', kwargs={'recipt_id':transaction.id}))
data['transaction'] = transaction
data['title'] = 'Payment Confirmation #' + recipt_id
return render(request,'transaction/recipt.html', data)
| gpl-2.0 | -8,113,250,145,105,668,000 | 31.089109 | 154 | 0.71984 | false |
huhongbo/dd-agent | utils/flare.py | 1 | 22464 | # stdlib
import atexit
import cStringIO as StringIO
from collections import namedtuple
from functools import partial
import glob
try:
import grp
except ImportError:
# The module only exists on Unix platforms
grp = None
import logging
import os
try:
import pwd
except ImportError:
# Same as above (exists on Unix platforms only)
pwd = None
import re
import stat
import subprocess
import sys
import tarfile
import tempfile
from time import strftime
import traceback
# 3p
import requests
# DD imports
from checks.check_status import CollectorStatus, DogstatsdStatus, ForwarderStatus
from config import (
check_yaml,
get_confd_path,
get_config,
get_config_path,
get_logging_config,
get_url_endpoint,
)
from jmxfetch import JMXFetch
from util import get_hostname
from utils.jmx import jmx_command, JMXFiles
from utils.platform import Platform
# Globals
log = logging.getLogger(__name__)
def configcheck():
all_valid = True
for conf_path in glob.glob(os.path.join(get_confd_path(), "*.yaml")):
basename = os.path.basename(conf_path)
try:
check_yaml(conf_path)
except Exception, e:
all_valid = False
print "%s contains errors:\n %s" % (basename, e)
else:
print "%s is valid" % basename
if all_valid:
print "All yaml files passed. You can now run the Datadog agent."
return 0
else:
print("Fix the invalid yaml files above in order to start the Datadog agent. "
"A useful external tool for yaml parsing can be found at "
"http://yaml-online-parser.appspot.com/")
return 1
class Flare(object):
"""
Compress all important logs and configuration files for debug,
and then send them to Datadog (which transfers them to Support)
"""
DATADOG_SUPPORT_URL = '/support/flare'
CredentialPattern = namedtuple('CredentialPattern', ['pattern', 'replacement', 'label'])
CHECK_CREDENTIALS = [
CredentialPattern(
re.compile('( *(\w|_)*pass(word)?:).+'),
r'\1 ********',
'password'
),
CredentialPattern(
re.compile('(.*\ [A-Za-z0-9]+)\:\/\/([A-Za-z0-9]+)\:(.+)\@'),
r'\1://\2:********@',
'password in a uri'
),
]
MAIN_CREDENTIALS = [
CredentialPattern(
re.compile('^api_key: *\w+(\w{5})$'),
r'api_key: *************************\1',
'api_key'
),
CredentialPattern(
re.compile('^(proxy_user|proxy_password): *.+'),
r'\1: ********',
'proxy credentials'
),
]
COMMENT_REGEX = re.compile('^ *#.*')
COMPRESSED_FILE = 'datadog-agent-{0}.tar.bz2'
# We limit to 10MB arbitrarily
MAX_UPLOAD_SIZE = 10485000
TIMEOUT = 60
def __init__(self, cmdline=False, case_id=None):
self._case_id = case_id
self._cmdline = cmdline
self._init_tarfile()
self._init_permissions_file()
self._save_logs_path()
self._config = get_config()
self._api_key = self._config.get('api_key')
self._url = "{0}{1}".format(
get_url_endpoint(self._config.get('dd_url'), endpoint_type='flare'),
self.DATADOG_SUPPORT_URL
)
self._hostname = get_hostname(self._config)
self._prefix = "datadog-{0}".format(self._hostname)
# On Unix system, check that the user is root (to call supervisorctl & status)
# Otherwise emit a warning, and ask for confirmation
@staticmethod
def check_user_rights():
if Platform.is_linux() and not os.geteuid() == 0:
log.warning("You are not root, some information won't be collected")
choice = raw_input('Are you sure you want to continue [y/N]? ')
if choice.strip().lower() not in ['yes', 'y']:
print 'Aborting'
sys.exit(1)
else:
log.warn('Your user has to have at least read access'
' to the logs and conf files of the agent')
# Collect all conf and logs files and compress them
def collect(self):
if not self._api_key:
raise Exception('No api_key found')
log.info("Collecting logs and configuration files:")
self._add_logs_tar()
self._add_conf_tar()
log.info(" * datadog-agent configcheck output")
self._add_command_output_tar('configcheck.log', configcheck)
log.info(" * datadog-agent status output")
self._add_command_output_tar('status.log', self._supervisor_status)
log.info(" * datadog-agent info output")
self._add_command_output_tar('info.log', self._info_all)
self._add_jmxinfo_tar()
log.info(" * pip freeze")
self._add_command_output_tar('freeze.log', self._pip_freeze,
command_desc="pip freeze --no-cache-dir")
log.info(" * log permissions on collected files")
self._permissions_file.close()
self._add_file_tar(self._permissions_file.name, 'permissions.log',
log_permissions=False)
log.info("Saving all files to {0}".format(self.tar_path))
self._tar.close()
# Upload the tar file
def upload(self, email=None):
self._check_size()
if self._cmdline:
self._ask_for_confirmation()
if not email:
email = self._ask_for_email()
log.info("Uploading {0} to Datadog Support".format(self.tar_path))
url = self._url
if self._case_id:
url = '{0}/{1}'.format(self._url, str(self._case_id))
url = "{0}?api_key={1}".format(url, self._api_key)
requests_options = {
'data': {
'case_id': self._case_id,
'hostname': self._hostname,
'email': email
},
'files': {'flare_file': open(self.tar_path, 'rb')},
'timeout': self.TIMEOUT
}
if Platform.is_windows():
requests_options['verify'] = os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir,
'datadog-cert.pem'
))
self._resp = requests.post(url, **requests_options)
self._analyse_result()
return self._case_id
# Start by creating the tar file which will contain everything
def _init_tarfile(self):
# Default temp path
self.tar_path = os.path.join(
tempfile.gettempdir(),
self.COMPRESSED_FILE.format(strftime("%Y-%m-%d-%H-%M-%S"))
)
if os.path.exists(self.tar_path):
os.remove(self.tar_path)
self._tar = tarfile.open(self.tar_path, 'w:bz2')
# Create a file to log permissions on collected files and write header line
def _init_permissions_file(self):
self._permissions_file = tempfile.NamedTemporaryFile(mode='w', prefix='dd', delete=False)
if Platform.is_unix():
self._permissions_file_format = "{0:50} | {1:5} | {2:10} | {3:10}\n"
header = self._permissions_file_format.format("File path", "mode", "owner", "group")
self._permissions_file.write(header)
self._permissions_file.write('-'*len(header) + "\n")
else:
self._permissions_file.write("Not implemented: file permissions are only logged on Unix platforms")
# Save logs file paths
def _save_logs_path(self):
prefix = ''
if Platform.is_windows():
prefix = 'windows_'
config = get_logging_config()
self._collector_log = config.get('{0}collector_log_file'.format(prefix))
self._forwarder_log = config.get('{0}forwarder_log_file'.format(prefix))
self._dogstatsd_log = config.get('{0}dogstatsd_log_file'.format(prefix))
self._jmxfetch_log = config.get('jmxfetch_log_file')
# Add logs to the tarfile
def _add_logs_tar(self):
self._add_log_file_tar(self._collector_log)
self._add_log_file_tar(self._forwarder_log)
self._add_log_file_tar(self._dogstatsd_log)
self._add_log_file_tar(self._jmxfetch_log)
self._add_log_file_tar(
"{0}/*supervisord.log".format(os.path.dirname(self._collector_log))
)
def _add_log_file_tar(self, file_path):
for f in glob.glob('{0}*'.format(file_path)):
if self._can_read(f):
self._add_file_tar(
f,
os.path.join('log', os.path.basename(f))
)
# Collect all conf
def _add_conf_tar(self):
conf_path = get_config_path()
if self._can_read(conf_path, output=False):
self._add_clean_conf(
conf_path,
'etc',
self.MAIN_CREDENTIALS
)
if not Platform.is_windows():
supervisor_path = os.path.join(
os.path.dirname(get_config_path()),
'supervisor.conf'
)
if self._can_read(supervisor_path, output=False):
self._add_clean_conf(
supervisor_path,
'etc'
)
for file_path in glob.glob(os.path.join(get_confd_path(), '*.yaml')) +\
glob.glob(os.path.join(get_confd_path(), '*.yaml.default')):
if self._can_read(file_path, output=False):
self._add_clean_conf(
file_path,
os.path.join('etc', 'confd'),
self.CHECK_CREDENTIALS
)
# Collect JMXFetch-specific info and save to jmxinfo directory if jmx config
# files are present and valid
def _add_jmxinfo_tar(self):
_, _, should_run_jmx = self._capture_output(self._should_run_jmx)
if should_run_jmx:
# status files (before listing beans because executing jmxfetch overwrites status files)
for file_name, file_path in [
(JMXFiles._STATUS_FILE, JMXFiles.get_status_file_path()),
(JMXFiles._PYTHON_STATUS_FILE, JMXFiles.get_python_status_file_path())
]:
if self._can_read(file_path, warn=False):
self._add_file_tar(
file_path,
os.path.join('jmxinfo', file_name)
)
# beans lists
for command in ['list_matching_attributes', 'list_everything']:
log.info(" * datadog-agent jmx {0} output".format(command))
self._add_command_output_tar(
os.path.join('jmxinfo', '{0}.log'.format(command)),
partial(self._jmx_command_call, command)
)
# java version
log.info(" * java -version output")
_, _, java_bin_path = self._capture_output(
lambda: JMXFetch.get_configuration(get_confd_path())[2] or 'java')
self._add_command_output_tar(
os.path.join('jmxinfo', 'java_version.log'),
lambda: self._java_version(java_bin_path),
command_desc="{0} -version".format(java_bin_path)
)
# Add a file to the tar and append the file's rights to the permissions log (on Unix)
# If original_file_path is passed, the file_path will be added to the tar but the original file's
# permissions are logged
def _add_file_tar(self, file_path, target_path, log_permissions=True, original_file_path=None):
target_full_path = os.path.join(self._prefix, target_path)
if log_permissions and Platform.is_unix():
stat_file_path = original_file_path or file_path
file_stat = os.stat(stat_file_path)
# The file mode is returned in binary format, convert it to a more readable octal string
mode = oct(stat.S_IMODE(file_stat.st_mode))
try:
uname = pwd.getpwuid(file_stat.st_uid).pw_name
except KeyError:
uname = str(file_stat.st_uid)
try:
gname = grp.getgrgid(file_stat.st_gid).gr_name
except KeyError:
gname = str(file_stat.st_gid)
self._permissions_file.write(self._permissions_file_format.format(stat_file_path, mode, uname, gname))
self._tar.add(file_path, target_full_path)
# Returns whether JMXFetch should run or not
def _should_run_jmx(self):
jmx_process = JMXFetch(get_confd_path(), self._config)
jmx_process.configure(clean_status_file=False)
return jmx_process.should_run()
# Check if the file is readable (and log it)
@classmethod
def _can_read(cls, f, output=True, warn=True):
if os.access(f, os.R_OK):
if output:
log.info(" * {0}".format(f))
return True
else:
if warn:
log.warn(" * not readable - {0}".format(f))
return False
def _add_clean_conf(self, file_path, target_dir, credential_patterns=None):
basename = os.path.basename(file_path)
temp_path, log_message = self._strip_credentials(file_path, credential_patterns)
log.info(' * {0}{1}'.format(file_path, log_message))
self._add_file_tar(
temp_path,
os.path.join(target_dir, basename),
original_file_path=file_path
)
# Return path to a temp file without comments on which the credential patterns have been applied
def _strip_credentials(self, file_path, credential_patterns=None):
if not credential_patterns:
credential_patterns = []
credentials_found = set()
fh, temp_path = tempfile.mkstemp(prefix='dd')
atexit.register(os.remove, temp_path)
with os.fdopen(fh, 'w') as temp_file:
with open(file_path, 'r') as orig_file:
for line in orig_file.readlines():
if not self.COMMENT_REGEX.match(line):
clean_line, credential_found = self._clean_credentials(line, credential_patterns)
temp_file.write(clean_line)
if credential_found:
credentials_found.add(credential_found)
credentials_log = ''
if len(credentials_found) > 1:
credentials_log = ' - this file contains credentials ({0}) which'\
' have been removed in the collected version'\
.format(', '.join(credentials_found))
elif len(credentials_found) == 1:
credentials_log = ' - this file contains a credential ({0}) which'\
' has been removed in the collected version'\
.format(credentials_found.pop())
return temp_path, credentials_log
# Remove credentials from a given line
def _clean_credentials(self, line, credential_patterns):
credential_found = None
for credential_pattern in credential_patterns:
if credential_pattern.pattern.match(line):
line = re.sub(credential_pattern.pattern, credential_pattern.replacement, line)
credential_found = credential_pattern.label
# only one pattern should match per line
break
return line, credential_found
# Add output of the command to the tarfile
def _add_command_output_tar(self, name, command, command_desc=None):
out, err, _ = self._capture_output(command, print_exc_to_stderr=False)
fh, temp_path = tempfile.mkstemp(prefix='dd')
with os.fdopen(fh, 'w') as temp_file:
if command_desc:
temp_file.write(">>>> CMD <<<<\n")
temp_file.write(command_desc)
temp_file.write("\n")
temp_file.write(">>>> STDOUT <<<<\n")
temp_file.write(out.getvalue())
out.close()
temp_file.write(">>>> STDERR <<<<\n")
temp_file.write(err.getvalue())
err.close()
self._add_file_tar(temp_path, name, log_permissions=False)
os.remove(temp_path)
# Capture the output of a command (from both std streams and loggers) and the
# value returned by the command
def _capture_output(self, command, print_exc_to_stderr=True):
backup_out, backup_err = sys.stdout, sys.stderr
out, err = StringIO.StringIO(), StringIO.StringIO()
backup_handlers = logging.root.handlers[:]
logging.root.handlers = [logging.StreamHandler(out)]
sys.stdout, sys.stderr = out, err
return_value = None
try:
return_value = command()
except Exception:
# Print the exception to either stderr or `err`
traceback.print_exc(file=backup_err if print_exc_to_stderr else err)
finally:
# Stop capturing in a `finally` block to reset std streams' and loggers'
# behaviors no matter what
sys.stdout, sys.stderr = backup_out, backup_err
logging.root.handlers = backup_handlers
return out, err, return_value
# Print supervisor status (and nothing on windows)
def _supervisor_status(self):
if Platform.is_windows():
print 'Windows - status not implemented'
else:
agent_exec = self._get_path_agent_exec()
print '{0} status'.format(agent_exec)
self._print_output_command([agent_exec, 'status'])
supervisor_exec = self._get_path_supervisor_exec()
print '{0} status'.format(supervisor_exec)
self._print_output_command([supervisor_exec,
'-c', self._get_path_supervisor_conf(),
'status'])
# Find the agent exec (package or source)
def _get_path_agent_exec(self):
if Platform.is_mac():
agent_exec = '/opt/datadog-agent/bin/datadog-agent'
else:
agent_exec = '/etc/init.d/datadog-agent'
if not os.path.isfile(agent_exec):
agent_exec = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../bin/agent'
)
return agent_exec
# Find the supervisor exec (package or source)
def _get_path_supervisor_exec(self):
supervisor_exec = '/opt/datadog-agent/bin/supervisorctl'
if not os.path.isfile(supervisor_exec):
supervisor_exec = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../venv/bin/supervisorctl'
)
return supervisor_exec
# Find the supervisor conf (package or source)
def _get_path_supervisor_conf(self):
if Platform.is_mac():
supervisor_conf = '/opt/datadog-agent/etc/supervisor.conf'
else:
supervisor_conf = '/etc/dd-agent/supervisor.conf'
if not os.path.isfile(supervisor_conf):
supervisor_conf = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../supervisord/supervisord.conf'
)
return supervisor_conf
# Print output of command
def _print_output_command(self, command):
try:
status = subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e:
status = 'Not able to get output, exit number {0}, exit output:\n'\
'{1}'.format(str(e.returncode), e.output)
print status
# Print info of all agent components
def _info_all(self):
CollectorStatus.print_latest_status(verbose=True)
DogstatsdStatus.print_latest_status(verbose=True)
ForwarderStatus.print_latest_status(verbose=True)
# Call jmx_command with std streams redirection
def _jmx_command_call(self, command):
try:
jmx_command([command], self._config, redirect_std_streams=True)
except Exception, e:
print "Unable to call jmx command {0}: {1}".format(command, e)
# Print java version
def _java_version(self, java_bin_path):
try:
self._print_output_command([java_bin_path, '-version'])
except OSError:
print 'Unable to execute java bin with command: {0}'.format(java_bin_path)
# Run a pip freeze
def _pip_freeze(self):
try:
import pip
pip.main(['freeze', '--no-cache-dir'])
except ImportError:
print 'Unable to import pip'
# Check if the file is not too big before upload
def _check_size(self):
if os.path.getsize(self.tar_path) > self.MAX_UPLOAD_SIZE:
log.info("{0} won't be uploaded, its size is too important.\n"
"You can send it directly to support by email.")
sys.exit(1)
# Function to ask for confirmation before upload
def _ask_for_confirmation(self):
print '{0} is going to be uploaded to Datadog.'.format(self.tar_path)
choice = raw_input('Do you want to continue [Y/n]? ')
if choice.strip().lower() not in ['yes', 'y', '']:
print 'Aborting (you can still use {0})'.format(self.tar_path)
sys.exit(1)
# Ask for email if needed
def _ask_for_email(self):
# We ask everytime now, as it is also the 'id' to check
# that the case is the good one if it exists
return raw_input('Please enter your email: ').lower()
# Print output (success/error) of the request
def _analyse_result(self):
# First catch our custom explicit 400
if self._resp.status_code == 400:
raise Exception('Your request is incorrect: {0}'.format(self._resp.json()['error']))
# Then raise potential 500 and 404
self._resp.raise_for_status()
try:
self._case_id = self._resp.json()['case_id']
# Failed parsing
except ValueError:
raise Exception('An unknown error has occured - '
'Please contact support by email')
# Finally, correct
log.info("Your logs were successfully uploaded. For future reference,"
" your internal case id is {0}".format(self._case_id))
| bsd-3-clause | 3,822,984,352,316,557,300 | 38.135889 | 114 | 0.569044 | false |
Seko34/Kodi-Development | script.module.seko.downloader/lib/downloaderModule/plugins/AdvancedDownloader.py | 1 | 55224 | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------
'''
Created on 07 Nov. 2015
@author: Seko
@summary: Advanced Downloader
'''
#---------------------------------------------------------------------
# ____________________ I M P O R T ____________________
import util
import xbmc
import xbmcaddon
import xbmcvfs
import xbmcgui
import StorageServer
import os
import re
import urllib2
import time
import zipfile
from contextlib import closing
from downloaderModule.pluginDownloaderTpl import downloaderTemplate
# ___ Initialize database
try:
from sqlite3 import dbapi2 as sqlite
xbmc.log("[AdvDownloader] Loading sqlite3 as DB engine", xbmc.LOGDEBUG)
except:
from pysqlite2 import dbapi2 as sqlite
xbmc.log("[AdvDownloader] Loading pysqlite2 as DB engine", xbmc.LOGDEBUG)
"""
AdvancedDownloader Class
"""
class AdvancedDownloader(downloaderTemplate):
# ___ HEADER CONFIGURATION FOR HTML REQUEST
HEADER_CFG = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
# ___ DOWNLOAD STATUS
STATUS_STARTING = 0
STATUS_DOWNLOADING = 1
STATUS_DOWNLOADING_READY_TO_PLAY = 2
STATUS_STOPPED = 3
STATUS_FINISHED = 4
STATUS_ERROR = 5
# ___ QUEUE TYPE
QUEUE_DB = 1
QUEUE_CACHE = 2
# ___ progressDialog
pDialog = None
def __init__(self):
"""
Constructor
"""
self.ID = 2
# ___ Various variables
self.__addon__ = xbmcaddon.Addon(id='script.module.seko.downloader')
self.__addonDir__ = xbmc.translatePath(self.__addon__.getAddonInfo('path'))
# ___ Init queue
# ___ @Todo : Add variable to settings
self.queueType = 1
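        # ___ Illustrative sketch for the @Todo above (kept as comments, not active code);
        # ___ it assumes a 'queueType' addon setting, which does not exist yet:
        #       try:
        #           self.queueType = int(self.__addon__.getSetting('queueType'))
        #       except ValueError:
        #           self.queueType = self.QUEUE_DB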
if self.queueType == self.QUEUE_DB:
self.__queue__ = DatabaseQueue()
elif self.queueType == self.QUEUE_CACHE:
self.__queue__ = CacheQueue()
xbmc.log("[AdvDownloader] Init Advanced Downloader Done", xbmc.LOGDEBUG)
def clearAll(self):
pass
def download(self, fileName, params, async=True):
"""
Download method
        @param fileName: the name of the file to download
        @param params: the dictionary with information about the file to download
The minimum format is :
{
'url':'<the url to download the file>',
'title':'<Title of the movie>',
'destinationFolder':'<the destination folder>',
'webBrowserId': '<the web browser id>',
                             'playAtEnd': playAtEnd, => Boolean which indicates if the downloaded file should be played at the end of the download
                             (-- Optional variables --)
                             'useragent': the user-agent to use to download the file
'incomplete_path': incomplete_path, => path where the temporary file is downloaded
(-- Variable added after --)
'fileName' : the file name
'complete_path': complete_path, => path where the complete file is moved
'totalsize': totalsize, => the total size of the downloaded file
'current_dl_size': float(result[i][7]),
'current_percent_dl': float(result[i][8]),
'dl_status': int(result[i][9]),
'async': int(result[i][10]),
                             'queuePosition': the position in the queue, => calculated during insertion in the queue
(-- Variable for streamer --)
'cacheSize': cache Size, => the cache size in percent
'percent': percent, => the current progress in percent of the download
'oldpercent':oldpercent,
'initialpercent': initialpercent, => the initial percent at the start of the download (used for resume download)
Used for calculating the percent cache
'percentcache': percentcache, => the percent to exceed for reaching the cache (initialpercent+cacheSize)
'percentforcache': percentforcache => the percent to exceed for reaching the cache
}
        @param async: Boolean which indicates if the download should be started in another thread
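        Example (illustrative only; the url, folder and browser id below are hypothetical placeholders):
            downloader = AdvancedDownloader()
            downloader.download('My Movie.mp4', {
                'url': 'http://example.com/movie.mp4',
                'title': 'My Movie',
                'destinationFolder': xbmc.translatePath('special://temp/'),
                'webBrowserId': '0',
                'playAtEnd': 'False'})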
"""
# ___ Initialize all necessary variable
# ___ Set the filename
params['fileName'] = self._slugify(fileName)
# ___ Set the async value
if async:
params['async'] = 1
else:
params['async'] = 0
# ___ Set the playAtEnd value
if params['playAtEnd'] == 'True':
params['playAtEnd'] = 1
else:
params['playAtEnd'] = 0
# ___ Set the complete_path value
params['complete_path'] = os.path.join(params['destinationFolder'], params['fileName'].encode("utf-8"))
# ___ If incomplete path is not in params variable, the incomplete path will be the complete path.
if 'incomplete_path' not in params :
params['incomplete_path'] = params['complete_path']
else:
params['incomplete_path'] = os.path.join(params['incomplete_path'], params['fileName'].encode("utf-8"))
params['totalsize'] = float(self._getTargetFileSize(params))
params['current_dl_size'] = float(0)
params['current_percent_dl'] = float(0)
params['dl_status' ] = self.STATUS_STOPPED
params['queuePosition'] = self.__queue__._getLastIndex() + 1
# ___ Add element to the queue
self.__queue__._clearQueue()
self.__queue__._addToQueue(params['fileName'], params)
if params['async'] == 1:
xbmc.log("[AdvDownloader] Async", xbmc.LOGDEBUG)
self._run_async(self._startDownload)
xbmc.log("[AdvDownloader] Download added to the queue", xbmc.LOGDEBUG)
else:
xbmc.log("[AdvDownloader] Normal", xbmc.LOGDEBUG)
self._startDownload()
xbmc.log("[AdvDownloader] Download done", xbmc.LOGDEBUG)
def _processQueueDownload(self):
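        """
        Process the download queue: take the items one by one, download them,
        move each finished file to its final location, extract it if it is a
        rar/zip archive and optionally play it. Runs until the queue is empty
        or a stop is requested.
        """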
item = self.__queue__._getNextItemFromQueue()
if item:
# __ If the progress dialog does not exist, we initialize it.
if not self.pDialog :
self.pDialog = xbmcgui.DialogProgressBG()
self.pDialog.create("Progressbar", "")
while item:
# __ Default status = 500
status = 500
self._setPaths(item)
# __ Verify parameters in item
                if "url" not in item:
xbmc.log("URL missing : %s" % repr(item), xbmc.LOGERROR)
elif item["url"].find("ftp") > -1 or item["url"].find("http") > -1:
# __ Start to download a new item if it is a http or ftp url :
# - Set the 'StopDownloaderQueue' to 'False'
# - Download the file
self.__queue__._setStopFlag(False)
status = self._downloadURL(item)
else:
# __ Bad URL
xbmc.log("[AdvDownloader] Bad url : "+item["url"], xbmc.LOGERROR)
# __ If status = 200, the download is complete
if status == 200:
if xbmcvfs.exists(item["incomplete_path"]):
# __ Move the file from temp directory to the complete path.
xbmc.log("[AdvDownloader] Moving %s to %s" % (repr(item["incomplete_path"]), repr(item["complete_path"])),xbmc.LOGDEBUG)
if repr(item["incomplete_path"]) != repr(item["complete_path"]):
xbmcvfs.rename(item["incomplete_path"], item["complete_path"])
# __ Extract file if necessary
# __ Extract rar file
if str(item["complete_path"]).endswith(".rar"):
xbmc.executebuiltin("XBMC.Extract("+str(item["complete_path"])+","+item["download_path"].decode("utf-8")+")")
# __ Extract all zip file
elif str(item["complete_path"]).endswith(".zip"):
with zipfile.ZipFile(str(item["complete_path"]), "r") as compressFile:
compressFile.extractall(item["download_path"].decode("utf-8"))
# __ Display complete message
self._showMessage("Download complete", item['fileName'])
# __ Launch video if it is asked
if 'playAtEnd' in item and int(item['playAtEnd'])==1:
if not str(item["complete_path"]).endswith(".rar") and not str(item["complete_path"]).endswith(".zip"):
try:
xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(str(item["complete_path"]))
except Exception:
xbmc.log("[AdvDownloader] Download complete, play movie failed",xbmc.LOGDEBUG)
self._showMessage("Play movie failed", "ERROR")
else:
xbmc.log("[AdvDownloader] Download complete, but file %s not found" % repr(item["incomplete_path"]),xbmc.LOGERROR)
self._showMessage("Download failed", "ERROR")
                # __ Else if the status is neither 200 nor 300, the download failed
elif status != 300:
xbmc.log("[AdvDownloader] Failure: " + repr(item) + " - " + repr(status),xbmc.LOGERROR)
self._showMessage("Download failed", "unknown error")
# __ If status = 300, the download is just stopped.
if status == 300:
item = False
# __ Else delete incomplete path, and remove the item in the queue.
else:
if xbmcvfs.exists(item["incomplete_path"]) and repr(item["incomplete_path"]) != repr(item["complete_path"]):
xbmcvfs.delete(item["incomplete_path"])
self.__queue__._removeToQueue(item['fileName'])
# __ Get the next download
item = self.__queue__._getNextItemFromQueue()
xbmc.log("[AdvDownloader] Finished download queue.",xbmc.LOGDEBUG)
# __ Close the progress dialog at the end, if necessary
if self.pDialog:
self.pDialog.close()
xbmc.log("[AdvDownloader] Closed dialog",xbmc.LOGDEBUG)
self.pDialog = None
def _downloadURL(self,item):
"""
        Method to download a file from a URL
        @param item: the dictionary with information about the file to download
             {'url': url,'incomplete_path': incomplete_path,'complete_path': complete_path,'playAtEnd': playAtEnd }
        @note:
            -adds 'old_percent' to item
            -adds 'percentforcache' to item
"""
xbmc.log('[AdvDownloader] '+item["fileName"],xbmc.LOGDEBUG)
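        # ___ Return convention used by this method (http-like codes, not actual
        # ___ HTTP responses): 200 = download complete, 300 = download stopped on
        # ___ request, 500 = download failed.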
item["dl_status"]=self.STATUS_STARTING
url = urllib2.Request(item["url"])
shouldRestartDl = False
params = {"bytes_so_far": 0, "mark": 0.0, "queue_mark": 0.0, "obytes_so_far": 0}
item["current_percent_dl"] = 0.1
item["old_percent"] = -1
# __ Set the useragent in the header
if "useragent" in item:
url.add_header("User-Agent", item["useragent"])
else:
url.add_header("User-Agent", self.HEADER_CFG['User-Agent'])
# __ Open the temporary file 'incomplete_path'
if "current_dl_size" in item and float(item['current_dl_size']) > 0:
# _ If we resume the download, we open the file with parameters 'ab' for appending bytes
file = open(item["incomplete_path"], "ab")
else:
file = open(item["incomplete_path"], "wb")
shouldRestartDl = True
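        # ___ Resume sketch (illustrative numbers): with 1048576 bytes already on disk and
        # ___ a total size of 4194304 bytes, the request below carries the header
        # ___ "Range: bytes=1048576-4194304"; if the server replies "Accept-Ranges: none",
        # ___ the partial file is discarded further down and the download restarts from byte 0.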
# __ If we should resume the download, add in the header "Range" with the file size
if "current_dl_size" in item and float(item['current_dl_size']) > 0:
xbmc.log("[AdvDownloader] Current size "+str(item['current_dl_size'])+" / Total size : "+str(item["totalsize"]),xbmc.LOGDEBUG)
            # _ Byte offsets must be integers in the Range header (the stored sizes are floats)
            url.add_header("Range", "bytes=%s-%s" % (int(item['current_dl_size']), int(item["totalsize"])))
params["bytes_so_far"] = item['current_dl_size']
# __ Start the connexion
con = urllib2.urlopen(url)
        # __ Get the response header information, to know if we can resume the download
responseHeader = con.info()
if 'Accept-Ranges' in responseHeader:
xbmc.log("[AdvDownloader] Accept-Ranges: "+responseHeader['Accept-Ranges'],xbmc.LOGINFO)
if "current_dl_size" in item and 'Accept-Ranges' in responseHeader and responseHeader['Accept-Ranges'] == 'none':
            # ___ If the server does not support resuming, we restart the download from the beginning
url = urllib2.Request(item["url"])
# __ Set the useragent in the header
if "useragent" in item:
url.add_header("User-Agent", item["useragent"])
else:
url.add_header("User-Agent", self.HEADER_CFG["User-Agent"])
# __ Delete the temporary file 'incomplete_path'
xbmcvfs.delete(item["incomplete_path"])
# __ Open the temporary file 'incomplete_path'
file = open(item["incomplete_path"], "wb")
# close the current connection
con.close()
# __ Restart the connection
con = urllib2.urlopen(url)
# __ Set shouldRestartDl to True
shouldRestartDl = True
item['current_dl_size'] = float(0)
# __ Set the chunk_size
chunk_size = 1024 * 8
# __ If we should resume the download, calculate the percent
if "current_dl_size" in item and float(item['current_dl_size']) > 0 :
# __ Calculate the percent
self._generatePercent(item, params)
try:
# __ Download the file until it is complete or until asking for stop
item["dl_status"]=self.STATUS_DOWNLOADING
while (not self.__queue__._shouldStop() ):
# Read next 'chunk_size'
chunk = con.read(chunk_size)
# Write
file.write(chunk)
# Increase bytes_so_far
params["bytes_so_far"] += len(chunk)
if params["mark"] == 0.0 and params["bytes_so_far"] > 0:
params["mark"] = time.time()
xbmc.log("[AdvDownloader] Mark set",xbmc.LOGDEBUG)
# __ Calculate the percent
self._generatePercent(item, params)
# xbmc.log("recieved chunk: %s - %s" % ( repr(item["percent"] > item["old_percent"]), repr(time.time() - params["queue_mark"])),xbmc.LOGDEBUG)
if item["current_percent_dl"] > item["old_percent"] or time.time() - params["queue_mark"] > 30:
# __ Update the progress bar asynchronous if the download is not for streamer
                    self._run_async(self._updateProgress, item, params)
item["old_percent"] = item["current_percent_dl"]
params["obytes_so_far"] = params["bytes_so_far"]
# _ Break when the chunk is None
if not chunk:
break
# __ Close connection and the file
xbmc.log("[AdvDownloader] Loop done",xbmc.LOGDEBUG)
con.close()
file.close()
except Exception, e:
print str(e)
xbmc.log("Error : "+repr(e),xbmc.LOGERROR)
xbmc.log("Download failed.",xbmc.LOGERROR)
try:
con.close()
except:
xbmc.log("Failed to close download stream")
try:
file.close()
except:
xbmc.log("Failed to close file handle")
self._showMessage("Download failed", "ERROR")
# ___ Set status to stopped
item['dl_status'] = self.STATUS_ERROR
# __ Return 500 if the download is failed due to an error
return 500
if self.__queue__._shouldStop() and int(item["current_percent_dl"]) < 99 :
# __ Return 300 if the download is aborted
xbmc.log("[AdvDownloader] Download aborted : "+str(self.__queue__._shouldStop()),xbmc.LOGINFO)
# ___ Set status to stopped
item['dl_status'] = self.STATUS_STOPPED
self.__queue__._updateQueueItem(item['fileName'],item)
return 300
# ___ Set status to stopped
item['dl_status'] = self.STATUS_FINISHED
self.__queue__._updateQueueItem(item['fileName'],item)
# __ Return 200 if the download is complete
xbmc.log("[AdvDownloader] _downloadURL Done",xbmc.LOGERROR)
return 200
def _setPaths(self, params):
""""
_setPaths Method
Method to set :
-the 'incomplete_path' in the 'params' variable
-the 'complete_path' in the 'params' variable
@param params: the dictionnary with informations about the file to download
"""
xbmc.log('[AdvDownloader] '+params['fileName'], xbmc.LOGDEBUG)
# Check utf-8 stuff here
xbmc.log("[AdvDownloader] Path incomplete: "+params["incomplete_path"], xbmc.LOGDEBUG)
xbmc.log("[AdvDownloader] Path complete: "+params["complete_path"], xbmc.LOGDEBUG)
# __ If the 'complete_path' already exists, delete it
if xbmcvfs.exists(params["complete_path"]):
xbmc.log("[AdvDownloader] Removing existing %s" % repr(params["complete_path"]), xbmc.LOGDEBUG)
xbmcvfs.delete(params["complete_path"])
# __ If the 'incomplete_path' already exists, delete it
if xbmcvfs.exists(params["incomplete_path"]):
if self._confirmResume(self.__addon__.getLocalizedString(33207),self.__addon__.getLocalizedString(33208)+params['fileName']):
xbmc.log("[AdvDownloader] Resuming incomplete %s" % repr(params["incomplete_path"]), xbmc.LOGDEBUG)
params['current_dl_size']=self._getFileSize(params["incomplete_path"])
else:
xbmc.log("[AdvDownloader] Removing incomplete %s" % repr(params["incomplete_path"]), xbmc.LOGDEBUG)
xbmcvfs.delete(params["incomplete_path"])
xbmc.log("[AdvDownloader] _setPaths Done", xbmc.LOGDEBUG)
def _generatePercent(self, item, params):
"""
Method _generatePercent
@param item: the item for updating the percent
@param params: all parameters associated with the item
"""
get = params.get
iget = item.get
new_delta = False
if "last_delta" in item:
if time.time() - item["last_delta"] > 0.2:
new_delta = True
else:
item["last_delta"] = 0.0
new_delta = True
item['current_dl_size'] = get("bytes_so_far")
if item["totalsize"] > 0 and new_delta:
item["current_percent_dl"] = ( float(get("bytes_so_far")) * 100) / float(item["totalsize"])
elif iget("duration") and get("mark") != 0.0 and new_delta:
time_spent = time.time() - get("mark")
item["current_percent_dl"] = time_spent / int(iget("duration")) * 100
xbmc.log("[AdvDownloader] Time spent: %s. Duration: %s. Time left: %s (%s)" % (int(time_spent), int(iget("duration")),
int(int(iget("duration")) - time_spent),
self._convertSecondsToHuman(int(iget("duration")) - time_spent)), xbmc.LOGDEBUG)
elif new_delta:
xbmc.log("[AdvDownloader] cycle - " + str(time.time() - item["last_delta"]), xbmc.LOGDEBUG)
delta = time.time() - item["last_delta"]
if delta > 10 or delta < 0:
delta = 5
item["current_percent_dl"] = iget("old_percent") + delta
if item["current_percent_dl"] >= 100:
item["current_percent_dl"] -= 100
item["old_percent"] = item["current_percent_dl"]
if new_delta:
item["last_delta"] = time.time()
def _updateProgress(self, item, params):
"""
Method _updateProgress
@param item: the current item
        @param params: the dictionary with information about the file to download
"""
get = params.get
iget = item.get
queue = False
new_mark = time.time()
if new_mark == get("mark"):
speed = 0
else:
speed = int((get("bytes_so_far") / 1024) / (new_mark - get("mark")))
if new_mark - get("queue_mark") > 1.5:
heading = u"[%s] %sKb/s (%.2f%%)" % (self.__queue__._getLastIndex(), speed, item["current_percent_dl"])
xbmc.log("[AdvDownloader] Updating %s - %s" % (heading, item['fileName'].encode("utf-8")), xbmc.LOGDEBUG)
params["queue_mark"] = new_mark
self.__queue__._updateQueueItem(item['fileName'],item)
if xbmc.Player().isPlaying() and xbmc.getCondVisibility("VideoPlayer.IsFullscreen"):
# __ Hide the progress dialog if we start to play a movie
if self.pDialog:
self.pDialog.close()
self.pDialog = None
else:
# __ Initialize the progress dialog if it is closed
if not self.pDialog:
self.pDialog = xbmcgui.DialogProgressBG()
self.pDialog.create("Preparing download", "")
heading = u"[%s] %s - %.2f%% (%s Kb/s)" % (str(self.__queue__._getLastIndex()), "Downloading", float(item["current_percent_dl"]),speed)
xbmc.log("[AdvDownloader] Heading : "+heading, xbmc.LOGDEBUG)
# __ Update the progress dialog
if iget("Title"):
self.pDialog.update(int(item["current_percent_dl"]), heading, iget("Title"))
else:
xbmc.log("[AdvDownloader] Try to update the dialog",xbmc.LOGDEBUG)
self.pDialog.update(int(item["current_percent_dl"]), heading, item['fileName'])
xbmc.log("[AdvDownloader] _updateProgress Done", xbmc.LOGDEBUG)
def _startDownload(self):
self._processQueueDownload()
def _stopDownload(self):
self.__queue__._askStop()
def _pauseDownload(self):
pass
def _resumeDownload(self):
pass
def getQueue(self):
return self.__queue__._getQueue()
def _run_async(self, func, *args, **kwargs):
"""
Method _run_async
@param func: the function to execute asynchronous
@param *args: the arguments passed into the function called
        @param **kwargs: other arguments
@return the thread started
"""
from threading import Thread
worker = Thread(target=func, args=args, kwargs=kwargs)
worker.start()
return worker
def _showMessage(self, heading, message):
"""
Method to show a notification
@param heading : the heading text
@param message : the message of the notification
"""
xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % (heading, message.encode("utf-8"), 2000)).encode("utf-8"))
def _showDialog(self, heading, message):
"""
Method to show a "ok" dialog window
@param heading : the heading text
@param message : the message of the dialog window
"""
dialog = xbmcgui.Dialog()
dialog.ok(heading, message)
def _confirmResume(self, heading, line1, line2="", line3=""):
"""
Method to ask a confirmation for resuming the download
@param heading: the heading text
@param line1 : the first line of the confirmation dialog
@param line2 : the second line of the confirmation dialog
@param line3 : the third line of the confirmation dialog
@return: true if the user confirm the resume of dialog
"""
dialog = xbmcgui.Dialog()
return dialog.yesno(heading, line1, line2, line3)
def _getTargetFileSize(self, item):
"""
        Method to get the size of a file from a URL
        @param item: the dictionary which represents the file; it contains the URL used to download the file
        @return the total size (in bytes) reported for the remote file
"""
url = urllib2.Request(item["url"], headers=AdvancedDownloader.HEADER_CFG)
# __ Set the useragent in the header
if "useragent" in item:
url.add_header("User-Agent", item["useragent"])
        # __ Start the connection
con = urllib2.urlopen(url)
total_size = 0
# __ Set the total size
        content_length = con.info().getheader("Content-Length")
        if content_length and content_length.strip():
            total_size = int(content_length.strip())
# __ Return the total size
return total_size
def _slugify(self, value):
"""
        Method to normalize a file name: converts to lowercase, strips accents and
        non-alphanumeric characters, and collapses hyphens/whitespace into single spaces,
        keeping the 4-character extension untouched.
"""
import unicodedata
extension = value[len(value) - 4:]
value = value[:len(value) - 4]
value = value.decode("utf-8")
value = value.decode('unicode-escape','ignore')
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s\.-]', ' ', value).strip().lower())
value = re.sub('[-\s]+', ' ', value)
return value + extension
def _convertSecondsToHuman(self, seconds):
"""
Method _convertSecondsToHuman
        @param seconds: the number of seconds
        @return a human-readable string: seconds when under a minute, minutes when under an hour, hours otherwise
"""
seconds = int(seconds)
if seconds < 60:
return "~%ss" % (seconds)
        elif seconds < 3600:
            return "~%sm" % (seconds / 60)
        else:
            return "~%sh" % (seconds / 3600)
def _isStarted(self):
pass
class QueueClass():
"""
Defaut queue class
"""
def _getQueue(self):
pass
def _getNextItemFromQueue(self):
pass
def _getLastIndex(self):
pass
def _addToQueue(self, fileName, params):
pass
def _removeToQueue(self, fileName, params={}):
pass
def _clearQueue(self):
pass
def _updateQueueItem(self,fileName,params):
pass
def _shouldStop(self):
pass
def _askStop(self):
pass
def _setStopFlag(self,shouldStop):
pass
class DatabaseQueue(QueueClass):
"""
Database queue class
"""
def __init__(self):
self.__addon__ = xbmcaddon.Addon(id='script.module.seko.downloader')
self.__addonDir__ = xbmc.translatePath(self.__addon__.getAddonInfo('path'))
# ___ Initialize database
# ___ db file
self.__dbFile__ = os.path.join(self.__addonDir__, 'AdvancedDownloader.db')
try:
self.createDatabase()
except:
xbmc.log("[AdvDownloader] Error during connection to the downloader database", xbmc.LOGERROR)
def createDatabase(self):
# ___ Create table with columns:
# queuePosition INT
# fileName TEXT
# url TEXT
# title TEXT
# incomplete_path TEXT
# complete_path TEXT
# total_size REAL
# current_dl_size REAL
# current_percent_dl REAL
# dl_status INT
# async INT
# playAtEnd INT
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
sql_create_db = "CREATE TABLE IF NOT EXISTS AdvDownloader (queuePosition INTEGER, fileName TEXT, url TEXT, title TEXT, incomplete_path TEXT, complet_path TEXT, total_size REAL, current_dl_size REAL, current_percent_dl REAL, dl_status INTEGER, async INTEGER, playAtEnd INTEGER);"
dbCursor.execute(sql_create_db)
try:
db.commit()
xbmc.log("[AdvDownloader] Database created",xbmc.LOGINFO)
except Exception, e:
xbmc.log("[AdvDownloader] Error during creating database with query " + sql_create_db, xbmc.LOGERROR)
def _getQueue(self):
resultList = []
# ___ Select sql (12)
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
sql_select = "SELECT * from AdvDownloader;"
# queuePosition
# fileName
# url
# title
# incomplete_path
# complete_path
# total_size
# current_dl_size
# current_percent_dl
# dl_status
# async
# playAtEnd
try:
dbCursor.execute(sql_select)
result = dbCursor.fetchall()
if len(result) > 0:
for i in result:
itemJSON = {
'queuePosition': i[0],
'fileName': i[1],
'url':i[2],
'title':i[3],
'destinationFolder':i[5],
'webBrowserId': 0,
'incomplete_path': i[4],
'complete_path': i[5],
'totalsize': float(i[6]),
'current_dl_size': float(i[7]),
'current_percent_dl': float(i[8]),
'dl_status': int(i[9]),
'async': int(i[10]),
'playAtEnd': int(i[11])
}
resultList.append(itemJSON)
xbmc.log("[AdvDownloader] Get the queue list success in db" , xbmc.LOGINFO)
return resultList
except Exception, e:
xbmc.log(str(e))
xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
return None
def _getNextItemFromQueue(self):
# ___ Select sql (12)
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
sql_select = "SELECT * FROM AdvDownloader ORDER BY queuePosition ASC;"
# queuePosition
# fileName
# url
# title
# incomplete_path
# complete_path
# total_size
# current_dl_size
# current_percent_dl
# dl_status
# async
# playAtEnd
try:
dbCursor.execute(sql_select)
result = dbCursor.fetchall()
if len(result) > 0:
itemJSON = {
'queuePosition': result[0][0],
'fileName': result[0][1],
'url':result[0][2],
'title':result[0][3],
'destinationFolder':result[0][5],
'webBrowserId': 0,
'incomplete_path': result[0][4],
'complete_path': result[0][5],
'totalsize': float(result[0][6]),
'current_dl_size': float(result[0][7]),
'current_percent_dl': float(result[0][8]),
'dl_status': int(result[0][9]),
'async': int(result[0][10]),
'playAtEnd': int(result[0][11])
}
xbmc.log("[AdvDownloader] Find next element %s success in db" % (itemJSON['fileName']), xbmc.LOGINFO)
return itemJSON
except Exception, e:
print str(e)
xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
return None
def _getLastIndex(self):
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
# ___ Select sql (12)
sql_select = "SELECT max(queuePosition) from AdvDownloader;"
try:
dbCursor.execute(sql_select)
result = dbCursor.fetchall()
if len(result) > 0:
if result[0][0] == None:
maxIndex = 0
else:
maxIndex = int(result[0][0])
xbmc.log("[AdvDownloader] Find last index %s success in db" % (maxIndex), xbmc.LOGINFO)
return maxIndex
except Exception, e:
print str(e)
xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
return 0
def _searchItem(self,item):
"""
Method to search an item in the queue
@param item: the item to search
"""
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
# ___ Select sql (12)
sql_select = "SELECT * from AdvDownloader WHERE fileName = '"+item['fileName']+"';"
try:
dbCursor.execute(sql_select)
result = dbCursor.fetchall()
if len(result) > 0 :
if result[0][0] == None:
xbmc.log("[AdvDownloader] No element %s in db" % (item['fileName']), xbmc.LOGINFO)
return None
else:
itemJSON = {
'queuePosition': result[0][0],
'fileName': result[0][1],
'url':result[0][2],
'title':result[0][3],
'destinationFolder':result[0][5],
'webBrowserId': 0,
'incomplete_path': result[0][4],
'complete_path': result[0][5],
'totalsize': float(result[0][6]),
'current_dl_size': float(result[0][7]),
'current_percent_dl': float(result[0][8]),
'dl_status': int(result[0][9]),
'async': int(result[0][10]),
'playAtEnd': int(result[0][11])
}
xbmc.log("[AdvDownloader] Find element %s success in db" % (item['fileName']), xbmc.LOGINFO)
return itemJSON
except Exception, e:
print str(e)
xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
return None
def _addToQueue(self, fileName, params):
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
if self._searchItem(params) is None:
index = self._getLastIndex() + 1;
# ___ Insert value (12)
# queuePosition
# fileName
# url
# title
# incomplete_path
# complete_path
# total_size
# current_dl_size
# current_percent_dl
# dl_status
# async
# playAtEnd
sql_insert = "INSERT INTO AdvDownloader VALUES ( %s, '%s', '%s', '%s', '%s', '%s', %s, %s, %s, %s, %s, %s);" % (index, fileName, params['url'], params['title'], params['incomplete_path'], params['complete_path'], str(params['totalsize']), 0, 0, 0, int(params['async']), int(params['playAtEnd']))
dbCursor.execute(sql_insert)
try:
db.commit()
xbmc.log("[AdvDownloader] Insert success in db", xbmc.LOGINFO)
except Exception, e:
xbmc.log("[AdvDownloader] Error during insertion execution in db with query : " + sql_insert, xbmc.LOGERROR)
def _removeToQueue(self, fileName, params={}):
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
clause_value = "fileName = '%s'" % (fileName)
if 'url' in params:
clause_value = clause_value + " AND url = '%s'" % (params['url'])
sql_delete = "DELETE FROM AdvDownloader WHERE %s ;" % (clause_value)
dbCursor.execute(sql_delete)
try:
db.commit()
xbmc.log("[AdvDownloader] Delete success in db", xbmc.LOGINFO)
except Exception, e:
xbmc.log("[AdvDownloader] Error during delete execution in db with query : " + sql_delete, xbmc.LOGERROR)
def _updateQueueItem(self,fileName,params):
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
clause_value = "fileName = '%s'" % (fileName)
if 'url' in params:
clause_value = clause_value + " AND url = '%s'" % (params['url'])
sql_update = "UPDATE AdvDownloader SET "
sql_update = sql_update+" current_dl_size = "+str(params['current_dl_size'])+ ","
sql_update = sql_update+" current_percent_dl = "+str(params['current_percent_dl'])+ ","
sql_update = sql_update+" dl_status = "+str(params['dl_status'])
sql_update = sql_update+" WHERE %s ;" % (clause_value)
dbCursor.execute(sql_update)
try:
db.commit()
xbmc.log("[AdvDownloader] Update success in db", xbmc.LOGINFO)
except Exception, e:
xbmc.log("[AdvDownloader] Error during update execution in db with query : " + sql_update, xbmc.LOGERROR)
def _clearQueue(self):
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
sql_delete = "DELETE FROM AdvDownloader;"
dbCursor.execute(sql_delete)
try:
db.commit()
xbmc.log("[AdvDownloader] Clear success in db", xbmc.LOGINFO)
except Exception, e:
xbmc.log("[AdvDownloader] Error during delete execution in db with query : " + sql_delete, xbmc.LOGERROR)
def __del__(self):
# ___ Destroy object
pass
def _shouldStop(self):
"""
Method _shouldStop
@return True if we ask to stop all downloads, else return False
"""
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
# ___ Select sql (12)
sql_select = "SELECT * from AdvDownloader WHERE dl_status < 3;"
try:
dbCursor.execute(sql_select)
result = dbCursor.fetchall()
if len(result) > 0 :
if result[0][0] == None:
xbmc.log("[AdvDownloader] No download started in db", xbmc.LOGINFO)
return True
else:
itemJSON = {
'queuePosition': result[0][0],
'fileName': result[0][1],
'url':result[0][2],
'title':result[0][3],
'destinationFolder':result[0][5],
'webBrowserId': 0,
'incomplete_path': result[0][4],
'complete_path': result[0][5],
'totalsize': float(result[0][6]),
'current_dl_size': float(result[0][7]),
'current_percent_dl': float(result[0][8]),
'dl_status': int(result[0][9]),
'async': int(result[0][10]),
'playAtEnd': int(result[0][11])
}
xbmc.log("[AdvDownloader] Find element in download", xbmc.LOGINFO)
return False
except Exception, e:
print str(e)
xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
return True
def _askStop(self):
"""
Method _askStop
Ask to stop all downloads
"""
with sqlite.connect(self.__dbFile__) as db:
dbCursor = db.cursor()
sql_update = "UPDATE AdvDownloader SET dl_status = 3 WHERE dl_status < 3;"
dbCursor.execute(sql_update)
try:
db.commit()
xbmc.log("[AdvDownloader] Stop download - Update success in db", xbmc.LOGINFO)
except Exception, e:
xbmc.log("[AdvDownloader] Error during update execution in db with query : " + sql_update, xbmc.LOGERROR)
def _setStopFlag(self,shouldStop):
pass
class CacheQueue(QueueClass):
"""
Cache queue class
"""
def __init__(self):
# Initialize 'cache' variables
try:
import StorageServer
except:
import storageserverdummy as StorageServer
self.cache = StorageServer.StorageServer("AdvDownloader")
def _getQueue(self):
"""
Method to get the queue
@return the queue
        @attention: use this method only for reading the queue
"""
queue = self.cache.get("AdvDownloaderQueue")
try:
items = eval(queue)
except:
items = {}
xbmc.log("[AdvDownloader] _getQueue Done: " + str(len(items)), xbmc.LOGDEBUG)
return items
def moveItemToPosition(self, filename, position):
"""
Method moveItemToPosition
@param filename: The name of the file
@param position: The new index of the item in the queue
"""
if position > 0 and self.cache.lock("AdvDownloaderQueueLock"):
items = []
if filename:
queue = self.cache.get("AdvDownloaderQueue")
xbmc.log("[AdvDownloader] Queue loaded : " + repr(queue), xbmc.LOGDEBUG)
if queue:
try:
items = eval(queue)
except:
items = []
xbmc.log("[AdvDownloader] Pre items: %s " % repr(items), xbmc.LOGDEBUG)
# ___ Move item in the position
for index, item in enumerate(items):
(item_id, item) = item
if item_id == filename:
del items[index]
items = items[:position] + [(filename, item)] + items[position:]
break
# ___ Recalculate queuePosition
for index, itemQueue in enumerate(items):
(item_id, item) = itemQueue
item['queuePosition'] = index
del items[index]
items = items[:index] + [(item_id, item)] + items[index:]
xbmc.log("[AdvDownloader] Post items: %s " % repr(items), xbmc.LOGDEBUG)
self.cache.set("AdvDownloaderQueue", repr(items))
self.cache.unlock("AdvDownloaderQueueLock")
xbmc.log("[AdvDownloader] moveItemToPosition Done", xbmc.LOGDEBUG)
else:
xbmc.log("[AdvDownloader] Couldn't lock AdvDownloaderQueueQueueLock in the method _moveItemToPosition", xbmc.LOGDEBUG)
def _getNextItemFromQueue(self):
"""
        _getNextItemFromQueue : Method to get the next item from the queue
@return the next item in the queue
the item has the format : (filename, params)
"""
if self.cache.lock("AdvDownloaderQueueLock"):
# ___ Initialiaze the items array
items = []
# ___ Get the current queue
queue = self.cache.get("AdvDownloaderQueue")
xbmc.log("[AdvDownloader] Queue loaded : " + repr(queue), xbmc.LOGDEBUG)
if queue:
try:
items = eval(queue)
except:
items = False
item = {}
# ___ If the current queue is not empty, we get the next item
if len(items) > 0:
item = items[0]
xbmc.log("[AdvDownloader] Returning : " + item[0], xbmc.LOGDEBUG)
self.cache.unlock("AdvDownloaderQueueLock")
if item:
(fileName, item) = item
return item
else:
return False
else:
xbmc.log("[AdvDownloader] Couldn't aquire lock on AdvDownloaderQueueLock in the method _getNextItemFromQueue", xbmc.LOGDEBUG)
def _getLastIndex(self):
"""
Method to return the last index of the queue
"""
return len(self._getQueue())
    def _addToQueue(self, filename, params):
"""
Method _addToQueue
@param filename: the name of the file to download
        @param params: the dictionary with information about the file to download
"""
if self.cache.lock("AdvDownloaderQueueLock"):
items = []
if filename:
queue = self.cache.get("AdvDownloaderQueue")
xbmc.log("[AdvDownloader] Queue loaded : " + repr(queue), xbmc.LOGDEBUG)
if queue:
try:
items = eval(queue)
except:
items = []
append = True
# __ Verify if the item is already into the queue
for index, item in enumerate(items):
(item_id, item) = item
if item_id == filename:
                    # __ If the item is already in the queue, we will delete it
append = False
del items[index]
break
# __ If we should add the item in the queue
if append:
items.append((filename, params))
xbmc.log("[AdvDownloader] Added: " + filename.decode('utf-8') + " to queue - " + str(len(items)).decode('utf-8'), xbmc.LOGDEBUG)
# __ Else we should insert the item in the head of the queue
else:
items.insert(1, (filename, params)) # 1 or 0?
xbmc.log("[AdvDownloader] Moved " + filename.decode('utf-8') + " to front of queue. - " + str(len(items)).decode('utf-8'), xbmc.LOGDEBUG)
# __ Set the new queue
self.cache.set("AdvDownloaderQueue", repr(items))
# __ Unlock the queue
self.cache.unlock("AdvDownloaderQueueLock")
xbmc.log("[AdvDownloader] _addItemToQueue Done", xbmc.LOGDEBUG)
else:
xbmc.log("[AdvDownloader] Couldn't lock AdvDownloaderQueueLock on _addItemToQueue", xbmc.LOGERROR)
    def _removeToQueue(self, filename, params={}):
"""
_removeToQueue Method
        @param filename: the filename to remove from the download queue
        @param params: all associated parameters
"""
if self.cache.lock("AdvDownloaderQueueLock"):
items = []
queue = self.cache.get("AdvDownloaderQueue")
xbmc.log("[AdvDownloader] Queue loaded : " + repr(queue), xbmc.LOGDEBUG)
if queue:
try:
items = eval(queue)
except:
items = []
for index, item in enumerate(items):
(item_id, item) = item
if item_id == filename:
self._removeTempFile(filename, item)
del items[index]
self.cache.set("AdvDownloaderQueue", repr(items))
xbmc.log("[AdvDownloader] Removed: " + filename.decode('utf-8') + " from queue", xbmc.LOGDEBUG)
self.cache.unlock("AdvDownloaderQueueLock")
xbmc.log("[AdvDownloader] Remove item from queue : Done")
else:
xbmc.log("[AdvDownloader] Exception in _removeToQueue", xbmc.LOGDEBUG)
else:
xbmc.log("[AdvDownloader] Couldn't lock AdvDownloaderQueueLock on _removeToQueue", xbmc.LOGERROR)
def _clearQueue(self):
"""
_clearQueue Method
"""
if self.cache.lock("AdvDownloaderQueueLock"):
items = []
self.cache.set("AdvDownloaderQueue", repr(items))
xbmc.log("[AdvDownloader] Clear queue successful ", xbmc.LOGDEBUG)
else:
xbmc.log("[AdvDownloader] Couldn't lock AdvDownloaderQueueLock on _clearQueue", xbmc.LOGERROR)
def _shouldStop(self):
"""
Method _shouldStop
@return True if we ask to stop all downloads, else return False
        @warning: this method reads the cached value "AdvDownloaderStop"
"""
shouldStop = False
shouldStopStr = self.cache.get("AdvDownloaderStop")
if shouldStopStr is not None:
try:
"""
@bug: SyntaxError: ('unexpected EOF while parsing', ('<string>', 0, 0, ''))
@note : Not use eval(shouldStopStr) to avoid SyntaxError
"""
if shouldStopStr == "True" :
shouldStop = True
except Exception:
pass
return shouldStop
def _askStop(self):
"""
Method _askStop
Ask to stop all downloads
        @warning: this method reads and sets the cached value "AdvDownloaderStop"
"""
shouldStopStr = self.cache.get("AdvDownloaderStop")
"""
@bug: SyntaxError: ('unexpected EOF while parsing', ('<string>', 0, 0, ''))
@note : Not use eval(shouldStopStr) to avoid SyntaxError
"""
if shouldStopStr == "False":
self.cache.set("AdvDownloaderStop", repr(True))
def _setStopFlag(self,shouldStop):
"""
Method _setStopFlag
Ask to set the flag AdvDownloaderStop
        @warning: this method reads and sets the cached value "AdvDownloaderStop"
"""
self.cache.set("AdvDownloaderStop", repr(shouldStop)) | gpl-3.0 | 4,965,653,996,201,922,000 | 40.900607 | 311 | 0.485821 | false |
Southpaw-TACTIC/TACTIC | src/tactic/ui/app/advanced_search.py | 1 | 60219 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["AdvancedSearchKeywordWdg", "AdvancedSearchSaveWdg", "AdvancedSearchSavedSearchesWdg", "AdvancedSearchSaveButtonsWdg",
"DeleteSavedSearchCmd", "SaveSearchCmd", "GetSavedSearchCmd", "SaveCurrentSearchCmd", "DeleteRecentSearchCmd"]
from pyasm.common import Environment, Xml, jsonloads, jsondumps
from pyasm.command import Command
from pyasm.search import Search, SearchType
from pyasm.web import DivWdg, HtmlElement
from pyasm.widget import CheckboxWdg
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.filter import BaseFilterWdg, FilterData
from tactic.ui.input import LookAheadTextInputWdg, TextInputWdg
class AdvancedSearchKeywordWdg(BaseFilterWdg):
def init(self):
self.search_type = self.options.get("search_type")
if not self.search_type:
self.search_type = self.kwargs.get("search_type")
stype_columns = SearchType.get_columns(self.search_type)
self.columns = self.kwargs.get('columns')
if self.columns:
self.columns = self.columns.split('|')
else:
self.columns = self.options.get("columns")
if self.columns:
self.columns = self.columns.split('|')
if not self.columns:
self.columns = []
# need a way to specify the columns
sobject = SearchType.create(self.search_type)
if hasattr(sobject, 'get_search_columns'):
self.columns = sobject.get_search_columns()
self.columns.append('id')
if 'code' in stype_columns:
self.columns.append('code')
self.prefix = self.kwargs.get("prefix")
#self.text.set_persist_on_submit(prefix=self.prefix)
#self.set_filter_value(self.text, filter_index)
self.stype_columns = []
self.text_value = ''
def get_styles(self):
styles = HtmlElement.style('''
/* Look ahead and tags */
.spt_look_ahead_top {
padding: 20px 20px 10px 20px;
color: grey;
}
.spt_look_ahead_top .spt_template_item {
display: none !important;
}
.spt_look_ahead_top .spt_look_ahead_header {
position: relative;
display: grid;
grid-template-columns: auto 35px;
grid-gap: 15px;
}
.spt_look_ahead_top .spt_look_ahead_header .info-icon {
border-radius: 20px;
margin: 5px;
//border: 1px solid grey;
display: flex;
align-items: center;
justify-content: center;
font-size: 15px;
}
.spt_look_ahead_top .spt_recent_searches {
font-size: 12px;
box-shadow: rgba(0, 0, 0, 0.1) 0px 0px 15px;
color: rgb(0, 0, 0);
top: 35px;
border-style: solid;
min-width: 220px;
border-width: 1px;
padding: 5px 10px 10px 5px;
border-color: rgb(187, 187, 187);
z-index: 1000;
background: rgb(255, 255, 255);
position: absolute;
left: 0;
}
.spt_look_ahead_top .spt_recent_search {
display: flex;
justify-content: space-between;
align-items:center;
padding: 3px;
cursor: hand;
}
.spt_look_ahead_top .spt_recent_search_label {
width: 100%;
}
.spt_look_ahead_top .spt_recent_search_remove {
font-style: italic;
color: red;
}
.spt_look_ahead_top .spt_text_input_wdg {
border-radius: 20px;
}
.spt_look_ahead_top .spt_search_tags {
transition: 0.25s;
}
.spt_look_ahead_top .spt_search_tags.empty .spt_clear_tags {
background: grey;
}
.spt_look_ahead_top .spt_clear_tags {
background: black;
color: #f4f4f4;
}
.spt_look_ahead_top .search-tag {
border-radius: 20px;
display: inline-block;
font-size: 12px;
padding: 4px 10px;
margin: 5px 5px 0 0;
}
.spt_look_ahead_top .spt_search_tags:not(.empty) .search-tag:hover {
box-shadow: 0 2px 4px 0 rgba(0, 0, 0, 0.15);
}
.spt_look_ahead_top .spt_search_tag_item {
position: relative;
padding-right: 20px;
background: #f4f4f4;
}
.spt_look_ahead_top .spt_search_tag_item .fa {
position: absolute;
right: 5;
top: 5;
cursor: pointer;
}
.spt_look_ahead_top .spt_validation_indicator {
position: absolute;
right: 60;
top: 9;
display: flex;
align-items: center;
justify-content: center;
width: 18px;
height: 18px;
border-radius: 10px;
color: white;
}
.spt_look_ahead_top .spt_validation_indicator.spt_pass {
background: lightgreen;
}
.spt_look_ahead_top .spt_validation_indicator.spt_fail {
background: red;
}
.spt_look_ahead_top .spt_validation_indicator.spt_pass .fa-times {
display: none;
}
.spt_look_ahead_top .spt_validation_indicator.spt_fail .fa-check{
display: none;
}
''')
return styles
def get_display(self):
look_ahead_top = self.top
look_ahead_top.add_class("spt_look_ahead_top spt_search_filter")
look_ahead_top.add_behavior({
'type': 'load',
'search_type': self.search_type,
'recent_searches': self.get_recent_searches(),
'cbjs_action': self.get_onload_js()
})
self.add_relay_behaviors(look_ahead_top)
look_ahead_header = DivWdg()
look_ahead_top.add(look_ahead_header)
look_ahead_header.add_class("spt_look_ahead_header")
custom_cbk = {
'enter': '''
if (els && spt.text_input.index > -1)
spt.advanced_search.keywords.add_keyword(bvr.src_el.value);
'''
}
columns = SearchType.get_columns(self.search_type)
on_search_complete = '''
let top = bvr.src_el.getParent(".spt_look_ahead_top");
let validator = top.getElement(".spt_validation_indicator");
let textInput = top.getElement(".spt_text_input");
let value = textInput.value;
let resultsContainer = top.getElement(".spt_input_text_results");
let resultDivs = resultsContainer.getElements(".spt_input_text_result");
let results = []
resultDivs.forEach(function(resultDiv){
results.push(resultDiv.innerText);
});
if (results.includes(value)) {
validator.removeClass("spt_fail");
validator.addClass("spt_pass");
} else {
validator.removeClass("spt_pass");
validator.addClass("spt_fail");
}
'''
if 'keywords' in columns:
look_ahead_wdg = LookAheadTextInputWdg(name="", width="100%", background="#f4f4f4", custom_cbk=custom_cbk, highlight=True,
on_search_complete=on_search_complete, keyword_mode="contains", search_type=self.search_type)
else:
look_ahead_wdg = LookAheadTextInputWdg(name="", width="100%", background="#f4f4f4", custom_cbk=custom_cbk, highlight=True,
on_search_complete=on_search_complete, keyword_mode="contains")
look_ahead_header.add(look_ahead_wdg)
info_wdg = DivWdg("<i class='fa fa-info'></i>")
look_ahead_header.add(info_wdg)
info_wdg.add_class("info-icon")
info_wdg.add_class("hand")
validation_indicator = DivWdg("<i class='fa fa-check'></i><i class='fa fa-times'></i>")
look_ahead_header.add(validation_indicator)
validation_indicator.add_class("spt_validation_indicator")
validation_indicator.add_style("display: none")
custom_dropdown = DivWdg()
look_ahead_header.add(custom_dropdown)
custom_dropdown.add_class("spt_recent_searches")
custom_dropdown.add_style("display: none")
recent_search = DivWdg()
custom_dropdown.add(recent_search)
recent_search.add_class("spt_recent_search")
recent_search.add_class("spt_template_item")
recent_search_label = DivWdg()
recent_search.add(recent_search_label)
recent_search_label.add_class("spt_recent_search_label")
recent_search_label.add_class("spt_input_text_result")
recent_search_remove = DivWdg("Remove")
recent_search.add(recent_search_remove)
recent_search_remove.add_class("spt_recent_search_remove")
recent_search_remove.add_behavior({
'type': 'click',
'search_type': self.search_type,
'cbjs_action': '''
let item = bvr.src_el.getParent(".spt_recent_search");
spt.advanced_search.keywords.remove_recent(item);
'''
})
custom_dropdown.add_behavior({
'type': 'load',
'cbjs_action': '''
bvr.src_el.on_complete = function(el) {
bvr.src_el.setStyle("display", "none");
}
let template = bvr.src_el.getElement(".spt_template_item");
let recent_searches = spt.advanced_search.keywords.recent_searches;
for (let i=0; i<recent_searches.length; i++) {
let value = recent_searches[i]
let clone = spt.behavior.clone(template);
let labelDiv = clone.getElement(".spt_recent_search_label");
clone.setAttribute("spt_value", value)
labelDiv.innerText = value;
labelDiv.setAttribute("spt_value", value)
clone.removeClass("spt_template_item");
bvr.src_el.appendChild(clone);
}
'''
})
#### tag clusters
tag_cluster = DivWdg()
look_ahead_top.add(tag_cluster)
tag_cluster.add_class("spt_search_tags")
# check if empty before adding this class
tag_cluster.add_class("empty")
clear_tags = DivWdg("Clear")
tag_cluster.add(clear_tags)
clear_tags.add_class("spt_clear_tags")
clear_tags.add_class("search-tag hand")
clear_tags.add_behavior({
'type': 'click',
'cbjs_action': '''
let tagsContainer = bvr.src_el.getParent(".spt_search_tags");
let items = tagsContainer.getElements(".spt_search_tag_item");
items.forEach(function(item){
if (item.hasClass("spt_template_item")) return;
item.remove();
})
tagsContainer.addClass("empty");
// extract and set keywords
spt.advanced_search.keywords.set_keywords();
'''
})
search_tag_item = DivWdg()
tag_cluster.add(search_tag_item)
search_tag_item.add_class("spt_search_tag_item")
search_tag_item.add_class("spt_template_item search-tag")
search_tag_label = DivWdg("#charHeroSimba")
search_tag_item.add(search_tag_label)
search_tag_label.add_class("spt_search_tag_label")
search_tag_close = DivWdg("<i class='fa fa-times-circle'></i>")
search_tag_item.add(search_tag_close)
search_tag_close.add_behavior({
'type': 'click',
'cbjs_action': '''
let tagsContainer = bvr.src_el.getParent(".spt_search_tags");
let item = bvr.src_el.getParent(".spt_search_tag_item");
item.remove();
if (tagsContainer.childElementCount == 2)
tagsContainer.addClass("empty");
// extract and set keywords
spt.advanced_search.keywords.set_keywords();
'''
})
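        # These hidden inputs are what the search submit sends back through FilterData:
        # the filter prefix, the enabled flag and the collected keywords
        # ("<prefix>_search_text") that alter_search() reads.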
look_ahead_top.add('''<input type="hidden" name="prefix" value="%s" class="spt_input">''' % self.prefix)
look_ahead_top.add('''<input type="hidden" name="%s_enabled" value="on" class="spt_input">''' % self.prefix)
look_ahead_top.add('''<input type="hidden" name="%s_search_text" value="" class="spt_input spt_keywords">''' % self.prefix)
look_ahead_top.add(self.get_styles())
return look_ahead_top
def get_onload_js(self):
return '''
if (typeof(spt.advanced_search) == "undefined") spt.advanced_search = {};
if (typeof(spt.advanced_search.keywords) == "undefined") spt.advanced_search.keywords = {};
spt.advanced_search.keywords.recent_searches = bvr.recent_searches;
spt.advanced_search.keywords.search_type = bvr.search_type;
'''
def add_relay_behaviors(self, top):
top.add_behavior({
'type': 'load',
'values': self.get_value(),
'cbjs_action': '''
if (bvr.values) {
let values = bvr.values.split(",");
values.forEach(function(value) {
spt.advanced_search.keywords.add_keyword(value);
});
}
'''
})
top.add_relay_behavior({
'type': 'mouseup',
'bvr_match_class': 'spt_input_text_result',
'cbjs_action': '''
var display = bvr.src_el.getAttribute("spt_display");
display = JSON.parse(display);
var value = bvr.src_el.getAttribute("spt_value");
if (!display) {
display = value;
}
spt.advanced_search.keywords.add_keyword(display);
if (bvr.src_el.hasClass("spt_recent_search_label")) {
let top = bvr.src_el.getParent(".spt_look_ahead_top");
let customDropdown = top.getElement(".spt_recent_searches");
spt.body.remove_focus_element(customDropdown);
customDropdown.on_complete();
} else {
spt.advanced_search.keywords.add_recent(value);
}
'''
})
top.add_relay_behavior({
'type': 'keyup',
'bvr_match_class': 'spt_text_input',
'cbjs_action': '''
let top = bvr.src_el.getParent(".spt_look_ahead_top");
let customDropdown = top.getElement(".spt_recent_searches");
spt.body.remove_focus_element(customDropdown);
customDropdown.on_complete();
let validator = top.getElement(".spt_validation_indicator");
let value = bvr.src_el.value;
if (value != "")
validator.setStyle("display", "");
else
validator.setStyle("display", "none");
'''
})
top.add_relay_behavior({
'type': 'click',
'bvr_match_class': 'spt_text_input',
'cbjs_action': '''
if (bvr.src_el.value != "") return;
let top = bvr.src_el.getParent(".spt_look_ahead_top");
let customDropdown = top.getElement(".spt_recent_searches");
customDropdown.setStyle("display", "");
spt.body.add_focus_element(customDropdown);
'''
})
def get_value(self):
filter_data = FilterData.get()
values = filter_data.get_values_by_index(self.prefix, 0)
return values.get("%s_search_text"%self.prefix)
def alter_search(self, search):
''' customize the search here '''
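        # Builds a single OR group: exact matches on the configured columns (id only when the
        # value parses as an integer) plus a full-text filter on "keywords" when the sType has
        # that column.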
self.stype_columns = search.get_columns()
values = FilterData.get().get_values_by_index(self.prefix, 0)
# check if this filter is enabled
enabled = values.get("%s_enabled" % self.prefix)
value = self.get_value()
if enabled == None:
# by default, the filter is enabled
is_enabled = True
else:
is_enabled = (str(enabled) in ['on', 'true'])
if not is_enabled:
return
if is_enabled and value:
self.num_filters_enabled += 1
if not value:
return
self.text_value = value
search.add_op("begin")
for column in self.columns:
if not column in self.stype_columns:
continue
# id and code should be exact matches
if column == 'id':
try:
search.add_filter(column, int(value))
except ValueError:
pass
elif column != 'keywords':
search.add_filter(column, value)
#filter_string = Search.get_compound_filter(value, self.columns)
#if filter_string:
# search.add_where(filter_string)
# add keywords
column = 'keywords'
if value and column in self.stype_columns:
search.add_text_search_filter(column, value)
search.add_op("or")
def get_recent_searches(self):
search = Search("config/widget_config")
search.add_filter("view", "recent_searches")
search.add_filter("search_type", self.search_type)
search.add_user_filter()
config_sobj = search.get_sobject()
keywords = []
if config_sobj:
config_xml = config_sobj.get_xml_value("config")
from pyasm.widget import WidgetConfig, WidgetConfigView
config = WidgetConfig.get(view="recent_searches", xml=config_xml)
data = config_xml.get_value("config/recent_searches/values")
keywords = jsonloads(data)
return keywords
class SaveCurrentSearchCmd(Command):
def execute(self):
search_type = self.kwargs.get("search_type")
# values = self.kwargs.get("values")
value = self.kwargs.get("value")
if not value:
return
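        # Recent searches are stored per user in a config/widget_config entry (view
        # "recent_searches") whose XML carries the list of keyword strings as JSON under
        # config/recent_searches/values.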
search = Search("config/widget_config")
search.add_filter("view", "recent_searches")
search.add_filter("search_type", search_type)
search.add_user_filter()
config_sobj = search.get_sobject()
if not config_sobj:
values = [value]
values_str = jsondumps(values)
config = "<config>\n"
config += "<recent_searches>\n"
# get all of the serialized versions of the filters
value_type = "json"
config += "<values type='%s'>%s</values>\n" % (value_type, values_str)
config += "</recent_searches>\n"
config += "</config>\n"
config_sobj = SearchType.create('config/widget_config')
config_sobj.set_value("view", 'recent_searches')
config_sobj.set_value("search_type", search_type)
config_sobj.set_user()
else:
config_xml = config_sobj.get_xml_value("config")
from pyasm.widget import WidgetConfig, WidgetConfigView
config = WidgetConfig.get(view="recent_searches", xml=config_xml)
data = config_xml.get_value("config/recent_searches/values")
values = jsonloads(data)
values.append(value)
values_str = jsondumps(values)
config = "<config>\n"
config += "<recent_searches>\n"
# get all of the serialized versions of the filters
value_type = "json"
config += "<values type='%s'>%s</values>\n" % (value_type, values_str)
config += "</recent_searches>\n"
config += "</config>\n"
config_sobj.set_value("config", config)
config_sobj.commit()
class DeleteRecentSearchCmd(Command):
def execute(self):
search_type = self.kwargs.get("search_type")
# values = self.kwargs.get("values")
value = self.kwargs.get("value")
search = Search("config/widget_config")
search.add_filter("view", "recent_searches")
search.add_filter("search_type", search_type)
search.add_user_filter()
config_sobj = search.get_sobject()
deleted = False
if config_sobj:
config_xml = config_sobj.get_xml_value("config")
from pyasm.widget import WidgetConfig, WidgetConfigView
config = WidgetConfig.get(view="recent_searches", xml=config_xml)
data = config_xml.get_value("config/recent_searches/values")
values = jsonloads(data)
values.remove(value)
values_str = jsondumps(values)
config = "<config>\n"
config += "<recent_searches>\n"
# get all of the serialized versions of the filters
value_type = "json"
config += "<values type='%s'>%s</values>\n" % (value_type, values_str)
config += "</recent_searches>\n"
config += "</config>\n"
config_sobj.set_value("config", config)
config_sobj.commit()
deleted = True
self.info["deleted"] = deleted
class AdvancedSearchSaveWdg(BaseRefreshWdg):
def get_styles(self):
styles = HtmlElement.style('''
/* Save */
.spt_save_top {
display: grid;
grid-template-rows: 40px auto;
background: white;
color: grey;
}
.spt_save_top .spt_save_header {
display: flex;
justify-content: space-between;
align-items: center;
background: black;
padding: 15px;
color: white;
}
.spt_save_top .spt_save_close {
}
.spt_save_top .spt_save_content {
margin: auto;
}
.spt_save_top .save-row {
display: flex;
padding: 5px 0;
}
.spt_save_top .spt_search_name_input {
width: 380px;
height: 35px;
border-radius: 20px;
border: 1px solid #ccc;
padding: 0 12px;
background: #f4f4f4;
}
.spt_save_top .search-button {
background: #eee;
border-radius: 20px;
margin-left: 15px;
display: flex;
align-items: center;
justify-content: center;
padding: 0 20px;
}
.spt_save_top input[type='checkbox'] {
margin: 0px !important;
}
.spt_save_top .spt_error_message {
color: red;
height: 14px;
}
''')
return styles
def get_display(self):
search_type = self.kwargs.get("search_type")
save_top = self.top
save_top.add_class("spt_save_top")
## header
save_header = DivWdg()
save_top.add(save_header)
save_header.add_class("spt_save_header")
save_title = DivWdg("Save")
save_header.add(save_title)
save_title.add_class("spt_save_title")
save_close = DivWdg("<i class='fa fa-times'></i>")
save_header.add(save_close)
save_close.add_class("spt_save_close")
save_close.add_class("hand")
save_close.add_behavior({
'type': 'click',
'cbjs_action': '''
let top = bvr.src_el.getParent(".spt_search_top");
let overlay = top.getElement(".overlay");
let saveTop = top.getElement(".spt_save_top");
overlay.removeClass("visible");
saveTop.removeClass("visible");
setTimeout(function(){
overlay.setStyle("display", "none");
}, 250);
'''
})
## content
save_content = DivWdg()
save_top.add(save_content)
save_content.add_class("spt_save_content")
save_first_row = DivWdg()
save_content.add(save_first_row)
save_first_row.add_class("spt_error_message")
save_first_row.add_class("save-row")
save_second_row = DivWdg()
save_content.add(save_second_row)
save_second_row.add_class("save-row")
look_ahead_wdg = TextInputWdg(name="search_name", width="380px", background="#f4f4f4")
save_second_row.add(look_ahead_wdg)
look_ahead_wdg.add_class("spt_input spt_search_name_input")
look_ahead_wdg.add_behavior({
'type': 'keyup',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_save_top");
var errorDiv = top.getElement(".spt_error_message");
errorDiv.innerText = "";
'''
})
search_button = DivWdg("Save")
save_second_row.add(search_button)
search_button.add_class("search-button")
search_button.add_class("hand")
search_button.add_behavior( {
'search_type': search_type,
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_save_top");
var inputs = spt.api.get_input_values(top);
var errorDiv = top.getElement(".spt_error_message");
var value = inputs.search_name[0];
if (!value) {
spt.alert("No view name specified");
return;
}
var save_personal = inputs.my_searches[0] == "on";
var save_shared = inputs.shared_searches[0] == "on";
if (!save_personal && !save_shared) {
spt.alert("Please select a save location");
return;
}
var new_values = spt.advanced_search.generate_json();
var search_values_dict = JSON.stringify(new_values);
var options = {
'search_type': bvr.search_type,
'display': 'block',
'view': value,
'save_personal': save_personal,
'save_shared': save_shared
};
// replace the search widget
var server = TacticServerStub.get();
let on_complete = function(ret_val) {
// DEPENDENCY?
if (save_personal) {
let key = "my_searches";
spt.advanced_search.saved.create_item(key, value, value);
spt.advanced_search.saved.add_item(key, value, value);
}
if (save_shared) {
let key = "shared_searches";
spt.advanced_search.saved.create_item(key, value, value);
spt.advanced_search.saved.add_item(key, value, value);
}
if (save_personal || save_shared) {
spt.notify.show_message("Search saved");
let top = bvr.src_el.getParent(".spt_search_top");
let overlay = top.getElement(".overlay");
let saveTop = top.getElement(".spt_save_top");
overlay.removeClass("visible");
saveTop.removeClass("visible");
setTimeout(function(){
overlay.setStyle("display", "none");
}, 250);
}
}
let on_error = function(err) {
errorDiv.innerText = err;
}
var class_name = "tactic.ui.app.SaveSearchCmd";
server.execute_cmd(class_name, options, search_values_dict, {on_complete: on_complete, on_error: on_error});
'''
} )
save_third_row = DivWdg()
save_content.add(save_third_row)
save_third_row.add_class("save-row")
my_searches_checkbox = CheckboxWdg(name="my_searches")
save_third_row.add(my_searches_checkbox)
my_searches_checkbox.set_checked()
save_third_row.add("<div style='margin: 0 20px 0 8px; display: flex; align-items: center;'>Save to <b style='margin-left: 5px'>My Searches</b></div>")
shared_searches_checkbox = CheckboxWdg(name="shared_searches")
save_third_row.add(shared_searches_checkbox)
save_third_row.add("<div style='margin: 0 20px 0 8px; display: flex; align-items: center;'>Save to <b style='margin-left: 5px'>Shared Searches</b></div>")
save_top.add(self.get_styles())
return save_top
class SaveSearchCmd(Command):
def init(self):
# handle the default
config = self.kwargs.get('config')
self.search_type = self.kwargs.get("search_type")
self.view = self.kwargs.get("view")
assert(self.search_type)
def execute(self):
self.init()
# create the filters
self.filters = []
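        # The generated entry looks roughly like this (illustrative; the values come from
        # FilterData.serialize()):
        #   <config>
        #     <filter>
        #       <values type='json'>[ ...serialized filter values... ]</values>
        #     </filter>
        #   </config>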
config = "<config>\n"
config += "<filter>\n"
# get all of the serialized versions of the filters
filter_data = FilterData.get()
json = filter_data.serialize()
value_type = "json"
config += "<values type='%s'>%s</values>\n" % (value_type, json)
config += "</filter>\n"
config += "</config>\n"
# format the xml
xml = Xml()
xml.read_string(config)
if not self.view:
saved_view = "saved_search:%s" % self.search_type
else:
saved_view = self.view
# if self.view.startswith("saved_search:"):
# saved_view = self.view
# else:
# saved_view = "saved_search:%s" % self.view
save_personal = self.kwargs.get("save_personal")
save_shared = self.kwargs.get("save_shared")
        save_overwrite = self.kwargs.get("save_overwrite")
# use widget config instead
search = Search('config/widget_config')
search.add_filter("view", saved_view)
search.add_filter("search_type", self.search_type)
search.add_filter("login", "NULL", op="is", quoted=False)
shared_config = search.get_sobject()
search = Search('config/widget_config')
search.add_filter("view", saved_view)
search.add_filter("search_type", self.search_type)
search.add_user_filter()
personal_config = search.get_sobject()
if save_overwrite:
if save_shared:
shared_config.set_value("config", xml.to_string())
shared_config.commit()
else:
personal_config.set_value("config", xml.to_string())
personal_config.commit()
return
if save_shared:
if shared_config:
raise Exception("Shared search with name '%s' already exists." % saved_view)
config = SearchType.create('config/widget_config')
config.set_value("view", saved_view)
config.set_value("search_type", self.search_type)
config.set_value("category", "search_filter")
config.set_value("config", xml.to_string())
config.commit()
if save_personal:
if personal_config:
raise Exception("My search with name '%s' already exists." % saved_view)
config = SearchType.create('config/widget_config')
config.set_value("view", saved_view)
config.set_value("search_type", self.search_type)
config.set_user()
config.set_value("category", "search_filter")
config.set_value("config", xml.to_string())
config.commit()
class AdvancedSearchSavedSearchesWdg(BaseRefreshWdg):
def get_styles(self):
styles = HtmlElement.style('''
/* Saved searches */
.spt_saved_searches_top {
background: #F9F9F9
}
.spt_saved_searches_top .spt_saved_search_item_template {
display: none !important;
}
.spt_saved_searches_top .spt_saved_searches_header {
position: relative;
display: flex;
justify-content: space-between;
align-items: center;
margin: 22px 20px 20px 20px;
}
.spt_saved_searches_top .spt_saved_searches {
display: flex;
align-items: center;
opacity: 1;
transition: 0.25s;
}
.spt_saved_searches_top .spt_saved_searches.gone {
opacity: 0;
}
.spt_saved_searches_top .spt_saved_searches_title {
font-size: 14px;
font-weight: 500;
}
.spt_saved_searches_top .spt_saved_searches .fa {
margin: 0 10px;
}
.spt_saved_searches_top .spt_saved_searches_input {
position: absolute;
border: none;
background: transparent;
border-bottom: 2px solid #f4f4f4;
opacity: 0;
transition: 0.25s;
}
.spt_saved_searches_top .spt_saved_searches_input.visible {
opacity: 1;
}
.spt_saved_searches_top .spt_saved_searches_container {
padding: 5px 0px 20px 0px;
font-size: 11px;
}
.spt_saved_searches_top .spt_saved_searches_item {
padding: 5px 0px 20px 0px;
display: none;
}
.spt_saved_searches_top .spt_saved_searches_item.selected {
display: block;
}
.spt_saved_searches_top .spt_saved_search_category {
font-weight: 500;
padding: 5px 20;
}
.spt_saved_searches_top .spt_saved_searches_container:not(.search) .spt_saved_search_category {
display: none;
}
.spt_saved_searches_top .spt_saved_search_item {
display: flex;
justify-content: space-between;
align-items: center;
width: 100%;
color: #bbb;
padding: 5px 20;
box-sizing: border-box;
}
.spt_saved_searches_top .spt_saved_search_item:hover,
.spt_saved_searches_top .spt_saved_search_item.selected {
background: #eee
}
.spt_saved_searches_top .spt_saved_search_label {
width: 80%;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.spt_saved_searches_top .spt_saved_search_item:hover .spt_saved_search_delete {
display: block;
}
.spt_saved_searches_top .spt_saved_search_delete {
display: none;
}
.spt_saved_searches_top .spt_saved_search_delete:hover {
color: red;
}
.spt_saved_searches_top .spt_search_categories_dropdown {
position: absolute;
top: 30px;
right: 40px;
box-shadow: 0px 2px 4px 0px #bbb;
border-radius: 3px;
background: white;
}
.spt_saved_searches_top .spt_search_category_template {
display: none;
}
.spt_saved_searches_top .spt_search_category {
padding: 8px 20px;
width: 130px;
}
.spt_saved_searches_top .spt_search_category:hover {
background: #ccc;
}
''')
return styles
def get_display(self):
search_type = self.kwargs.get("search_type")
search = Search("config/widget_config")
search.add_op("begin")
search.add_filter("view", 'saved_search:%', op="like")
search.add_filter("category", 'search_filter')
search.add_op("or")
search.add_op("begin")
search.add_user_filter()
search.add_filter("login", "NULL", op="is", quoted=False)
search.add_op("or")
search.add_filter("search_type", search_type)
configs = search.get_sobjects()
categories = {
"my_searches": "My Searches",
"shared_searches": "Shared Searches"
}
values = {
"my_searches": [],
"shared_searches": []
}
labels = {
"my_searches": [],
"shared_searches": []
}
user = Environment.get_user_name()
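        # Bucket the configs: entries owned by the current user go to "My Searches",
        # everything else (shared / no login) goes to "Shared Searches".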
for config in configs:
login = config.get_value("login")
if login == user:
labels["my_searches"].append(config.get("view"))
values["my_searches"].append(config.get("view"))
else:
labels["shared_searches"].append(config.get("view"))
values["shared_searches"].append(config.get("view"))
# values = [x.get("view") for x in configs]
# labels = [x.get("title") or x.get("view") for x in configs]
#################################################################
saved_top = self.top
saved_top.add_class("spt_saved_searches_top")
saved_top.add_behavior({
'type': 'load',
'values': values,
'labels': labels,
'categories': categories,
'cbjs_action': self.get_onload_js()
})
### saved searches header
saved_header = self.get_header()
saved_top.add(saved_header)
### new container
saved_searches_container = DivWdg()
saved_top.add(saved_searches_container)
saved_searches_container.add_class("spt_saved_searches_container")
saved_searches_container.add_class("SPT_TEMPLATE")
saved_searches_category_container = DivWdg()
saved_searches_container.add(saved_searches_category_container)
saved_searches_category_container.add_class("spt_saved_searches_item")
saved_searches_category_container.add_class("spt_template_item")
saved_searches_category = DivWdg()
saved_searches_category_container.add(saved_searches_category)
saved_searches_category.add_class("spt_saved_search_category")
saved_search_item_container = DivWdg()
saved_searches_category_container.add(saved_search_item_container)
saved_search_item_container.add_class("spt_saved_search_item_container")
saved_search_item = DivWdg()
saved_search_item_container.add(saved_search_item)
saved_search_item.add_class("spt_saved_search_item")
saved_search_item.add_class("spt_saved_search_item_template")
saved_search_item.add_class("spt_template_item hand")
saved_search_label = DivWdg("")
saved_search_item.add(saved_search_label)
saved_search_label.add_class("spt_saved_search_label")
saved_search_delete = DivWdg("<i class='fa fa-trash'></i>")
saved_search_item.add(saved_search_delete)
saved_search_delete.add_class("spt_saved_search_delete")
saved_item_action = self.kwargs.get("saved_item_action") or '''
/*bvr.src_el.addClass("selected");
let value = bvr.src_el.getAttribute("spt_value");
spt.table.load_search(value);*/
let currSelected = bvr.src_el.getParent(".spt_saved_searches_container").getElement(".spt_saved_search_item.selected");
if (currSelected) {
currSelected.removeClass("selected");
}
bvr.src_el.addClass("selected");
let value = bvr.src_el.getAttribute('spt_value');
let category = bvr.src_el.getAttribute('spt_category');
let server = TacticServerStub.get();
let classname = 'tactic.ui.app.GetSavedSearchCmd';
let kwargs = {
view: value,
search_type: bvr.search_type,
category: category
};
server.p_execute_cmd(classname, kwargs)
.then(function(ret_val) {
let search_values_dict = ret_val.info.search_values_dict;
let top = bvr.src_el.getParent('.spt_search_top');
top.removeClass("spt_has_changes");
let refreshPanel = top.getElement('.spt_search');
spt.panel.refresh_element(refreshPanel, {filter: search_values_dict, search_view: value});
});
'''
saved_search_item.add_behavior({
'type': 'click',
'search_type': search_type,
'cbjs_action': saved_item_action
})
saved_search_delete.add_behavior({
'type': 'click',
'search_type': search_type,
'cbjs_action': '''
let item = bvr.src_el.getParent(".spt_saved_search_item");
let label = item.innerText;
let value = item.getAttribute("spt_value");
let confirm = function() {
let key = item.getAttribute("spt_category");
let server = TacticServerStub.get();
let kwargs = {
view: value,
search_type: bvr.search_type,
personal: key == "my_searches"
}
let classname = "tactic.ui.app.DeleteSavedSearchCmd";
server.p_execute_cmd(classname, kwargs)
.then(function(ret_val) {
item.remove();
spt.notify.show_message("Deleted");
spt.advanced_search.saved.delete_item(key, label);
});
}
spt.confirm("Are you sure you want to delete '"+label+"'?", confirm);
'''
})
saved_searches_container.add_behavior({
'type': 'load',
'cbjs_action': '''
let template = bvr.src_el.getElement(".spt_saved_searches_item");
let itemTemplate = template.getElement(".spt_saved_search_item_template");
let allValues = spt.advanced_search.saved.values;
let allLabels = spt.advanced_search.saved.labels;
let categories = spt.advanced_search.saved.categories;
for (var key in categories) {
let values = allValues[key];
let labels = allLabels[key];
let clone = spt.behavior.clone(template);
let category = categories[key];
let categoryDiv = clone.getElement(".spt_saved_search_category");
categoryDiv.innerText = category;
clone.setAttribute("spt_category", key);
clone.removeClass("spt_template_item");
let container = clone.getElement(".spt_saved_search_item_container");
for (let i=0; i<values.length; i++) {
let value = values[i];
let label = labels[i];
let itemClone = spt.behavior.clone(itemTemplate);
let labelDiv = itemClone.getElement(".spt_saved_search_label");
labelDiv.innerText = label;
itemClone.setAttribute("spt_value", value);
itemClone.setAttribute("spt_category", key);
itemClone.removeClass("spt_saved_search_item_template");
itemClone.removeClass("spt_template_item")
container.appendChild(itemClone);
}
clone.removeClass("spt_template_item");
bvr.src_el.appendChild(clone);
}
'''
})
saved_searches_container.add_behavior({
'type': 'load',
'cbjs_action': '''
spt.advanced_search.saved.load_items("my_searches");
'''
})
saved_top.add(self.get_styles())
return saved_top
def get_onload_js(self):
return '''
spt.advanced_search = spt.advanced_search || {};
spt.advanced_search.saved = spt.advanced_search.saved || {};
spt.advanced_search.saved.categories = bvr.categories;
spt.advanced_search.saved.values = bvr.values;
spt.advanced_search.saved.labels = bvr.labels;
'''
# TODO: make categories!!
def get_header(self):
saved_header = DivWdg()
saved_header.add_class("spt_saved_searches_header")
#### my searches (dropdown)
saved_searches_wdg = DivWdg()
saved_header.add(saved_searches_wdg)
saved_searches_wdg.add_class("spt_saved_searches")
saved_searches_wdg.add_class("hand")
saved_searches_wdg.add_behavior({
'type': 'click',
'cbjs_action': '''
//spt.advanced_search.saved.toggle_dropdown();
let header = bvr.src_el.getParent(".spt_saved_searches_header");
let dropdown = header.getElement(".spt_search_categories_dropdown");
spt.body.add_focus_element(dropdown);
dropdown.setStyle("display", "");
'''
})
searches_dropdown = DivWdg()
saved_header.add(searches_dropdown)
searches_dropdown.add_class("spt_search_categories_dropdown")
searches_dropdown.add_style("display: none")
searches_dropdown.add_behavior({
'type': 'load',
'cbjs_action': '''
bvr.src_el.on_complete = function(el) {
bvr.src_el.setStyle("display", "none");
};
let header = bvr.src_el.getParent(".spt_saved_searches_header");
let dropdown = header.getElement(".spt_search_categories_dropdown");
let template = header.getElement(".spt_template_item");
let categories = spt.advanced_search.saved.categories;
for (var key in categories) {
let label = categories[key];
let value = key;
let clone = spt.behavior.clone(template);
clone.innerText = label;
clone.setAttribute("spt_value", value);
clone.removeClass("spt_search_category_template");
clone.removeClass("spt_template_item");
dropdown.appendChild(clone);
}
'''
})
searches_dropdown_item = DivWdg()
searches_dropdown.add(searches_dropdown_item)
searches_dropdown_item.add_class("spt_search_category")
searches_dropdown_item.add_class("spt_search_category_template")
searches_dropdown_item.add_class("spt_template_item hand")
searches_dropdown_item.add_behavior({
'type': 'click',
'cbjs_action': '''
let header = bvr.src_el.getParent(".spt_saved_searches_header");
let dropdown = header.getElement(".spt_search_categories_dropdown");
let title = header.getElement(".spt_saved_searches_title");
let value = bvr.src_el.getAttribute("spt_value");
let label = bvr.src_el.innerText;
title.innerText = label;
//spt.advanced_search.saved.clear_items();
spt.advanced_search.saved.load_items(value);
spt.body.remove_focus_element(dropdown);
dropdown.on_complete();
'''
})
saved_searches_title = DivWdg("My Searches")
saved_searches_wdg.add(saved_searches_title)
saved_searches_title.add_class("spt_saved_searches_title")
saved_searches_wdg.add("<i class='fa fa-angle-down'></i>")
#### my searches (input)
saved_searches_search = DivWdg("<i class='fa fa-search'></i>")
saved_header.add(saved_searches_search)
saved_searches_search.add_class("spt_saved_searches_search hand")
saved_searches_search.add_behavior({
'type': 'click',
'cbjs_action': '''
let searches_top = bvr.src_el.getParent(".spt_saved_searches_top");
let saved_searches = searches_top.getElement(".spt_saved_searches");
let searches_input = searches_top.getElement(".spt_saved_searches_input");
searches_input.setStyle("display", "");
searches_input.addClass("visible");
saved_searches.addClass("gone");
spt.body.add_focus_element(searches_input);
searches_input.focus();
'''
})
saved_searches_input = HtmlElement.text()
saved_header.add(saved_searches_input)
saved_searches_input.add_class("spt_saved_searches_input")
saved_searches_input.add_style("display", "none")
saved_searches_input.add_attr("placeholder", "Find saved search")
saved_searches_input.add_behavior({
'type': 'load',
'cbjs_action': '''
let searches_top = bvr.src_el.getParent(".spt_saved_searches_top");
let saved_searches = searches_top.getElement(".spt_saved_searches");
bvr.src_el.on_complete = function(el) {
let top = bvr.src_el.getParent(".spt_saved_searches_top");
let container = top.getElement(".spt_saved_searches_container");
container.removeClass("search");
let searchesItems = top.getElements(".spt_saved_searches_item");
searchesItems.forEach(function(searchesItem) {
searchesItem.setStyle("display", "");
let searchItems = searchesItem.getElements(".spt_saved_search_item");
searchItems.forEach(function(searchItem){
searchItem.setStyle("display", "");
});
});
el.removeClass("visible");
saved_searches.removeClass("gone");
setTimeout(function(){
el.setStyle("display", "none");
}, 250);
}
'''
})
saved_searches_input.add_behavior({
'type': 'keyup',
'cbjs_action': '''
let value = bvr.src_el.value;
let top = bvr.src_el.getParent(".spt_saved_searches_top");
let container = top.getElement(".spt_saved_searches_container");
container.addClass("search");
let searchesItems = top.getElements(".spt_saved_searches_item");
searchesItems.forEach(function(searchesItem) {
let searchItems = searchesItem.getElements(".spt_saved_search_item");
let display = "none";
searchItems.forEach(function(searchItem){
if (searchItem.hasClass("spt_template_item")) return;
let label = searchItem.getElement(".spt_saved_search_label");
if (label.innerText.includes(value)) {
searchItem.setStyle("display", "");
display = "block";
} else searchItem.setStyle("display", "none");
});
searchesItem.setStyle("display", display);
});
'''
})
return saved_header
class GetSavedSearchCmd(Command):
def execute(self):
view = self.kwargs.get("view")
search_type = self.kwargs.get("search_type")
category = self.kwargs.get("category")
search = Search("config/widget_config")
# search.add_op("begin")
search.add_filter("view", view)
search.add_filter("category", 'search_filter')
if category:
if category == "my_searches":
search.add_user_filter()
elif category == "shared_searches":
search.add_filter("login", "NULL", op="is", quoted=False)
# search.add_op("or")
# search.add_op("begin")
# search.add_user_filter()
# search.add_filter("login", "NULL", op="is", quoted=False)
# search.add_op("or")
search.add_filter("search_type", search_type)
config_sobj = search.get_sobject()
data = {}
if config_sobj:
config_xml = config_sobj.get_xml_value("config")
from pyasm.widget import WidgetConfig, WidgetConfigView
config = WidgetConfig.get(view=view, xml=config_xml)
data = config_xml.get_value("config/filter/values")
self.info['search_values_dict'] = data
class DeleteSavedSearchCmd(Command):
def execute(self):
view = self.kwargs.get("view")
search_type = self.kwargs.get("search_type")
personal = self.kwargs.get("personal")
search = Search("config/widget_config")
# search.add_op("begin")
search.add_filter("view", view)
search.add_filter("category", 'search_filter')
# search.add_op("or")
# search.add_op("begin")
if personal:
search.add_user_filter()
else:
search.add_filter("login", "NULL", op="is", quoted=False)
# search.add_op("or")
search.add_filter("search_type", search_type)
config = search.get_sobject()
self.info['deleted'] = False
if config:
config.delete()
self.info['deleted'] = True
class AdvancedSearchSaveButtonsWdg(BaseRefreshWdg):
def get_styles(self):
styles = HtmlElement.style('''
/* Buttons */
.spt_advanced_search_buttons {
display: flex;
justify-content: space-between;
align-items: center;
padding: 10px 15px;
width: 100%;
box-sizing: border-box;
}
.spt_advanced_search_buttons .save-buttons {
display: flex;
}
.spt_advanced_search_buttons .save-button {
padding: 5px;
}
.spt_advanced_search_buttons .save-button.enabled:hover {
//background: #f4f4f4;
}
.spt_advanced_search_buttons .spt_search_button {
background: #999;
color: #f4f4f4;
border-radius: 3px;
padding: 6px 14px;
}
.spt_search_top:not(.spt_has_changes) .spt_advanced_search_buttons .save-button[spt_action='save'] {
color: #ccc;
cursor: default;
}
''')
return styles
def get_display(self):
hide_save_buttons = self.kwargs.get("hide_save_buttons")
prefix = self.kwargs.get("prefix")
mode = self.kwargs.get("mode")
buttons_container = self.top
buttons_container.add_class("spt_advanced_search_buttons")
self.add_relay_behaviors(buttons_container)
if hide_save_buttons not in ["true", True]:
# Save buttons
save_buttons = DivWdg()
buttons_container.add(save_buttons)
save_buttons.add_class("save-buttons")
save_button = DivWdg("Save")
save_buttons.add(save_button)
save_button.add_class("spt_save_button spt_save save-button enabled hand")
save_button.add_style("margin-right: 5px;")
save_as_button = DivWdg("Save As")
save_buttons.add(save_as_button)
save_as_button.add_class("spt_save_button spt_save_as save-button enabled hand ")
save_as_button.add_attr("spt_action", "save_as")
if mode == "save":
save_button.add_attr("spt_action", "save_as")
save_as_button.add_style("display: none")
else:
save_button.add_attr("spt_action", "save")
# Search button
search_button = DivWdg("Search")
buttons_container.add(search_button)
search_button.add_class("spt_search_button")
search_button.add_class("hand")
search_action = self.kwargs.get("search_action")
if not search_action:
top_class = self.kwargs.get("top_class")
if top_class:
search_action = '''
var top = bvr.src_el.getParent(".%s");
var panel = top.getElement(".spt_view_panel");
bvr.panel = panel;
spt.dg_table.search_cbk(evt, bvr);
''' % top_class
else:
search_action = '''
spt.dg_table.search_cbk(evt, bvr);
'''
search_button.add_behavior({
'type': 'click_up',
'new_search': True,
'cbjs_action': search_action,
#'panel_id': prefix,
})
buttons_container.add(self.get_styles())
return buttons_container
def add_relay_behaviors(self, top):
top.add_relay_behavior({
'type': 'click',
'bvr_match_class': 'spt_save_button',
'search_type': self.kwargs.get("search_type"),
'cbjs_action': '''
let top = bvr.src_el.getParent(".spt_search_top");
spt.advanced_search.set_top(top);
let action = bvr.src_el.getAttribute("spt_action");
if (action == "save_as") {
let overlay = top.getElement(".overlay");
let saveTop = top.getElement(".spt_save_top");
overlay.setStyle("display", "");
overlay.addClass("visible");
saveTop.addClass("visible");
saveTop.getElement(".spt_save_title").innerText = bvr.src_el.innerText;
} else if (action == "save") {
if (!top.hasClass("spt_has_changes")) return;
var selected = spt.advanced_search.saved.get_selected();
if (!selected) {
spt.alert("No search item selected");
return;
}
var save_personal = selected.getAttribute("spt_category") == "my_searches";
var save_shared = !save_personal;
var value = selected.getAttribute("spt_value");
var new_values = spt.advanced_search.generate_json();
var search_values_dict = JSON.stringify(new_values);
var options = {
'search_type': bvr.search_type,
'display': 'block',
'view': value,
'save_personal': save_personal,
'save_shared': save_shared,
'save_overwrite': true
};
// replace the search widget
var server = TacticServerStub.get();
let on_complete = function(ret_val) {
spt.notify.show_message("Search saved");
top.removeClass("spt_has_changes");
}
var class_name = "tactic.ui.app.SaveSearchCmd";
server.execute_cmd(class_name, options, search_values_dict, {on_complete: on_complete});
}
'''
})
| epl-1.0 | -6,390,547,911,114,472,000 | 31.781165 | 162 | 0.528056 | false |
eci/mezzanine-people | mezzanine_people/templatetags/people_tags.py | 1 | 1656 | from django.db.models import Count
import re
from .models import Person, PersonCategory
from mezzanine import template
register = template.Library()
@register.as_tag
def people_categories(*args):
"""
Put a list of categories for people into the template context.
"""
people = Person.objects.published()
categories = PersonCategory.objects.filter(people__in=people)
return list(categories.annotate(people_count=Count("people")))
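# Illustrative template usage (a sketch only; the surrounding template markup is
# assumed, not taken from this app):
#
#     {% load people_tags %}
#     {% people_categories as categories %}
#     {% for category in categories %}
#         {{ category }} ({{ category.people_count }})
#     {% endfor %}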
#
# Get Random People (templatetag)
#
class RandomPeople(template.Node):
def __init__(self, limit, var_name):
self.limit = limit
self.var_name = var_name
def render(self, context):
random_people = Person.objects.order_by("?")[:int(self.limit)]
if random_people and (int(self.limit) == 1):
context[self.var_name] = random_people[0]
else:
context[self.var_name] = random_people
return ""
@register.tag(name='get_random_people')
def do_get_random_people(parser, token):
"""
Gets any number of people randomly and stores them in a variable.
Syntax::
{% get_random_people [limit] as [var_name] %}
Example usage::
{% get_random_people 10 as featured_people_list %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError, "%s tag requires arguments" % token.contents.split()[0]
m = re.search(r'(.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError, "%s tag had invalid arguments" % tag_name
format_string, var_name = m.groups()
return RandomPeople(format_string[0], var_name)
| mit | 7,240,576,681,706,077,000 | 27.551724 | 99 | 0.649758 | false |
GliderGeek/PySoar | PySoar/exportClass.py | 1 | 14631 | import datetime
import xlwt
from opensoar.utilities.helper_functions import add_times
def ss2hhmmss(time_ss, colon=True):
if time_ss is None:
return None
seconds = (time_ss % 3600) % 60
minutes = ((time_ss % 3600) - seconds) / 60
hours = (time_ss - (time_ss % 3600)) / 3600
if colon:
return "%02d:%02d:%02d" % (hours, minutes, seconds)
else:
return "%02d%02d%02d" % (hours, minutes, seconds)
class ExcelExport(object):
def initiate_style_dict(self):
self.style_dict['text'] = xlwt.easyxf('font: name Times New Roman')
self.style_dict['text_bst'] = xlwt.easyxf('font: name Times New Roman, bold on; pattern: pattern solid, fore_colour light_green')
self.style_dict['text_wrst'] = xlwt.easyxf('font: name Times New Roman, bold on; pattern: pattern solid, fore_colour rose')
self.style_dict['number'] = xlwt.easyxf('font: name Times New Roman, bold off', num_format_str='#,##0.00')
self.style_dict['number_best'] = xlwt.easyxf('font: name Times New Roman, bold on; pattern: pattern solid, fore_colour light_green', num_format_str='#,##0.00')
self.style_dict['number_worst'] = xlwt.easyxf('font: name Times New Roman, bold on; pattern: pattern solid, fore_colour rose', num_format_str='#,##0.00')
self.style_dict['int'] = xlwt.easyxf('font: name Times New Roman, bold off', num_format_str='#,##0.')
self.style_dict['int_best'] = xlwt.easyxf('font: name Times New Roman, bold on; pattern: pattern solid, fore_colour light_green', num_format_str='#,##0.')
self.style_dict['int_worst'] = xlwt.easyxf('font: name Times New Roman, bold on; pattern: pattern solid, fore_colour rose', num_format_str='#,##0.')
self.style_dict['style_phase'] = xlwt.easyxf('font: name Arial, bold on; pattern: pattern solid, fore_colour yellow; align: horiz center')
self.style_dict['performance_names'] = xlwt.easyxf('font: name Arial, bold on; align: rotation 90, horiz center')
self.style_dict['units'] = xlwt.easyxf('font: name Arial, bold on; align: horiz center')
def initiate_labels(self, settings):
for perf_ind in settings.perf_indic_all:
self.labels_all.append(settings.perf_dict[perf_ind]["name"])
if settings.perf_dict[perf_ind]["visible_on_leg"]:
self.labels_leg.append(settings.perf_dict[perf_ind]["name"])
def fill_best_worst_bib(self, leg, settings):
for perf_ind in settings.perf_indic_all:
if leg != -1 and not settings.perf_dict[perf_ind]["visible_on_leg"]:
continue
if leg == -1:
self.best_parameters_all[perf_ind] = ""
self.worst_parameters_all[perf_ind] = ""
else:
self.best_parameters_leg[leg][perf_ind] = ""
self.worst_parameters_leg[leg][perf_ind] = ""
def initiate_best_worst(self, settings, no_legs):
self.fill_best_worst_bib(-1, settings)
for leg in range(no_legs):
self.best_parameters_leg.append({})
self.worst_parameters_leg.append({})
self.fill_best_worst_bib(leg, settings)
def __init__(self, settings, no_legs):
self.file_name = settings.file_name
self.wb = xlwt.Workbook(encoding='latin-1') # initialize excel sheet
self.ws_all = self.wb.add_sheet('Entire Flight', cell_overwrite_ok=True)
self.ws_legs = []
for leg in range(no_legs):
self.ws_legs.append(self.wb.add_sheet("Leg " + str(leg+1), cell_overwrite_ok=True))
self.style_dict = {}
self.initiate_style_dict()
self.labels_all = []
self.labels_leg = []
self.initiate_labels(settings)
# store filenames corresponding to perf indicators
self.best_parameters_all = {}
self.best_parameters_leg = []
self.worst_parameters_all = {}
self.worst_parameters_leg = []
self.initiate_best_worst(settings, no_legs)
def determine_best_worst(self, competition_day, settings):
for perf_ind in settings.perf_indic_all:
order = settings.perf_dict[perf_ind]["order"]
if order == 'neutral':
continue
temp_best = 0
temp_worst = 0
for competitor in competition_day.competitors:
if not settings.perf_dict[perf_ind]["visible_on_entire_flight"]: # continue to next performance indicator
continue
if competitor.trip.outlanded():
continue
if competitor.performance.no_thermals == 0 and not settings.perf_dict[perf_ind]["visible_only_cruise"]:
continue
value = competitor.performance.all[perf_ind]
filename = competitor.competition_id
# initiate values
if (order == 'high' or order == 'low') and temp_best == 0:
temp_best = value
temp_worst = value
self.best_parameters_all[perf_ind] = filename
self.worst_parameters_all[perf_ind] = filename
# check for best value
if order == "high" and (value > temp_best or (value < 0 and value < temp_best)):
temp_best = value
self.best_parameters_all[perf_ind] = filename
elif order == "low" and value < temp_best:
temp_best = value
self.best_parameters_all[perf_ind] = filename
# check for worst value
if order == 'high' and 0 < value < temp_worst:
temp_worst = value
self.worst_parameters_all[perf_ind] = filename
elif order == "low" and value > temp_worst:
temp_worst = value
self.worst_parameters_all[perf_ind] = filename
if not settings.perf_dict[perf_ind]["visible_on_leg"]: # continue to next performance indicator
continue
for leg in range(competition_day.task.no_legs):
temp_best = 0
temp_worst = 0
for competitor in competition_day.competitors:
if competitor.trip.outlanded() and competitor.trip.outlanding_leg() < leg:
continue
elif competitor.trip.outlanded()\
and competitor.trip.outlanding_leg() == leg\
and not settings.perf_dict[perf_ind]["visible_on_outlanding"]:
continue
if competitor.performance.no_thermals_leg[leg] == 0 and not settings.perf_dict[perf_ind]["visible_only_cruise"]:
continue
value = competitor.performance.leg[leg][perf_ind]
filename = competitor.competition_id
if (order == 'high' or order == 'low') and temp_best == 0:
temp_best = value if value is not None else 0
temp_worst = value if value is not None else 0
self.best_parameters_leg[leg][perf_ind] = filename
self.worst_parameters_leg[leg][perf_ind] = filename
# check for best value
if value is not None:
if order == "high" and (value > temp_best or (value < 0 and value < temp_best)):
temp_best = value
self.best_parameters_leg[leg][perf_ind] = filename
elif order == "low" and value < temp_best:
temp_best = value
self.best_parameters_leg[leg][perf_ind] = filename
# check for worst value
if order == 'high' and 0 < value < temp_worst:
temp_worst = value
self.worst_parameters_leg[leg][perf_ind] = filename
elif order == "low" and value > temp_worst:
temp_worst = value
self.worst_parameters_leg[leg][perf_ind] = filename
def write_general_info(self, date):
self.ws_all.write(0, 0, date.strftime('%d-%m-%y'))
def write_cell(self, leg, row, col, content, style):
if leg == -1:
self.ws_all.write(row, col, content, style)
else:
self.ws_legs[leg].write(row, col, content, style)
def style_addition(self, leg, perf_ind, filename):
if leg == -1:
if self.best_parameters_all[perf_ind] == filename:
return "_best"
elif self.worst_parameters_all[perf_ind] == filename:
return "_worst"
else:
return ""
else:
if self.best_parameters_leg[leg][perf_ind] == filename:
return "_best"
elif self.worst_parameters_leg[leg][perf_ind] == filename:
return "_worst"
else:
return ""
def write_perf_indics(self, leg, settings, competition_day):
col = 0
for perf_ind in settings.perf_indic_all:
if leg != -1 and not settings.perf_dict[perf_ind]["visible_on_leg"]:
continue
if leg == -1 and not settings.perf_dict[perf_ind]["visible_on_entire_flight"]:
continue
row = 1
content = settings.perf_dict[perf_ind]['name']
style = self.style_dict['performance_names']
perf_format = settings.perf_dict[perf_ind]['format']
self.write_cell(leg, row, col, content, style)
row += 1
content = settings.perf_dict[perf_ind]['unit']
style = self.style_dict['units']
perf_format = settings.perf_dict[perf_ind]['format']
self.write_cell(leg, row, col, content, style)
row += 1 # empty line
for competitor in competition_day.competitors:
row += 1
if leg == -1:
if competitor.trip.outlanded() and not settings.perf_dict[perf_ind]["visible_on_outlanding"]\
or competitor.performance.no_thermals == 0 and not settings.perf_dict[perf_ind]["visible_only_cruise"]:
continue
else:
if perf_ind == 'ranking':
content = competitor.ranking
elif perf_ind == 'airplane':
content = competitor.plane_model
elif perf_ind == 'compID':
content = competitor.competition_id
else:
content = competitor.performance.all[perf_ind]
else:
if competitor.trip.outlanded() and competitor.trip.outlanding_leg() < leg or\
competitor.trip.outlanded() and competitor.trip.outlanding_leg() <= leg and not settings.perf_dict[perf_ind]["visible_on_outlanding"] or\
competitor.performance.no_thermals_leg[leg] == 0 and not settings.perf_dict[perf_ind]["visible_only_cruise"]:
continue
else:
if perf_ind == 'ranking':
content = competitor.ranking
elif perf_ind == 'airplane':
content = competitor.plane_model
elif perf_ind == 'compID':
content = competitor.competition_id
else:
content = competitor.performance.leg[leg][perf_ind]
if perf_ind in ['t_start', 't_finish']:
timezone = competition_day.task.timezone
if timezone is not None:
content = add_times(content, datetime.timedelta(hours=timezone))
content = content.strftime('%H:%M:%S')
style = self.style_dict[perf_format + self.style_addition(leg, perf_ind, competitor.competition_id)]
self.write_cell(leg, row, col, content, style)
col += 1
def write_title(self, leg, settings, taskpoints):
row = 0
col = 1
if leg == -1:
no_cols = settings.no_indicators
title = "Entire flight"
self.ws_all.write_merge(row, row, col, col+no_cols, title, self.style_dict['style_phase'])
else:
no_cols = settings.no_leg_indicators
title = "Leg %s: %s - %s" % (leg + 1, taskpoints[leg].name, taskpoints[leg+1].name)
self.ws_legs[leg].write_merge(row, row, col, col+no_cols, title, self.style_dict['style_phase'])
def write_whole_flight(self, settings, competition_day):
self.write_title(-1, settings, competition_day.task.waypoints)
self.write_perf_indics(-1, settings, competition_day)
def write_legs(self, settings, competition_day):
for leg in range(competition_day.task.no_legs):
self.write_title(leg, settings, competition_day.task.waypoints)
self.write_perf_indics(leg, settings, competition_day)
def write_file(self, competition_day, settings, igc_directory):
self.write_general_info(competition_day.date)
self.determine_best_worst(competition_day, settings)
self.write_whole_flight(settings, competition_day)
self.write_legs(settings, competition_day)
self.wb.save(self.file_name)
        self.wb.save(igc_directory + '/' + self.file_name.split('/')[-1])
############################# LICENSE #####################################
# PySoar - Automating gliding competition analysis
# Copyright (C) 2016 Matthijs Beekman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
| gpl-3.0 | -7,354,456,287,760,471,000 | 43.880368 | 167 | 0.552662 | false |
richardseifert/Hydra_pipeline | libs/spectra.py | 1 | 12705 | import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
plt.ion()
from astropy.io import fits
from fitstools import common_header
from html_plot import plotter
def unpack_xy(use_args='all', preserve=False):
def decorator(f):
def wrapper(use_args, *args, **kwargs):
args = list(args)
if use_args == 'all':
use_args = [i for i in range(len(args))]
dtype = None
for i in use_args:
#if isinstance(args[i], spectrum):
# if d<2:
# dtype = lambda w, f, ferr=None, header=None: spectrum(w, f, ferr, header) #Tried adding spectrum object to
# d = 2
if isinstance(args[i], curve):
dtype = lambda x, y, yerr=None: curve(x, y, yerr)
else:
try:
iter(args[i])
except TypeError:
continue
if len(args[i]) == 3:
x, y, yerr = args[i]
args[i] = curve(x, y, yerr)
elif len(args[i]) == 2:
x, y = args[i]
args[i] = curve(x, y)
else:
continue
res = f(*args, **kwargs)
if preserve and dtype != None:
res = dtype(*res)
return res
return lambda *args, **kwargs: wrapper(use_args, *args, **kwargs)
return decorator
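# Minimal usage sketch: the helper below is illustrative only and is not part of
# the original API; it shows that bare (x, y) tuples passed positionally are
# wrapped into curve objects before the decorated function body runs.
@unpack_xy()
def _example_first_y(c):
    # c arrives as a curve even if the caller passed a plain (x, y) pair
    return c.get_y()[0]
# _example_first_y(([1, 2, 3], [10, 20, 30])) would return 10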
class curve:
def __init__(self, x, y, yerr=None):
sort_i = np.argsort(x) #Sort data by x.
self.x = np.asarray(x)[sort_i]
self.y = np.asarray(y)[sort_i]
if type(yerr) == type(None):
self.yerr = np.zeros_like(y)[sort_i]
else:
self.yerr = np.asarray(yerr)[sort_i]
def get_x(self):
return self.x
def get_y(self):
return self.y
def get_yerr(self):
return self.yerr
def get_data(self):
return self.x, self.y, self.yerr
@unpack_xy()
def math_helper(c1, c2, **kwargs):
if isinstance(c2, curve):
x_interp = get_x_interp([c1.x, c2.x], **kwargs)
c1_y_interp = interp1d(c1.x, c1.y)(x_interp)
c1_yerr_interp = interp1d(c1.x, c1.yerr)(x_interp)
c1_interp = curve(x_interp, c1_y_interp, c1_yerr_interp)
c2_y_interp = interp1d(c2.x, c2.y)(x_interp)
c2_yerr_interp = interp1d(c2.x, c2.yerr)(x_interp)
c2_interp = curve(x_interp, c2_y_interp, c2_yerr_interp)
return c1_interp, c2_interp
else:
return c1, curve(c1.x, c2*np.ones_like(c1.y))
def __add__(self, other, **kwargs):
self_interp, other_interp = curve.math_helper(self, other, **kwargs)
x_interp = self_interp.x
y_interp = self_interp.y+other_interp.y
yerr_interp = (self_interp.yerr**2+other_interp.yerr**2)**0.5
return curve(x_interp, y_interp, yerr_interp)
def __sub__(self, other, **kwargs):
self_interp, other_interp = curve.math_helper(self, other, **kwargs)
x_interp = self_interp.x
y_interp = self_interp.y-other_interp.y
yerr_interp = (self_interp.yerr**2+other_interp.yerr**2)**0.5
return curve(x_interp, y_interp, yerr_interp)
def __mul__(self, other, **kwargs):
self_interp, other_interp = curve.math_helper(self, other, **kwargs)
x_interp = self_interp.x
y_interp = self_interp.y*other_interp.y
yerr_interp = ((self_interp.yerr*other_interp.y)**2 + (other_interp.yerr*other_interp.y)**2)**0.5
return curve(x_interp, y_interp, yerr_interp)
def __div__(self, other, **kwargs):
self_interp, other_interp = curve.math_helper(self, other, **kwargs)
x_interp = self_interp.x
y_interp = self_interp.y/other_interp.y
yerr_interp = ((self_interp.yerr*other_interp.y)**2 + (other_interp.yerr*other_interp.y)**2)**0.5
return curve(x_interp, y_interp, yerr_interp)
def get_x_interp(x_arrs, x_interp=None, x_interp_i=None, dx=None, **kwargs):
if x_interp == None:
try:
x_interp = x_arrs[x_interp_i]
        except (TypeError, IndexError):
low = max([min(x_arr) for x_arr in x_arrs]) #Find the lowest x value
high = min([max(x_arr) for x_arr in x_arrs]) #Find the highest x value
if dx != None:
x_interp = np.arange(low, high, dx)
else:
x_interp = []
num_x = len(x_arrs)
x_i_list = [0]*num_x
current_x = low
while current_x < high:
x_interp.append(current_x)
avg_dx = 0
n = 0
for i,x in enumerate(x_arrs):
indx = x_i_list[i]
while indx < len(x) and x[indx] < current_x:
indx += 1
x_i_list[i] = int(indx)
try:
avg_dx += abs(x[indx+1] - x[indx])
n+=1
except:
pass
avg_dx = avg_dx/n if n>0 else last_dx
current_x += avg_dx
last_dx = avg_dx
return x_interp
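# Illustrative call (numbers assumed): with dx given, the grid is a plain
# np.arange over the overlapping range of the inputs, e.g.
#   get_x_interp([[0, 1, 2], [0.5, 1.5, 2.5]], dx=0.5) -> array([0.5, 1.0, 1.5])
# because low = 0.5 (largest minimum) and high = 2 (smallest maximum).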
@unpack_xy()
def interp_helper(*xy_curves, **kwargs):
x_arrs = [c.get_x() for c in xy_curves]
y_arrs = [c.get_y() for c in xy_curves]
yerr_arrs = [c.get_yerr() for c in xy_curves]
x_interp = get_x_interp(x_arrs=x_arrs, **kwargs)
y_interp_arrs = np.zeros((len(y_arrs), len(x_interp)))
for i in range(len(x_arrs)):
y_interp_arrs[i,:] = interp1d(x_arrs[i], y_arrs[i], fill_value=(np.nan, np.nan))(x_interp)
yerr_interp_arrs = np.zeros((len(yerr_arrs), len(x_interp)))
for i in range(len(x_arrs)):
yerr_interp_arrs[i,:] = interp1d(x_arrs[i], yerr_arrs[i], fill_value=(np.nan, np.nan))(x_interp)
return x_interp, y_interp_arrs, yerr_interp_arrs
@unpack_xy(preserve=True)
def interp_add(*spectra, **kwargs):
x_interp, y_interp_arrs, yerr_interp_arrs = interp_helper(*spectra, **kwargs)
y_interp = np.nansum(y_interp_arrs, axis=0)
yerr_interp = np.nansum([yerr**2 for yerr in yerr_interp_arrs], axis=0)**0.5
    return x_interp, y_interp, yerr_interp
@unpack_xy(preserve=True)
def interp_mean(*spectra, **kwargs):
x_interp, y_interp_arrs, yerr_interp_arrs = interp_helper(*spectra, **kwargs)
y_interp = np.nanmean(y_interp_arrs, axis=0)
    N = len(y_interp_arrs)
    yerr_interp = np.nansum([yerr**2 for yerr in yerr_interp_arrs], axis=0)**0.5/N
    return x_interp, y_interp, yerr_interp
def robust_mean(y_vals, y_errs, m=5):
y_vals = np.array(y_vals)
y_errs = np.array(y_errs)
c = np.nanmedian(y_vals)
keep = (abs(y_vals - c) < m*y_errs)
if len(y_vals[keep]) > 0:
try:
mean = np.average(y_vals[keep], weights=1/y_errs[keep])
except ZeroDivisionError:
mean = np.nanmean(y_vals[keep])
else:
mean = np.nanmean(y_vals)
return mean
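# Worked example (numbers assumed): with the default m=5 the outlier sits far
# more than 5 * 0.1 away from the median (~1.1), so it is clipped and only the
# first two points enter the weighted mean:
#   robust_mean([1.0, 1.1, 50.0], [0.1, 0.1, 0.1]) -> 1.05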
@unpack_xy(preserve=True)
def interp_rmean(*spectra, **kwargs):
x_interp, y_interp_arrs, yerr_interp_arrs = interp_helper(*spectra, **kwargs)
y_interp = [robust_mean([y[i] for y in y_interp_arrs], [yerr[i] for yerr in yerr_interp_arrs]) for i in range(len(x_interp))]
yerr_interp = np.nansum([yerr**2 for yerr in yerr_interp_arrs], axis=0)**0.5
return x_interp, y_interp, yerr_interp
@unpack_xy(preserve=True)
def interp_median(*spectra, **kwargs):
x_interp, y_interp_arrs, yerr_interp_arrs = interp_helper(*spectra, **kwargs)
y_interp = np.nanmedian(y_interp_arrs, axis=0)
N = len(y_interp_arrs)
yerr_interp = 1.253*np.nansum([yerr**2 for yerr in yerr_interp_arrs], axis=0)**0.5/N
return x_interp, y_interp, yerr_interp
class spectrum(curve):
def __init__(self, wavelength, flux=None, flux_err=None, header=None):
if type(flux) == type(None) and isinstance(wavelength, curve):
input_curve = wavelength
curve.__init__(self, *input_curve.get_data())
else:
curve.__init__(self, wavelength, flux, flux_err)
self.header = header
def set_header(self, new_header):
self.header = new_header
def get_wavelength(self):
return self.x
def get_flux(self):
return self.y
def get_flux_err(self):
return self.yerr
def get_data(self):
return [self.x, self.y, self.yerr]
def get_header(self):
return self.header
def __add__(self, other, header_i=None):
if header_i == None:
try:
headers = [self.header, other.header]
header = common_header(headers)
except AttributeError:
header = self.header
return spectrum(curve.__add__(self, other), header=header)
def __sub__(self, other, header_i=None):
if header_i == None:
try:
headers = [self.header, other.header]
header = common_header(headers)
except AttributeError:
header = self.header
return spectrum(curve.__sub__(self, other), header=header) #None is temp, REMOVE SOON
def __mul__(self, other, header_i=None):
if header_i == None:
try:
headers = [self.header, other.header]
header = common_header(headers)
except AttributeError:
header = self.header
return spectrum(curve.__mul__(self, other), header=header)
def __div__(self, other, header_i=None):
if header_i == None:
try:
headers = [self.header, other.header]
header = common_header(headers)
except AttributeError:
header = self.header
return spectrum(curve.__div__(self, other), header=header)
def save(self, savepath):
flux = fits.PrimaryHDU(self.get_flux(), self.get_header())
flux.header['EXTNAME'] = 'FLUX'
wavelength = fits.ImageHDU(self.get_wavelength())
wavelength.header['EXTNAME'] = 'WAVELENGTH'
flux_err = fits.ImageHDU(self.get_flux_err())
flux_err.header['EXTNAME'] = 'FLUX_ERR'
f = fits.HDUList([flux, wavelength, flux_err])
f.writeto(savepath, clobber=True)
def plot(self, p=None, **kwargs):
'''
#Old matplotlib method.
if ax == None:
fig, ax = plt.subplots()
ax.set_xlabel('Wavelength ($\AA$)')
ax.set_ylabel('Flux')
ax.plot(self.x, self.y, **kwargs)
if type(self.yerr) != type(None):
ax.fill_between(self.x, self.y-self.yerr, self.y+self.yerr, facecolor='cornflowerblue', linewidth=0.0)
return ax
'''
if p == None:
p = plotter()
p.set_xlabel('Wavelength (Ang)')
p.set_ylabel('Flux')
p.line(self.x, self.y, **kwargs)
if type(self.yerr) != type(None):
if 'line_color' in kwargs:
color = kwargs['line_color']
else:
color = 'blue'
p.fill_between(self.x, self.y-self.yerr, self.y+self.yerr, line_width=0.0, fill_color=color, line_color=color, fill_alpha=0.2, line_alpha=0.2)
return p
def sum_spectra(spectra, header=None, **kwargs):
if header==None:
#Combine headers somehow
pass
sum_curve = interp_add(*spectra, **kwargs)
sum_spectrum = spectrum(sum_curve, header=header)
return sum_spectrum
def median_spectra(spectra, header=None, **kwargs):
if header==None:
#Combine headers somehow
pass
median_curve = interp_median(*spectra, **kwargs)
median_spectrum = spectrum(median_curve, header=header)
return median_spectrum
def mean_spectra(spectra, header=None, **kwargs):
if header==None:
#Combine headers somehow
pass
mean_curve = interp_mean(*spectra, **kwargs)
mean_spectrum = spectrum(mean_curve, header=header)
return mean_spectrum
def rmean_spectra(spectra, header=None, **kwargs):
if header==None:
#Combine headers somehow
pass
rmean_curve = interp_rmean(*spectra, **kwargs)
rmean_spectrum = spectrum(rmean_curve, header=header)
return rmean_spectrum
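# Minimal sketch of combining spectra (synthetic numbers, illustrative only;
# the helper below is not part of the original module):
def _example_median_combine():
    sp1 = spectrum([4000.0, 4001.0, 4002.0], [1.0, 2.0, 3.0], [0.1, 0.1, 0.1])
    sp2 = spectrum([4000.5, 4001.5, 4002.5], [1.5, 2.5, 3.5], [0.1, 0.1, 0.1])
    # median-combine onto a common wavelength grid spanning the overlap
    return median_spectra([sp1, sp2])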
def scale_spectra(spectra, method='median'):
if method == 'median':
statistic = np.nanmedian
scaled_spectra = []
scale_value = statistic([statistic(sp.get_flux()) for sp in spectra])
for sp in spectra:
scaled_spectra.append(sp*(scale_value/statistic(sp.get_flux())))
return scaled_spectra
| mit | 500,354,099,611,034,300 | 38.703125 | 154 | 0.555136 | false |
mdtraj/tftraj | examples/profile/profile.py | 1 | 1144 | import timeit
import mdtraj as md
import numpy as np
import tensorflow as tf
import tftraj.rmsd_op
import tftraj.rmsd
results = {}
sess = tf.Session()
traj = md.load(['fs_peptide/trajectory-{}.xtc'.format(i + 1) for i in range(28)], top='fs_peptide/fs-peptide.pdb')
traj = traj[::100]
traj_xyz = np.array(traj.xyz)
traj_target = traj[::100]
traj_target_xyz = np.array(traj_target.xyz)
print(len(traj_xyz), len(traj_target_xyz))
rmsd = tftraj.rmsd_op.load()
prmsd, _ = rmsd.pairwise_msd(traj_xyz, traj_target_xyz)
results['tf-cpu'] = timeit.timeit('sess.run(prmsd)', number=30, globals=globals()) / 30
results['mdtraj'] = timeit.timeit('[md.rmsd(traj, traj_target, i) ** 2 for i in range(traj_target.n_frames)]',
number=30, globals=globals()) / 30
tfnative = tftraj.rmsd.pairwise_msd(tf.constant(traj_xyz), tf.constant(traj_target_xyz))
results['tf-native'] = timeit.timeit('sess.run(tfnative)', number=1, globals=globals())
print("{:10s} {:7s}".format("Algo", "time/ms"))
print("{:10s} {:7s}".format('-' * 10, '-' * 7))
for k in sorted(results):
print("{:10s} {:7.1f}".format(k, 1000 * results[k]))
| mit | 1,208,493,363,684,150,500 | 34.75 | 114 | 0.657343 | false |
awacha/cct | attic/gui/toolframes/accounting.py | 1 | 3520 | import logging
from ..core.functions import update_comboboxtext_choices
from ..core.toolframe import ToolFrame
from ...core.services.accounting import Accounting, PrivilegeLevel
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class AccountingFrame(ToolFrame):
def __init__(self, *args, **kwargs):
self._acctconn = []
self._projectid_changed_disable = None
self._updating_privilegeselector = False
super().__init__(*args, **kwargs)
def init_gui(self, *args, **kwargs):
self.on_user_changed(self.instrument.services['accounting'],
self.instrument.services['accounting'].get_user())
self.on_accounting_privlevel_changed(self.instrument.services['accounting'],
self.instrument.services['accounting'].get_privilegelevel())
self._acctconn = [self.instrument.services['accounting'].connect('project-changed', self.on_project_changed),
self.instrument.services['accounting'].connect('privlevel-changed',
self.on_accounting_privlevel_changed),
self.instrument.services['accounting'].connect('user-changed', self.on_user_changed)]
self.on_project_changed(self.instrument.services['accounting'])
def cleanup(self):
for c in self._acctconn:
self.instrument.services['accounting'].disconnect(c)
self._acctconn = []
return super().cleanup()
def on_projectid_changed(self, comboboxtext):
if self._projectid_changed_disable:
return
pid = comboboxtext.get_active_text()
if self.instrument.services['accounting'].get_project().projectid != pid:
self.instrument.services['accounting'].select_project(pid)
def on_project_changed(self, accountingservice: Accounting):
pidsel = self.builder.get_object('projectid_selector')
self._projectid_changed_disable = True
try:
proj = accountingservice.get_project()
update_comboboxtext_choices(pidsel, sorted(self.instrument.services['accounting'].get_projectids()),
set_to=proj.projectid)
self.builder.get_object('proposer_label').set_text(
proj.proposer)
self.builder.get_object('projectname_label').set_text(
proj.projectname)
finally:
self._projectid_changed_disable = False
def on_privileges_changed(self, selector):
if not self._updating_privilegeselector:
self.instrument.services['accounting'].set_privilegelevel(selector.get_active_text())
return False
def on_user_changed(self, accountingservice: Accounting, user):
self.builder.get_object('operatorname_label').set_text(
user.username)
def on_accounting_privlevel_changed(self, accountingservice: Accounting, privlevel: PrivilegeLevel):
logger.debug('Updating privileges selector. Current privilege level: {}'.format(privlevel))
self._updating_privilegeselector = True
try:
update_comboboxtext_choices(
self.builder.get_object('privileges_selector'),
accountingservice.get_accessible_privlevels_str(accountingservice.current_user.privlevel),
set_to=privlevel.name)
finally:
self._updating_privilegeselector = False
| bsd-3-clause | -1,270,912,631,302,270,000 | 46.567568 | 117 | 0.633523 | false |
AppEnlight/channelstream_twisted_test | channelstream/server.py | 1 | 1429 | import uuid, sys
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource
from autobahn.twisted.resource import WebSocketResource, \
WSGIRootResource
from wsgi_app import make_app
from ws_protocol import BroadcastServerFactory, BroadcastServerProtocol
def run_server(config):
if config['debug']:
debug = True
else:
debug = False
debug = True
observer = log.PythonLoggingObserver()
observer.start()
if debug:
log.startLogging(sys.stdout)
ServerFactory = BroadcastServerFactory
factory = ServerFactory(
"ws://%s:%s" % (config['host'], config['port']),
debug=debug,
debugCodePaths=debug,
externalPort=config['external_port'])
factory.protocol = BroadcastServerProtocol
wsResource = WebSocketResource(factory)
## create a Twisted Web WSGI resource for our Pyramid server
app = make_app(config)
wsgiResource = WSGIResource(reactor, reactor.getThreadPool(), app)
## create a root resource serving everything via WSGI/, but
## the path "/ws" served by our WebSocket stuff
rootResource = WSGIRootResource(wsgiResource, {'ws': wsResource})
## create a Twisted Web Site and run everything
##
site = Site(rootResource)
reactor.listenTCP(config['port'], site, interface=config['host'])
reactor.run()
| bsd-3-clause | -2,878,703,117,169,364,500 | 30.065217 | 71 | 0.704689 | false |
HarkDev/sklearn.onsteroids | feature_engineering/DateTransformer.py | 1 | 1298 | import numpy as np
from sklearn.base import TransformerMixin
class DateTransformer(TransformerMixin):
"""
    Converts a datetime-typed column into separate columns that represent:
    * year
    * month
    * day
    * day_of_week (optional)
    Parameters
    **********
    * date_column: Name of the date column.
    * include_day_of_week: Defaults to True. Set to False to avoid creating this column.
    * drop_date_column: Defaults to True. Whether to drop the source column or not.
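    Example (illustrative sketch, assuming pandas is imported as pd and the
    column name 'created' is made up; it holds datetime64 values):
        df = pd.DataFrame({'created': pd.to_datetime(['2020-02-29 13:45'])})
        df = DateTransformer(date_column='created').fit(df).transform(df)
        # df now has year=2020, month=2, day=29, day_of_week=5; 'created' is dropped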
"""
def __init__(self, date_column, include_day_of_week=True, drop_date_column=True):
self.date_column = date_column
self.drop_date_column = drop_date_column
self.include_day_of_week = include_day_of_week
def transform(self, X, *_):
# Get each part of the date onto a separate column
        X['year'] = X[self.date_column].dt.year.astype(np.uint16)
X['month'] = X[self.date_column].dt.month.astype(np.int8)
X['day'] = X[self.date_column].dt.day.astype(np.int8)
if self.include_day_of_week:
# Get the day of week
X['day_of_week'] = X[self.date_column].dt.dayofweek.astype(np.int8)
# Drop the date column if requested
if self.drop_date_column:
X.drop([self.date_column], axis=1, inplace=True)
return X
def fit(self, *_):
return self | mit | 776,890,732,986,759,700 | 32.307692 | 85 | 0.612481 | false |
mirkolai/evaluate_post_disaster_planning_with_social_medias | 004 - util script - recover admin order levels - wheter phenomena.py | 1 | 3187 | __author__ = 'mirko'
import sys
import time
import oauth2 as oauth
import json
import config as cfg
import MySQLdb
db = MySQLdb.connect(host=cfg.mysql['host'], # your host, usually localhost
user=cfg.mysql['user'], # your username
passwd=cfg.mysql['passwd'], # your password
db=cfg.mysql['db']) # name of the data base
cur = db.cursor()
db.set_character_set('utf8')
cur.execute('SET NAMES utf8mb4;')
cur.execute('SET CHARACTER SET utf8;')
cur.execute('SET character_set_connection=utf8mb4;')
db.commit()
CONSUMER_KEY = cfg.twitter['CONSUMER_KEY']
CONSUMER_SECRET = cfg.twitter['CONSUMER_SECRET']
ACCESS_KEY = cfg.twitter['ACCESS_KEY']
ACCESS_SECRET = cfg.twitter['ACCESS_SECRET']
consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
access_token = oauth.Token(key=ACCESS_KEY, secret=ACCESS_SECRET)
clientTwitter = oauth.Client(consumer, access_token)
cur.execute("SELECT id, json FROM tweet_weather_phenomena")
tweets = cur.fetchall()
for tweet in tweets:
tweet_id=tweet[0]
tweet_json=tweet[1]
codes=None
jsonTweet=json.loads(tweet[1])
if jsonTweet['place']!=None:
place_id=jsonTweet['place']['id']
cur.execute("SELECT id, json FROM twitter_places where id=%s",(place_id))
results = cur.fetchone()
if results!=None:
if '174368:admin_order_id' in json.loads(results[1])['attributes']:
codes=json.loads(results[1])['attributes']['174368:admin_order_id']
else:
place_endpoint = "https://api.twitter.com/1.1/geo/id/"+place_id+".json"
response, data = clientTwitter.request(place_endpoint)
if response['status']=='200':
if int(response['x-rate-limit-remaining'])<2:
print 'Reverse Geocoding: wait '+str( int(response['x-rate-limit-reset']) - int(time.time()) )+' seconds'
time.sleep(int(response['x-rate-limit-reset'])-int(time.time()))
result=json.loads(data)
print result
if '174368:admin_order_id' in result['attributes']:
codes = result['attributes']['174368:admin_order_id']
cur.execute("INSERT twitter_places (id,json) "
"VALUES (%s,%s) "
"on duplicate key update id=id",
(place_id,data))
db.commit()
print ': wait 60 seconds'
time.sleep((15*60)/int(response['x-rate-limit-limit']))
if codes!=None:
#example ITA:07::::::010:010025
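        # Worked out from the example string above (fixed-width slices assumed):
        #   codes[4:6]   -> '07'
        #   codes[12:15] -> '010'
        #   codes[16:]   -> '010025'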
if codes[0:3]=='ITA':
admin_order_1=codes[4:6]
admin_order_2=codes[12:15]
admin_order_3=codes[16:]
print admin_order_1,admin_order_2,admin_order_3
cur.execute("UPDATE tweet_weather_phenomena "
"SET admin_order_1=%s,"
"admin_order_2=%s,"
"admin_order_3=%s WHERE id=%s",
(admin_order_1,admin_order_2,admin_order_3,tweet_id))
db.commit() | gpl-2.0 | -4,731,220,389,659,833,000 | 37.878049 | 125 | 0.566991 | false |
ddurieux/alignak | test/test_escalations.py | 1 | 31118 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Jean Gabes, [email protected]
# aviau, [email protected]
# Grégory Starck, [email protected]
# Hartmut Goebel, [email protected]
# Sebastien Coavoux, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test notification escalations.
#
from alignak_test import *
from alignak.objects.serviceescalation import Serviceescalation
class TestEscalations(AlignakTest):
def setUp(self):
self.setup_with_file('etc/alignak_escalations.cfg')
time_hacker.set_real_time()
def test_wildcard_in_service_descrption(self):
self.print_header()
sid = int(Serviceescalation.id) - 1
generated = self.sched.conf.escalations.find_by_name('Generated-Serviceescalation-%d' % sid)
for svc in self.sched.services.find_srvs_by_hostname("test_host_0"):
self.assertIn(generated, svc.escalations)
def test_simple_escalation(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
# To make tests quicker we make notifications send very quickly
svc.notification_interval = 0.001
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print "- 1 x OK -------------------------------------"
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
tolevel2 = self.sched.conf.escalations.find_by_name('ToLevel2')
self.assertIsNot(tolevel2, None)
self.assertIn(tolevel2, svc.escalations)
tolevel3 = self.sched.conf.escalations.find_by_name('ToLevel3')
self.assertIsNot(tolevel3, None)
self.assertIn(tolevel3, svc.escalations)
for es in svc.escalations:
print es.__dict__
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print "- 1 x BAD get soft -------------------------------------"
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print "---current_notification_number", svc.current_notification_number
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print "- 1 x BAD get hard -------------------------------------"
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# We check if we really notify the level1
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;CRITICAL;')
self.show_and_clear_logs()
#self.show_and_clear_actions()
self.show_actions()
print svc.notifications_in_progress
for n in svc.notifications_in_progress.values():
print n
# check_notification: yes (hard)
print "---current_notification_number", svc.current_notification_number
        # The first notification has already been sent. The next one has been
        # scheduled and is waiting for notification_interval to pass, so the
        # current number is still 1.
self.assertEqual(1, svc.current_notification_number)
print "OK, level1 is notified, notif nb = 1"
print "---------------------------------1st round with a hard"
print "find a way to get the number of the last reaction"
cnn = svc.current_notification_number
print "- 1 x BAD repeat -------------------------------------"
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assertIn(True, [n.escalated for n in self.sched.actions.values()])
        # Now we raise the notif number to 2, so we can escalate
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print "cnn and cur", cnn, svc.current_notification_number
self.assertGreater(svc.current_notification_number, cnn)
cnn = svc.current_notification_number
# One more bad, we go 3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assertIn(True, [n.escalated for n in self.sched.actions.values()])
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
# We go 4, still level2
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assertIn(True, [n.escalated for n in self.sched.actions.values()])
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
        # We go 5! we escalate to level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assertIn(True, [n.escalated for n in self.sched.actions.values()])
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
        # Now we send 10 more notifications, we must still be at level3
for i in range(10):
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
        # Now we recover, it will be fun because all of level{1,2,3} must each be sent a
        # notification
self.scheduler_loop(2, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.show_actions()
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;OK;')
self.show_and_clear_logs()
def test_time_based_escalation(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0_time")
# To make tests quicker we make notifications send very quickly
svc.notification_interval = 0.001
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print "- 1 x OK -------------------------------------"
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
        # We check if we correctly linked our escalations
tolevel2_time = self.sched.conf.escalations.find_by_name('ToLevel2-time')
self.assertIsNot(tolevel2_time, None)
self.assertIn(tolevel2_time, svc.escalations)
tolevel3_time = self.sched.conf.escalations.find_by_name('ToLevel3-time')
self.assertIsNot(tolevel3_time, None)
self.assertIn(tolevel3_time, svc.escalations)
# Go for the running part!
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print "- 1 x BAD get soft -------------------------------------"
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print "---current_notification_number", svc.current_notification_number
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print "- 1 x BAD get hard -------------------------------------"
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# We check if we really notify the level1
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
# check_notification: yes (hard)
print "---current_notification_number", svc.current_notification_number
        # The first notification has already been sent. The next one has been
        # scheduled and is waiting for notification_interval to pass, so the
        # current number is still 1.
self.assertEqual(1, svc.current_notification_number)
print "OK, level1 is notified, notif nb = 1"
print "---------------------------------1st round with a hard"
print "find a way to get the number of the last reaction"
cnn = svc.current_notification_number
print "- 1 x BAD repeat -------------------------------------"
        # For the test, we hack the notif value because we do not want to wait 1 hour!
        for n in svc.notifications_in_progress.values():
            # HOP, we say: it's already 3600 seconds since the last notif,
            svc.notification_interval = 3600
            # and we say that a full hour has already passed since the notification
            # was created, so the notification age looks huge and it will escalate
            n.creation_time = n.creation_time - 3600
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.001)
# Now we raise a notification time of 1hour, we escalade to level2
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print "cnn and cur", cnn, svc.current_notification_number
# We check that we really raise the notif number too
self.assertGreater(svc.current_notification_number, cnn)
cnn = svc.current_notification_number
for n in svc.notifications_in_progress.values():
# HOP, we say: it's already 3600 second since the last notif
n.t_to_go = time.time()
# One more bad, we say: he, it's still near 1 hour, so still level2
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
# Now we go for level3, so again we say: he, in fact we start one hour earlyer,
# so the total notification duration is near 2 hour, so we will raise level3
for n in svc.notifications_in_progress.values():
# HOP, we say: it's already 3600 second since the last notif,
n.t_to_go = time.time()
n.creation_time = n.creation_time - 3600
# One more, we bypass 7200, so now it's level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
# Now we send 10 more notif, we must be still level5
for i in range(10):
for n in svc.notifications_in_progress.values():
# HOP, we say: it's already 3600 second since the last notif,
n.t_to_go = time.time()
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
# Now we recover, it will be fun because all of level{1,2,3} must be send a
# recovery notif
self.scheduler_loop(2, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.show_actions()
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;OK;')
self.show_and_clear_logs()
# Here we search to know if a escalation really short the notification
# interval if the escalation if BEFORE the next notification. For example
# let say we notify one a day, if the escalation if at 4hour, we need
# to notify at t=0, and get the next notification at 4h, and not 1day.
def test_time_based_escalation_with_shorting_interval(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0_time")
# To make tests quicker we make notifications send very quickly
# 1 day notification interval
svc.notification_interval = 1400
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print "- 1 x OK -------------------------------------"
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
# We check that we really linked our escalations :)
tolevel2_time = self.sched.conf.escalations.find_by_name('ToLevel2-time')
self.assertIsNot(tolevel2_time, None)
self.assertIn(tolevel2_time, svc.escalations)
tolevel3_time = self.sched.conf.escalations.find_by_name('ToLevel3-time')
self.assertIsNot(tolevel3_time, None)
self.assertIn(tolevel3_time, svc.escalations)
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print "- 1 x BAD get soft -------------------------------------"
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print "---current_notification_number", svc.current_notification_number
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print "- 1 x BAD get hard -------------------------------------"
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
print " ** LEVEL1 ** " * 20
# We check if we really notify the level1
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
# check_notification: yes (hard)
print "---current_notification_number", svc.current_notification_number
# notification_number is already sent. the next one has been scheduled
# and is waiting for notification_interval to pass. so the current
# number is 2
self.assertEqual(1, svc.current_notification_number)
print "OK, level1 is notified, notif nb = 1"
print "---------------------------------1st round with a hard"
print "find a way to get the number of the last reaction"
cnn = svc.current_notification_number
print "- 1 x BAD repeat -------------------------------------"
# Now we go for the level2 escalation, so we will need to say: he, it's 1 hour since the begining:p
print "*************Next", svc.notification_interval * svc.__class__.interval_length
# first, we check if the next notification will really be near 1 hour because the escalation
# to level2 is asking for it. If it don't, the standard was 1 day!
for n in svc.notifications_in_progress.values():
next = svc.get_next_notification_time(n)
print abs(next - now)
# Check if we find the next notification for the next hour,
# and not for the next day like we ask before
self.assertLess(abs(next - now - 3600), 10)
# And we hack the notification so we can raise really the level2 escalation
for n in svc.notifications_in_progress.values():
n.t_to_go = time.time()
n.creation_time -= 3600
print " ** LEVEL2 ** " * 20
# We go in trouble too
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.001)
# Now we raise the time since the begining at 1 hour, so we can escalade
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print "Level 2 got warn, now we search for level3"
print "cnn and cur", cnn, svc.current_notification_number
self.assertGreater(svc.current_notification_number, cnn)
cnn = svc.current_notification_number
# Now the same thing, but for level3, so one more hour
for n in svc.notifications_in_progress.values():
# HOP, we say: it's already 3600 second since the last notif,
n.t_to_go = time.time()
n.creation_time -= 3600
# One more bad, we say: he, it's 7200 sc of notif, so must be still level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
for n in svc.notifications_in_progress.values():
# we say that the next notif will be right now
# so we can raise a notif now
n.t_to_go = time.time()
# One more, we bypass 7200, so now it's still level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
# Now we send 10 more notif, we must be still level3
for i in range(10):
for n in svc.notifications_in_progress.values():
# HOP, we say: it's already 3600 second since the last notif,
n.t_to_go = time.time()
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
# Ok now we get the normal stuff, we do NOT want to raise so soon a
# notification.
self.scheduler_loop(2, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.show_actions()
print svc.notifications_in_progress
# Should be far away
for n in svc.notifications_in_progress.values():
print n, n.t_to_go, time.time(), n.t_to_go - time.time()
# Should be "near" one day now, so 84000s
self.assertLess(8300 < abs(n.t_to_go - time.time()), 85000)
# And so no notification
self.assert_no_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
# Now we recover, it will be fun because all of level{1,2,3} must be send a
# recovery notif
self.scheduler_loop(2, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.show_actions()
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;OK;')
self.show_and_clear_logs()
def test_time_based_escalation_with_short_notif_interval(self):
self.print_header()
# retry_interval 2
# critical notification
# run loop -> another notification
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0_time_long_notif_interval")
# For this specific test, notif interval will be something like 10s
#svc.notification_interval = 0.1
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
self.scheduler_loop(1, [[host, 0, 'UP']], do_sleep=True, sleep_time=0.1)
print "- 1 x OK -------------------------------------"
self.scheduler_loop(1, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.assertEqual(0, svc.current_notification_number)
# We hack the interval_length for short time, like 10s
svc.__class__.interval_length = 5
# We check if we correclty linked our escalations
tolevel2_time = self.sched.conf.escalations.find_by_name('ToLevel2-shortinterval')
self.assertIsNot(tolevel2_time, None)
self.assertIn(tolevel2_time, svc.escalations)
#tolevel3_time = self.sched.conf.escalations.find_by_name('ToLevel3-time')
#self.assertIsNot(tolevel3_time, None)
#self.assertIn(tolevel3_time, svc.escalations)
# Go for the running part!
#--------------------------------------------------------------
# service reaches soft;1
# there must not be any notification
#--------------------------------------------------------------
print "- 1 x BAD get soft -------------------------------------"
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# check_notification: not (soft)
print "---current_notification_number", svc.current_notification_number
#--------------------------------------------------------------
# service reaches hard;2
# a notification must have been created
# notification number must be 1
#--------------------------------------------------------------
print "- 1 x BAD get hard -------------------------------------"
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
# We check if we really notify the level1
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
# check_notification: yes (hard)
print "---current_notification_number", svc.current_notification_number
# notification_number is already sent. the next one has been scheduled
# and is waiting for notification_interval to pass. so the current
# number is 2
self.assertEqual(1, svc.current_notification_number)
print "OK, level1 is notified, notif nb = 1"
print "---------------------------------1st round with a hard"
print "find a way to get the number of the last reaction"
cnn = svc.current_notification_number
print "- 1 x BAD repeat -------------------------------------"
# For the test, we hack the notif value because we do not wan to wait 1 hour!
#for n in svc.notifications_in_progress.values():
# HOP, we say: it's already 3600 second since the last notif,
# svc.notification_interval = 3600
# and we say that there is still 1hour since the notification creation
# so it will say the notification time is huge, and so it will escalade
# n.creation_time = n.creation_time - 3600
# Sleep 1min and look how the notification is going, only 6s because we will go in
# escalation in 5s (5s = interval_length, 1 for escalation time)
print "---" * 200
print "We wait a bit, but not enough to go in escalation level2"
time.sleep(2)
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.001)
# Now we raise a notification time of 1hour, we escalade to level2
self.assert_no_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print "---" * 200
print "OK NOW we will have an escalation!"
time.sleep(5)
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.001)
# Now we raise a notification time of 1hour, we escalade to level2
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
self.show_actions()
print "cnn and cur", cnn, svc.current_notification_number
# We check that we really raise the notif number too
self.assertGreater(svc.current_notification_number, cnn)
cnn = svc.current_notification_number
# Ok we should have one notification
next_notifications = svc.notifications_in_progress.values()
print "LEN", len(next_notifications)
for n in next_notifications:
print n
self.assertEqual(1, len(next_notifications))
n = next_notifications.pop()
print "Current NOTIFICATION", n.__dict__, n.t_to_go, time.time(), n.t_to_go - time.time(), n.already_start_escalations
# Should be in the escalation ToLevel2-shortinterval
self.assertIn('ToLevel2-shortinterval', n.already_start_escalations)
# Ok we want to be sure we are using the current escalation interval, the 1 interval = 5s
# So here we should have a new notification for level2
print "*--*--" * 20
print "Ok now another notification during the escalation 2"
time.sleep(10)
# One more bad, we say: he, it's still near 1 hour, so still level2
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.show_and_clear_logs()
# Ok now go in the Level3 thing
print "*--*--" * 20
print "Ok now goes in level3 too"
time.sleep(10)
# One more, we bypass 7200, so now it's level3
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;CRITICAL;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
# Ok we should have one notification
next_notifications = svc.notifications_in_progress.values()
self.assertEqual(1, len(next_notifications))
n = next_notifications.pop()
print "Current NOTIFICATION", n.__dict__, n.t_to_go, time.time(), n.t_to_go - time.time(), n.already_start_escalations
# Should be in the escalation ToLevel2-shortinterval
self.assertIn('ToLevel2-shortinterval', n.already_start_escalations)
self.assertIn('ToLevel3-shortinterval', n.already_start_escalations)
# Make a loop for pass the next notification
time.sleep(5)
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
print "Current NOTIFICATION", n.__dict__, n.t_to_go, time.time(), n.t_to_go - time.time(), n.already_start_escalations
# Now way a little bit, and with such low value, the escalation3 value must be ok for this test to pass
time.sleep(5)
self.scheduler_loop(1, [[svc, 2, 'BAD']], do_sleep=True, sleep_time=0.1)
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;CRITICAL;')
self.show_and_clear_logs()
# Now we recover, it will be fun because all of level{1,2,3} must be send a
# recovery notif
self.scheduler_loop(2, [[svc, 0, 'OK']], do_sleep=True, sleep_time=0.1)
self.show_actions()
self.assert_any_log_match('SERVICE NOTIFICATION: level1.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level2.*;OK;')
self.assert_any_log_match('SERVICE NOTIFICATION: level3.*;OK;')
self.show_and_clear_logs()
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | 6,491,218,345,128,111,000 | 46.872308 | 126 | 0.584664 | false |
smarinov/playground | aoc2017/spinlock.py | 1 | 2078 | """Advent of Code 2017, Day 17: Spinlock"""
import unittest
def get_num_after_zero(no_steps: int, last_number: int) -> int:
"""Quickly iterate as the spinlock and return the number right of zero.
Args:
no_steps(int): The number of steps of the pinlock after each insert.
last_number(int): The last number the spinlock wants to insert.
Returns:
int. The number located to the right of the number 0 after last insert.
"""
pos, ret = 0, 0
for number in range(1, last_number + 1):
pos = (pos + no_steps) % number + 1
if pos == 1:
ret = number
return ret
def get_num_after_last_inserted(no_steps: int, last_number: int) -> int:
"""Slowly iterate as the spinlock and return the number after last insert.
Args:
no_steps(int): The number of steps of the pinlock after each insert.
last_number(int): The last number the spinlock wants to insert.
Returns:
int. The number located to the right of the last inserted number.
"""
buff = [0]
pos = 0
for number in range(1, last_number + 1):
pos = (pos + no_steps) % len(buff) + 1
buff.insert(pos, number)
return buff[(pos + 1) % len(buff)]
class TestSpinlock(unittest.TestCase):
"""Tests the functions simulating the behaviour of the spinlock."""
def test_task_description(self):
"""Tests the solution over the sample tests in problem statement."""
self.assertEqual(get_num_after_last_inserted(3, 9), 5)
self.assertEqual(get_num_after_last_inserted(3, 2017), 638)
self.assertEqual(get_num_after_zero(3, 9), 9)
def test_additional(self):
"""Tests the solution over the additional tests for this task."""
self.assertEqual(get_num_after_zero(3, 2017), 1226)
self.assertEqual(get_num_after_zero(3, int(50e6)), 1222153)
self.assertEqual(get_num_after_last_inserted(367, 2017), 1487)
self.assertEqual(get_num_after_zero(367, int(50e6)), 25674054)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -7,848,424,267,357,272,000 | 31.46875 | 79 | 0.637151 | false |
michaelsmit/openparliament | parliament/alerts/views.py | 1 | 7175 | import re
from django.template import loader, RequestContext
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render
from django import forms
from django.conf import settings
from django.contrib import messages
from django.core import urlresolvers
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail, mail_admins
from django.core.signing import Signer, TimestampSigner, BadSignature
from django.views.decorators.cache import never_cache
from parliament.accounts.models import User
from parliament.alerts.models import Subscription
from parliament.core.models import Politician
from parliament.core.views import disable_on_readonly_db
from parliament.utils.views import JSONView
class PoliticianAlertForm(forms.Form):
email = forms.EmailField(label='Your email')
politician = forms.IntegerField(widget=forms.HiddenInput)
@disable_on_readonly_db
def politician_hansard_signup(request):
try:
politician_id = int(re.sub(r'\D', '', request.REQUEST.get('politician', '')))
except ValueError:
raise Http404
pol = get_object_or_404(Politician, pk=politician_id)
success = False
if request.method == 'POST':
# This is a hack to remove spaces from e-mails before sending them off to the validator
# If anyone knows a cleaner way of doing this without writing a custom field, please let me know
postdict = request.POST.copy()
if 'email' in postdict:
postdict['email'] = postdict['email'].strip().lower()
form = PoliticianAlertForm(postdict)
if form.is_valid():
if form.cleaned_data['email'] == request.authenticated_email:
Subscription.objects.get_or_create_by_query(
_generate_query_for_politician(pol),
request.authenticated_email_user
)
messages.success(request, u"You're signed up for alerts for %s." % pol.name)
return HttpResponseRedirect(urlresolvers.reverse('alerts_list'))
key = "%s,%s" % (politician_id, form.cleaned_data['email'])
signed_key = TimestampSigner(salt='alerts_pol_subscribe').sign(key)
activate_url = urlresolvers.reverse('alerts_pol_subscribe',
kwargs={'signed_key': signed_key})
activation_context = RequestContext(request, {
'pol': pol,
'activate_url': activate_url,
})
t = loader.get_template("alerts/activate.txt")
send_mail(subject=u'Confirmation required: Email alerts about %s' % pol.name,
message=t.render(activation_context),
from_email='[email protected]',
recipient_list=[form.cleaned_data['email']])
success = True
else:
initial = {
'politician': politician_id
}
if request.authenticated_email:
initial['email'] = request.authenticated_email
form = PoliticianAlertForm(initial=initial)
c = RequestContext(request, {
'form': form,
'success': success,
'pol': pol,
'title': 'Email alerts for %s' % pol.name,
})
t = loader.get_template("alerts/signup.html")
return HttpResponse(t.render(c))
@never_cache
def alerts_list(request):
if not request.authenticated_email:
return render(request, 'alerts/list_unauthenticated.html',
{'title': 'Email alerts'})
user = User.objects.get(email=request.authenticated_email)
if request.session.get('pending_alert'):
Subscription.objects.get_or_create_by_query(request.session['pending_alert'], user)
del request.session['pending_alert']
subscriptions = Subscription.objects.filter(user=user).select_related('topic')
t = loader.get_template('alerts/list.html')
c = RequestContext(request, {
'user': user,
'subscriptions': subscriptions,
'title': 'Your email alerts'
})
resp = HttpResponse(t.render(c))
resp.set_cookie(
key='enable-alerts',
value='y',
max_age=60*60*24*90,
httponly=False
)
return resp
class CreateAlertView(JSONView):
def post(self, request):
query = request.POST.get('query')
if not query:
raise Http404
user_email = request.authenticated_email
if not user_email:
request.session['pending_alert'] = query
return self.redirect(urlresolvers.reverse('alerts_list'))
user = User.objects.get(email=user_email)
try:
subscription = Subscription.objects.get_or_create_by_query(query, user)
return True
except ValueError:
raise NotImplementedError
create_alert = CreateAlertView.as_view()
class ModifyAlertView(JSONView):
def post(self, request, subscription_id):
subscription = get_object_or_404(Subscription, id=subscription_id)
if subscription.user.email != request.authenticated_email:
raise PermissionDenied
action = request.POST.get('action')
if action == 'enable':
subscription.active = True
subscription.save()
elif action == 'disable':
subscription.active = False
subscription.save()
elif action == 'delete':
subscription.delete()
return True
modify_alert = ModifyAlertView.as_view()
def _generate_query_for_politician(pol):
return u'MP: "%s" Type: "debate"' % pol.identifier
@disable_on_readonly_db
def politician_hansard_subscribe(request, signed_key):
ctx = {
'key_error': False
}
try:
key = TimestampSigner(salt='alerts_pol_subscribe').unsign(signed_key, max_age=60*60*24*14)
politician_id, _, email = key.partition(',')
pol = get_object_or_404(Politician, id=politician_id)
if not pol.current_member:
raise Http404
user, created = User.objects.get_or_create(email=email)
sub, created = Subscription.objects.get_or_create_by_query(
_generate_query_for_politician(pol), user)
if not sub.active:
sub.active = True
sub.save()
ctx.update(
pol=pol,
title=u'Email alerts for %s' % pol.name
)
except BadSignature:
ctx['key_error'] = True
return render(request, 'alerts/activate.html', ctx)
@never_cache
def unsubscribe(request, key):
ctx = {
'title': 'Email alerts'
}
try:
subscription_id = Signer(salt='alerts_unsubscribe').unsign(key)
subscription = get_object_or_404(Subscription, id=subscription_id)
subscription.active = False
subscription.save()
if settings.PARLIAMENT_DB_READONLY:
mail_admins("Unsubscribe request", subscription_id)
ctx['query'] = subscription.topic
except BadSignature:
ctx['key_error'] = True
c = RequestContext(request, ctx)
t = loader.get_template("alerts/unsubscribe.html")
return HttpResponse(t.render(c))
| agpl-3.0 | -7,349,912,890,321,646,000 | 34.519802 | 104 | 0.638188 | false |
wking/thumbor | thumbor/url.py | 1 | 5290 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import re
from urllib import quote
class Url(object):
unsafe_or_hash = r'(?:(?:(?P<unsafe>unsafe)|(?P<hash>.+?))/)?'
debug = '(?:(?P<debug>debug)/)?'
meta = '(?:(?P<meta>meta)/)?'
trim = '(?:(?P<trim>trim(?::(?:top-left|bottom-right))?(?::\d+)?)/)?'
crop = '(?:(?P<crop_left>\d+)x(?P<crop_top>\d+):(?P<crop_right>\d+)x(?P<crop_bottom>\d+)/)?'
fit_in = '(?:(?P<adaptive>adaptive-)?(?P<full>full-)?(?P<fit_in>fit-in)/)?'
dimensions = '(?:(?P<horizontal_flip>-)?(?P<width>(?:\d+|orig))?x(?P<vertical_flip>-)?(?P<height>(?:\d+|orig))?/)?'
halign = r'(?:(?P<halign>left|right|center)/)?'
valign = r'(?:(?P<valign>top|bottom|middle)/)?'
smart = r'(?:(?P<smart>smart)/)?'
filters = r'(?:filters:(?P<filters>.+?\))/)?'
image = r'(?P<image>.+)'
compiled_regex = None
@classmethod
def regex(cls, has_unsafe_or_hash=True):
reg = ['/?']
if has_unsafe_or_hash:
reg.append(cls.unsafe_or_hash)
reg.append(cls.debug)
reg.append(cls.meta)
reg.append(cls.trim)
reg.append(cls.crop)
reg.append(cls.fit_in)
reg.append(cls.dimensions)
reg.append(cls.halign)
reg.append(cls.valign)
reg.append(cls.smart)
reg.append(cls.filters)
reg.append(cls.image)
return ''.join(reg)
@classmethod
def parse_decrypted(cls, url):
if cls.compiled_regex:
reg = cls.compiled_regex
else:
reg = cls.compiled_regex = re.compile(cls.regex(has_unsafe_or_hash=False))
result = reg.match(url)
if not result:
return None
result = result.groupdict()
int_or_0 = lambda value: 0 if value is None else int(value)
values = {
'debug': result['debug'] == 'debug',
'meta': result['meta'] == 'meta',
'trim': result['trim'],
'crop': {
'left': int_or_0(result['crop_left']),
'top': int_or_0(result['crop_top']),
'right': int_or_0(result['crop_right']),
'bottom': int_or_0(result['crop_bottom'])
},
'adaptive': result['adaptive'] == 'adaptive',
'full': result['full'] == 'full',
'fit_in': result['fit_in'] == 'fit-in',
'width': result['width'] == 'orig' and 'orig' or int_or_0(result['width']),
'height': result['height'] == 'orig' and 'orig' or int_or_0(result['height']),
'horizontal_flip': result['horizontal_flip'] == '-',
'vertical_flip': result['vertical_flip'] == '-',
'halign': result['halign'] or 'center',
'valign': result['valign'] or 'middle',
'smart': result['smart'] == 'smart',
'filters': result['filters'] or '',
'image': 'image' in result and result['image'] or None
}
return values
@classmethod # NOQA
def generate_options(cls,
debug=False,
width=0,
height=0,
smart=False,
meta=False,
trim=None,
adaptive=False,
full=False,
fit_in=False,
horizontal_flip=False,
vertical_flip=False,
halign='center',
valign='middle',
crop_left=None,
crop_top=None,
crop_right=None,
crop_bottom=None,
filters=None):
url = []
if debug:
url.append('debug')
if meta:
url.append('meta')
if trim:
if isinstance(trim, bool):
url.append('trim')
else:
url.append('trim:%s' % trim)
crop = crop_left or crop_top or crop_right or crop_bottom
if crop:
url.append('%sx%s:%sx%s' % (
crop_left,
crop_top,
crop_right,
crop_bottom
))
if fit_in:
fit_ops = []
if adaptive:
fit_ops.append('adaptive')
if full:
fit_ops.append('full')
fit_ops.append('fit-in')
url.append('-'.join(fit_ops))
if horizontal_flip:
width = '-%s' % width
if vertical_flip:
height = '-%s' % height
if width or height:
url.append('%sx%s' % (width, height))
if halign != 'center':
url.append(halign)
if valign != 'middle':
url.append(valign)
if smart:
url.append('smart')
if filters:
url.append('filters:%s' % filters)
return '/'.join(url)
@classmethod
def encode_url(kls, url):
return quote(url, '/:?%=&()~",\'$')
| mit | -4,982,072,253,703,790,000 | 30.301775 | 119 | 0.46465 | false |
fbradyirl/home-assistant | homeassistant/components/automation/zone.py | 1 | 2417 | """Offer zone automation rules."""
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
CONF_EVENT,
CONF_ENTITY_ID,
CONF_ZONE,
MATCH_ALL,
CONF_PLATFORM,
)
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers import condition, config_validation as cv, location
EVENT_ENTER = "enter"
EVENT_LEAVE = "leave"
DEFAULT_EVENT = EVENT_ENTER
TRIGGER_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "zone",
vol.Required(CONF_ENTITY_ID): cv.entity_ids,
vol.Required(CONF_ZONE): cv.entity_id,
vol.Required(CONF_EVENT, default=DEFAULT_EVENT): vol.Any(
EVENT_ENTER, EVENT_LEAVE
),
}
)
async def async_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
entity_id = config.get(CONF_ENTITY_ID)
zone_entity_id = config.get(CONF_ZONE)
event = config.get(CONF_EVENT)
@callback
def zone_automation_listener(entity, from_s, to_s):
"""Listen for state changes and calls action."""
if (
from_s
and not location.has_location(from_s)
or not location.has_location(to_s)
):
return
zone_state = hass.states.get(zone_entity_id)
if from_s:
from_match = condition.zone(hass, zone_state, from_s)
else:
from_match = False
to_match = condition.zone(hass, zone_state, to_s)
# pylint: disable=too-many-boolean-expressions
if (
event == EVENT_ENTER
and not from_match
and to_match
or event == EVENT_LEAVE
and from_match
and not to_match
):
hass.async_run_job(
action(
{
"trigger": {
"platform": "zone",
"entity_id": entity,
"from_state": from_s,
"to_state": to_s,
"zone": zone_state,
"event": event,
}
},
context=to_s.context,
)
)
return async_track_state_change(
hass, entity_id, zone_automation_listener, MATCH_ALL, MATCH_ALL
)
| apache-2.0 | 7,082,102,247,609,235,000 | 28.839506 | 78 | 0.531651 | false |
faaceb/Auditoria | montarSoD.py | 1 | 1434 | # -*- coding: iso-8859-1 -*-
#Arquivo contendo as funcoes e respectivas transacoes
matrizConflitos = "matrizConflitosLibbs.csv"
#matrizConflitos = "matrizConflitos.csv"
#matrizConflitos = "matrizConflitosInternet.csv"
#matrizConflitos = "matrizConflitosInternet2.csv"
transacoes = "transacoes.csv"
#Listas e dicionarios
tcodes = []
listFuncTcodes = []
tcodesCombinados = []
dicionario = {}
#Abre o arquivo de conflitos e inclui em uma lista
with open(matrizConflitos, 'r') as arq1:
for rows in arq1:
chave = "%s;%s" % (rows.split(';')[1].replace('\r','').replace('\n',''),rows.split(';')[2].replace('\r','').replace('\n',''))
valor = "%s" % (rows.split(';')[3].replace('\r','').replace('\n',''))
dicionario[chave] = valor
#Abre o arquivo de transacoes, executa a combinacao e inclui em uma lista
with open(transacoes, 'r') as arq2:
for linha in arq2:
tcodes.append(linha.replace('\r','').replace('\n',''))
for i in tcodes:
for x in tcodes:
if i != x:
tcodesCombinados.append('%s;%s' % (i,x))
#print (dicionario['VA32;VE88'])
def verificaConflito(transacaoCombinada):
if dicionario.get(transacaoCombinada, False):
return True
else:
return False
for cadaTcode in tcodesCombinados:
if verificaConflito(cadaTcode) == True:
print('%s;%s' % (cadaTcode,dicionario.get(cadaTcode)))
print('--- FIM DO ARQUIVO ---\r\n')
| gpl-3.0 | -35,940,191,028,250,496 | 31.590909 | 133 | 0.642259 | false |
tiagofilipe12/pATLAS | patlas/utils/node_size_n_links.py | 1 | 3751 | #!/usr/bin/env python3
import sys
import json
import plotly
import plotly.graph_objs as go
def make_histogram(trace_list):
'''
Function to make an histogram from a list
Parameters
----------
trace_list: list
A list with all entries to the histogram (entries should be float)
'''
sorted_list = sorted(trace_list, reverse=True)
trace_lengths = go.Histogram(x=sorted_list,
opacity=0.75,
name="Histogram of the size ratio between "
"linked nodes")
layout = go.Layout(barmode="overlay",
xaxis=dict(
title="number of links"
),
yaxis=dict(
title="ratio between nodes"
)
)
fig = go.Figure(data=[trace_lengths], layout=layout)
plotly.offline.plot(fig, filename="dist.html", auto_open=False)
def main():
'''
This script just have main function, which basically transforms a json
file into a csv file for spreadsheet exploration
'''
# open output file
ofile = open("output_size_n_links.csv", "w")
# read input
input_json = sys.argv[1]
reader = open(input_json)
reader_dict = json.load(reader)
# write header
ofile.write("parentId;parentSize;childId;childSize;distance;sizeDiff\n")
list_lengths = []
dict_refactored_json = {"links": []}
dict_refactored_json["nodes"] = reader_dict["nodes"]
for element in reader_dict["links"]:
## get parent node related params
parent_id = element["parentId"]
parent_node = [x for x in reader_dict["nodes"] if x["id"] == parent_id]
parent_node_length = float(parent_node[0]["length"])
# and now child node related params
child_id = element["childId"]
child_node = [x for x in reader_dict["nodes"] if x["id"] == child_id]
child_node_length = float(child_node[0]["length"])
distance = element["distNSizes"]
size_diff = abs(parent_node_length - child_node_length)
size_ratio = float(min(parent_node_length, child_node_length)/
max(parent_node_length, child_node_length))
list_lengths.append(size_ratio)
# write a line in output file
ofile.write(";".join([parent_id, parent_node_length, child_id,
child_node_length, distance, size_diff,
str(size_ratio)]) + "\n")
ofile.write("{};{};{};{};{};{};{}\n".format(parent_id,
str(parent_node_length),
child_id, child_node_length,
distance, size_diff,
str(size_ratio)))
dict_refactored_json["links"].append({"parentId": parent_id,
"childId": child_id,
"distNSizes": {
"distance": distance,
"sizeRatio": size_ratio
}
})
# closes input and output files
reader.close()
ofile.close()
# make an histogram of lengths
make_histogram(list_lengths)
#
refactored_json = open("refactored_filtered.json", "w")
refactored_json.write(json.dumps(dict_refactored_json))
refactored_json.close()
if __name__ == "__main__":
main()
| gpl-3.0 | -1,156,766,333,690,464,000 | 35.067308 | 80 | 0.494002 | false |
arupiot/deskcontrol | deskcontrol/modules/navigation.py | 1 | 1502 | class StateModule(object):
always_tick = False
def __init__(self, controller):
self.id = self.__class__.__name__
self.controller = controller
def draw(self):
pass
def try_bricklet(self, uid, device_identifier, position):
pass
def navigate(self, direction):
pass
def tick(self):
return
class MenuModule(StateModule):
items = []
current = 0
def draw(self, clear=True):
self.controller.screen.draw(
"menu",
{"title": self.items[self.current][1],
"icon": self.items[self.current][1].lower()})
def add_menu_item(self, module):
self.items.append((module.id, module.menu_title))
def navigate(self, direction):
if direction == "forward":
self.controller.change_module(self.items[self.current][0])
if direction == "back":
self.controller.screen.draw_splash()
self.controller.current_module = None
if direction in ["down", "up"]:
if direction == "down":
self.current = self.current + 1
else:
self.current = self.current - 1
if self.current >= len(self.items):
self.current = 0
elif self.current < 0:
self.current = len(self.items) - 1
self.draw(clear=False)
# print("Menu: " + str(self.items[self.current][1]))
| mit | 8,111,437,387,768,366,000 | 28.04 | 70 | 0.527297 | false |
nosyndicate/pytorchrl | pytorchrl/replay.py | 1 | 6213 | import numpy as np
import rllab.misc.logger as logger
from rllab.misc import special2 as special
class SimpleReplayPool(object):
def __init__(
self,
max_pool_size,
observation_dim,
action_dim,
replacement_policy='stochastic',
replacement_prob=1.0,
max_skip_episode=10,
env=None):
self._observation_dim = observation_dim
self._action_dim = action_dim
self._max_pool_size = max_pool_size
self._replacement_policy = replacement_policy
self._replacement_prob = replacement_prob
self._max_skip_episode = max_skip_episode
self._observations = np.zeros((max_pool_size, observation_dim),)
if env is not None and env.action_space.is_discrete:
self._actions = np.zeros((max_pool_size,),dtype=np.int64)
self._n = env.action_space.n
self._is_action_discrete = True
else:
self._actions = np.zeros((max_pool_size, action_dim),)
self._is_action_discrete = False
self._rewards = np.zeros(max_pool_size)
self._terminals = np.zeros(max_pool_size, dtype='uint8')
self._initials = np.zeros(max_pool_size, dtype='uint8')
self._observations.fill(0) # pre-allocate
self._actions.fill(0) # pre-allocate
self._terminals.fill(0) # pre-allocate
self._initials.fill(0) # pre-allocate
self._rewards.fill(0) # pre-allocate
# Bottom pointer
self._bottom = 0
# Top pointer
self._top = 0
# Size of the replay buffer
self._size = 0
def add_sample(self, observation, action, reward, terminal, initial):
"""
Add a sample to current replay buffer.
Parameters
----------
observation (np.array):
# TODO (ewei)
"""
self.check_replacement()
self._observations[self._top] = observation
if self._is_action_discrete and not isinstance(action,
(int, np.int64)):
action = special.from_onehot(action)
self._actions[self._top] = action
self._rewards[self._top] = reward
self._terminals[self._top] = terminal
self._initials[self._top] = initial
self.advance()
def advance(self):
"""
Update the top pointer, bottom pointer, and size of the replay buffer.
"""
self._top = (self._top + 1) % self._max_pool_size
if self._size >= self._max_pool_size:
self._bottom = (self._bottom + 1) % self._max_pool_size
else:
self._size += 1
def check_replacement(self):
if self._replacement_prob < 1.0:
if self._size < self._max_pool_size or \
not self._initials[self._top]: return
self.advance_until_terminate()
def get_skip_flag(self):
"""
"""
if self._replacement_policy == 'full':
skip = False
elif self._replacement_policy == 'stochastic':
skip = np.random.uniform() > self._replacement_prob
else:
raise NotImplementedError
return skip
def advance_until_terminate(self):
skip = self.get_skip_flag()
n_skips = 0
old_top = self._top
new_top = (old_top + 1) % self._max_pool_size
while skip and old_top != new_top and n_skips < self._max_skip_episode:
n_skips += 1
self.advance()
while not self._initials[self._top]:
self.advance()
skip = self.get_skip_flag()
new_top = self._top
logger.log("add_sample, skipped %d episodes, top=%d->%d"%(
n_skips, old_top, new_top))
def last_batch(self, batch_size):
assert self._size >= batch_size
if self._top >= batch_size:
observations=self._observations[self._top-batch_size:self._top]
else:
assert self._size == self._max_pool_size
obs1 = self._observations[self._max_pool_size+
self._top-batch_size:]
obs2 = self._observations[:self._top]
observations = np.concatenate((obs1, obs2), axis=0)
return dict(
observations = observations,
)
def random_batch(self, batch_size):
"""
Draw a random batch from the replay buffer.
Parameters
----------
batch_size (int): The size of the batch.
Returns
-------
sample_batch (dict): A dict contains the state, action,
reward, terminal, next_state
"""
assert self._size >= batch_size
indices = np.zeros(batch_size, dtype='uint64')
transition_indices = np.zeros(batch_size, dtype='uint64')
count = 0
while count < batch_size:
index = np.random.randint(self._bottom, self._bottom + self._size) % self._max_pool_size
# make sure that the transition is valid: if we are at the end of the pool, we need to discard
# this sample
if index == self._size - 1 and self._size <= self._max_pool_size:
continue
# if self._terminals[index]:
# continue
transition_index = (index + 1) % self._max_pool_size
# make sure that the transition is valid: discard the transition if it crosses horizon-triggered resets
if not self._terminals[index] and self._initials[transition_index]:
continue
indices[count] = index
transition_indices[count] = transition_index
count += 1
actions = self._actions[indices]
if self._is_action_discrete:
actions = special.to_onehot_n(actions, self._n)
return dict(
observations=self._observations[indices],
actions=actions,
rewards=self._rewards[indices],
terminals=self._terminals[indices],
initials=self._initials[indices],
next_observations=self._observations[transition_indices]
)
@property
def size(self):
return self._size
| mit | -9,150,253,699,225,256,000 | 35.122093 | 115 | 0.55738 | false |
krzysztof/invenio-openaire | invenio_openaire/tasks.py | 1 | 4010 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""OpenAIRE service integration for Invenio repositories."""
from __future__ import absolute_import, print_function
from copy import deepcopy
from celery import chain, shared_task
from flask import current_app
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from invenio_pidstore.errors import PIDDoesNotExistError
from invenio_pidstore.resolver import Resolver
from invenio_records.api import Record
from .loaders import LocalFundRefLoader, LocalOAIRELoader, \
RemoteFundRefLoader, RemoteOAIRELoader
from .minters import funder_minter, grant_minter
@shared_task(ignore_result=True)
def harvest_fundref(source=None):
"""Harvest funders from FundRef and store as authority records."""
loader = LocalFundRefLoader(source=source) if source \
else RemoteFundRefLoader()
for funder_json in loader.iter_funders():
register_funder.delay(funder_json)
@shared_task(ignore_result=True)
def harvest_openaire_projects(source=None, setspec=None):
"""Harvest grants from OpenAIRE and store as authority records."""
loader = LocalOAIRELoader(source=source) if source \
else RemoteOAIRELoader(setspec=setspec)
for grant_json in loader.iter_grants():
register_grant.delay(grant_json)
@shared_task(ignore_result=True)
def harvest_all_openaire_projects():
"""Reharvest all grants from OpenAIRE.
Harvest all OpenAIRE grants in a chain to prevent OpenAIRE
overloading from multiple parallel harvesting.
"""
setspecs = current_app.config['OPENAIRE_GRANTS_SPECS']
chain(harvest_openaire_projects.s(setspec=setspec)
for setspec in setspecs).apply_async()
@shared_task(ignore_result=True)
def register_funder(data):
"""Register the funder JSON in records and create a PID."""
create_or_update_record(data, 'frdoi', 'doi', funder_minter)
@shared_task(ignore_result=True)
def register_grant(data):
"""Register the grant JSON in records and create a PID."""
create_or_update_record(data, 'grant', 'internal_id', grant_minter)
def create_or_update_record(data, pid_type, id_key, minter):
"""Register a funder or grant."""
resolver = Resolver(
pid_type=pid_type, object_type='rec', getter=Record.get_record)
try:
pid, record = resolver.resolve(data[id_key])
data_c = deepcopy(data)
del data_c['remote_modified']
record_c = deepcopy(data)
del record_c['remote_modified']
# All grants on OpenAIRE are modified periodically even if nothing
# has changed. We need to check for actual differences in the metadata
if data_c != record_c:
record.update(data)
record.commit()
record_id = record.id
db.session.commit()
RecordIndexer().index_by_id(str(record_id))
except PIDDoesNotExistError:
record = Record.create(data)
record_id = record.id
minter(record.id, data)
db.session.commit()
RecordIndexer().index_by_id(str(record_id))
| gpl-2.0 | -4,988,418,684,989,777,000 | 35.454545 | 78 | 0.715461 | false |
111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_SESS_BORDER_CTRLR_STATS_MIB.py | 1 | 94513 | """ CISCO_SESS_BORDER_CTRLR_STATS_MIB
The main purpose of this MIB is to define the statistics
information for the Session Border Controller application. This MIB
categorizes the statistics information into the following types\:
1. RADIUS Messages Statistics \- Represents statistics of
various RADIUS messages for RADIUS servers with which the
client (SBC) shares a secret.
2. Rf Billing Statistics\- Represents Rf billing statistics
information which monitors the messages sent per\-realm over the
IMS Rx interface by the Rf billing manager (SBC).
3. SIP Statistics \- Represents SIP requests and various SIP
responses on a SIP adjacency in a given interval.
The Session Border Controller (SBC) enables direct IP\-to\-IP
interconnect between multiple administrative domains for
session\-based services providing protocol inter\-working,
security, and admission control and management. The SBC is a
voice over IP (VoIP) device that sits on the border of a
network and controls call admission to that network.
The primary purpose of an SBC is to protect the interior of the
network from excessive call load and malicious traffic.
Additional functions provided by the SBC include media bridging
and billing services.
Periodic Statistics \- Represents the SBC call statistics
information for a particular time interval. E.g. you can
specify that you want to retrieve statistics for a summary
period of the current or previous 5 minutes, 15 minutes, hour,
or day. The statistics for 5 minutes are divided into five
minute intervals past the hour \- that is, at 0 minutes, 5
minutes, 10 minutes... past the hour. When you retrieve
statistics for the current five minute period, you will be
given statistics from the start of the interval to the current
time. When you retrieve statistics for the previous five
minutes, you will be given the statistics for the entirety of
the previous interval. For example, if it is currently 12\:43
\- the current 5 minute statistics cover 12\:40 \- 12\:43
\- the previous 5 minute statistics cover 12\:35 \- 12\:40
The other intervals work similarly. 15 minute statistics are
divided into 15 minute intervals past the hour (0 minutes, 15
minutes, 30 minutes, 45 minutes). Hourly statistics are divided
into intervals on the hour. Daily statistics are divided into
intervals at 0\:00 each day. Therefore, if you retrieve the
statistics at 12\:43 for each of these intervals, the periods
covered are as follows.
\- current 15 minutes\: 12\:30 \- 12\:43
\- previous 15 minutes\: 12\:15 \- 12\:30
\- current hour\: 12\:00 \- 12\:43
\- last hour\: 11\:00 \- 12\:00
\- current day\: 00\:00 \- 12\:43
\- last day\: 00\:00 (the day before) \- 00\:00.
GLOSSARY
SBC\: Session Border Controller
CSB\: CISCO Session Border Controller
Adjacency\: An adjacency contains the system information to be
transmitted to next HOP.
ACR\: Accounting Request
ACA\: Accounting Accept
AVP\: Attribute\-Value Pairs
REFERENCES
1. CISCO Session Border Controller Documents and FAQ
http\://zed.cisco.com/confluence/display/SBC/SBC
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
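

# --- Illustrative usage sketch (editorial addition, not generated code) ------
# The module docstring above describes the statistics exposed by
# CISCO-SESS-BORDER-CTRLR-STATS-MIB. The helper below is a minimal sketch of
# how the generated bindings in this module could be read over NETCONF with
# YDK's CRUDService. The device address, port and credentials are placeholder
# assumptions; the function is only defined here and never runs on import.
def _example_read_radius_stats(address='10.0.0.1', username='admin',
                               password='admin'):
    """Read csbRadiusStatsTable and print a few per-server RADIUS counters.

    Editorial example only: it assumes the target device supports this MIB
    over NETCONF and that YDK's CRUDService/NetconfServiceProvider APIs are
    available in the installed ydk-py release.
    """
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider

    provider = NetconfServiceProvider(address=address, port=830,
                                      username=username, password=password,
                                      protocol='ssh')
    crud = CRUDService()

    # Read the whole MIB container; CiscoSessBorderCtrlrStatsMib is defined
    # later in this module and is resolved when the function is called.
    mib = crud.read(provider, CiscoSessBorderCtrlrStatsMib())
    for entry in mib.csbradiusstatstable.csbradiusstatsentry:
        print('%s: access-requests=%s access-accepts=%s' % (
            entry.csbradiusstatsclientname,
            entry.csbradiusstatsacsreqs,
            entry.csbradiusstatsacsaccpts))
    provider.close()
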
class CiscosbcradiusclienttypeEnum(Enum):
"""
CiscosbcradiusclienttypeEnum
This textual convention represents the type of RADIUS client.
.. data:: authentication = 1
.. data:: accounting = 2
"""
authentication = 1
accounting = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscosbcradiusclienttypeEnum']
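

# --- Illustrative interval arithmetic (editorial addition) -------------------
# The <Periodic Statistics> text in the module docstring explains that SBC
# buckets statistics into 5-minute, 15-minute, hourly and daily intervals
# aligned to the hour/day. The helper below sketches that bucketing for a
# datetime value; it illustrates the documented behaviour and is not part of
# the MIB bindings themselves.
def _example_interval_start(now, minutes):
    """Return the start of the current `minutes`-long bucket containing `now`.

    For example, with now = 12:43 and minutes = 15 this returns 12:30,
    matching the 'current 15 minutes: 12:30 - 12:43' example in the module
    docstring (minutes = 5, 60 and 1440 give 12:40, 12:00 and 00:00).
    """
    import datetime

    midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
    elapsed_minutes = int((now - midnight).total_seconds() // 60)
    bucket_start_minute = (elapsed_minutes // minutes) * minutes
    return midnight + datetime.timedelta(minutes=bucket_start_minute)
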
class CiscosbcsipmethodEnum(Enum):
"""
CiscosbcsipmethodEnum
This textual convention represents the various SIP Methods.
.. data:: unknown = 1
.. data:: ack = 2
.. data:: bye = 3
.. data:: cancel = 4
.. data:: info = 5
.. data:: invite = 6
.. data:: message = 7
.. data:: notify = 8
.. data:: options = 9
.. data:: prack = 10
.. data:: refer = 11
.. data:: register = 12
.. data:: subscribe = 13
.. data:: update = 14
"""
unknown = 1
ack = 2
bye = 3
cancel = 4
info = 5
invite = 6
message = 7
notify = 8
options = 9
prack = 10
refer = 11
register = 12
subscribe = 13
update = 14
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscosbcsipmethodEnum']
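

# --- Illustrative enum usage (editorial addition) ----------------------------
# A small sketch of how CiscosbcsipmethodEnum can be used when inspecting rows
# of the SIP method statistics tables defined later in this module. The
# `entries` argument is assumed to be a csbsipmthdcurrentstatsentry list
# obtained from a CRUD read (see the sketch near the top of this module), and
# the decoded method leaf is assumed to carry a CiscosbcsipmethodEnum member.
def _example_invite_rows(entries):
    """Return only the rows whose SIP method index is INVITE."""
    return [entry for entry in entries
            if entry.csbsipmthdcurrentstatsmethod == CiscosbcsipmethodEnum.invite]
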
class CiscoSessBorderCtrlrStatsMib(object):
"""
.. attribute:: csbradiusstatstable
This table has the reporting statistics of various RADIUS messages for RADIUS servers with which the client (SBC) shares a secret. Each entry in this table is identified by a value of csbRadiusStatsEntIndex. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: :py:class:`Csbradiusstatstable <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbradiusstatstable>`
.. attribute:: csbrfbillrealmstatstable
    	This table describes the Rf billing statistics information which monitors the messages sent per\-realm by the Rf billing manager (SBC). SBC sends Rf billing data using Diameter as a transport protocol. Rf billing uses only ACR and ACA Diameter messages for the transport of billing data. The Accounting\-Record\-Type AVP on the ACR message labels the type of the accounting request. The following types are used by Rf billing. 1. For session\-based charging, the types Start (session begins), Interim (session is modified) and Stop (session ends) are used. 2. For event\-based charging, the type Event is used when a chargeable event occurs outside the scope of a session. Each row of this table is identified by a value of csbRfBillRealmStatsIndex and csbRfBillRealmStatsRealmName. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: :py:class:`Csbrfbillrealmstatstable <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbrfbillrealmstatstable>`
.. attribute:: csbsipmthdcurrentstatstable
    	This table reports the count of SIP requests and various SIP responses for each SIP method on a SIP adjacency in a given interval. Each entry in this table represents a SIP method, its incoming and outgoing count, and the individual incoming and outgoing counts of various SIP responses for this method on a SIP adjacency in a given interval. To understand the meaning of interval please refer to the <Periodic Statistics> section in the description of ciscoSbcStatsMIB. This table is indexed on csbSIPMthdCurrentStatsAdjName, csbSIPMthdCurrentStatsMethod and csbSIPMthdCurrentStatsInterval. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: :py:class:`Csbsipmthdcurrentstatstable <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbsipmthdcurrentstatstable>`
.. attribute:: csbsipmthdhistorystatstable
    	This table provides the historical count of SIP requests and various SIP responses for each SIP method on a SIP adjacency over various interval lengths defined by the csbSIPMthdHistoryStatsInterval object. Each entry in this table represents a SIP method, its incoming and outgoing count, and the individual incoming and outgoing counts of various SIP responses for this method on a SIP adjacency in a given interval. The possible values of interval will be previous 5 minutes, previous 15 minutes, previous 1 hour and previous day. To understand the meaning of interval please refer to the <Periodic Statistics> description of ciscoSbcStatsMIB. This table is indexed on csbSIPMthdHistoryStatsAdjName, csbSIPMthdHistoryStatsMethod and csbSIPMthdHistoryStatsInterval. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: :py:class:`Csbsipmthdhistorystatstable <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbsipmthdhistorystatstable>`
.. attribute:: csbsipmthdrccurrentstatstable
    	This table reports SIP method request and response code statistics for each method and response code combination on a given SIP adjacency in a given interval. To understand the meaning of interval please refer to the <Periodic Statistics> section in the description of ciscoSbcStatsMIB. An exact lookup will return a row only if \- 1) detailed response code statistics are turned on in SBC 2) the count of response code messages sent or received is non\-zero for the given SIP adjacency, method and interval. Also an inexact lookup will only return rows for messages with non\-zero counts, to protect the user from large numbers of rows for response codes which have not been received or sent
**type**\: :py:class:`Csbsipmthdrccurrentstatstable <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbsipmthdrccurrentstatstable>`
.. attribute:: csbsipmthdrchistorystatstable
    	This table reports historical data for SIP method request and response code statistics for each method and response code combination in a given past interval. The possible values of interval will be previous 5 minutes, previous 15 minutes, previous 1 hour and previous day. To understand the meaning of interval please refer to the <Periodic Statistics> section in the description of ciscoSbcStatsMIB. An exact lookup will return a row only if \- 1) detailed response code statistics are turned on in SBC 2) the count of response code messages sent or received is non\-zero for the given SIP adjacency, method and interval. Also an inexact lookup will only return rows for messages with non\-zero counts, to protect the user from large numbers of rows for response codes which have not been received or sent
**type**\: :py:class:`Csbsipmthdrchistorystatstable <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbsipmthdrchistorystatstable>`
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.csbradiusstatstable = CiscoSessBorderCtrlrStatsMib.Csbradiusstatstable()
self.csbradiusstatstable.parent = self
self.csbrfbillrealmstatstable = CiscoSessBorderCtrlrStatsMib.Csbrfbillrealmstatstable()
self.csbrfbillrealmstatstable.parent = self
self.csbsipmthdcurrentstatstable = CiscoSessBorderCtrlrStatsMib.Csbsipmthdcurrentstatstable()
self.csbsipmthdcurrentstatstable.parent = self
self.csbsipmthdhistorystatstable = CiscoSessBorderCtrlrStatsMib.Csbsipmthdhistorystatstable()
self.csbsipmthdhistorystatstable.parent = self
self.csbsipmthdrccurrentstatstable = CiscoSessBorderCtrlrStatsMib.Csbsipmthdrccurrentstatstable()
self.csbsipmthdrccurrentstatstable.parent = self
self.csbsipmthdrchistorystatstable = CiscoSessBorderCtrlrStatsMib.Csbsipmthdrchistorystatstable()
self.csbsipmthdrchistorystatstable.parent = self
class Csbradiusstatstable(object):
"""
This table has the reporting statistics of various RADIUS
messages for RADIUS servers with which the client (SBC) shares a
secret. Each entry in this table is identified by a
value of csbRadiusStatsEntIndex. The other indices of this table
are csbCallStatsInstanceIndex defined in
csbCallStatsInstanceTable and csbCallStatsServiceIndex defined
in csbCallStatsTable.
.. attribute:: csbradiusstatsentry
A conceptual row in the csbRadiusStatsTable. There is an entry in this table for each RADIUS server, as identified by a value of csbRadiusStatsEntIndex. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: list of :py:class:`Csbradiusstatsentry <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbradiusstatstable.Csbradiusstatsentry>`
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbradiusstatsentry = YList()
self.csbradiusstatsentry.parent = self
self.csbradiusstatsentry.name = 'csbradiusstatsentry'
class Csbradiusstatsentry(object):
"""
A conceptual row in the csbRadiusStatsTable. There is an
entry in this table for each RADIUS server, as identified by a
value of csbRadiusStatsEntIndex. The other indices of this
table are csbCallStatsInstanceIndex defined in
csbCallStatsInstanceTable and csbCallStatsServiceIndex defined
in csbCallStatsTable.
.. attribute:: csbcallstatsinstanceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsinstanceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatsinstancetable.Csbcallstatsinstanceentry>`
.. attribute:: csbcallstatsserviceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsserviceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatstable.Csbcallstatsentry>`
.. attribute:: csbradiusstatsentindex <key>
This object indicates the index of the RADIUS client entity that this server is configured on. This index is assigned arbitrarily by the engine and is not saved over reboots
**type**\: int
**range:** 0..4294967295
.. attribute:: csbradiusstatsacsaccpts
This object indicates the number of RADIUS Access\-Accept packets (valid or invalid) received from this server
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: csbradiusstatsacschalls
This object indicates the number of RADIUS Access\-Challenge packets (valid or invalid) received from this server
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsacsrejects
This object indicates the number of RADIUS Access\-Reject packets (valid or invalid) received from this server
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsacsreqs
This object indicates the number of RADIUS Access\-Request packets sent to this server. This does not include retransmissions
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsacsrtrns
This object indicates the number of RADIUS Access\-Request packets retransmitted to this RADIUS server
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsactreqs
This object indicates the number of RADIUS Accounting\-Request packets sent to this server. This does not include retransmissions
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsactretrans
This object indicates the number of RADIUS Accounting\-Request packets retransmitted to this RADIUS server
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsactrsps
This object indicates the number of RADIUS Accounting\-Response packets (valid or invalid) received from this server
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsbadauths
This object indicates the number of RADIUS response packets containing invalid authenticators or Signature attributes received from this server
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsclientname
This object indicates the client name of the RADIUS client to which these statistics apply
**type**\: str
.. attribute:: csbradiusstatsclienttype
This object indicates the type (authentication or accounting) of the RADIUS clients configured on SBC
**type**\: :py:class:`CiscosbcradiusclienttypeEnum <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscosbcradiusclienttypeEnum>`
.. attribute:: csbradiusstatsdropped
This object indicates the number of RADIUS packets which were received from this server and dropped for some other reason
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsmalformedrsps
This object indicates the number of malformed RADIUS response packets received from this server. Malformed packets include packets with an invalid length. Bad authenticators, Signature attributes and unknown types are not included as malformed access responses
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatspending
This object indicates the number of RADIUS request packets destined for this server that have not yet timed out or received a response. This variable is incremented when a request is sent and decremented on receipt of the response or on a timeout or retransmission
**type**\: int
**range:** 0..4294967295
**units**\: packets
.. attribute:: csbradiusstatssrvrname
This object indicates the server name of the RADIUS server to which these statistics apply
**type**\: str
.. attribute:: csbradiusstatstimeouts
This object indicates the number of RADIUS request timeouts to this server. After a timeout the client may retry to a different server or give up. A retry to a different server is counted as a request as well as a timeout
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
.. attribute:: csbradiusstatsunknowntype
This object indicates the number of RADIUS packets of unknown type which were received from this server
**type**\: int
**range:** 0..18446744073709551615
**units**\: packets
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbcallstatsinstanceindex = None
self.csbcallstatsserviceindex = None
self.csbradiusstatsentindex = None
self.csbradiusstatsacsaccpts = None
self.csbradiusstatsacschalls = None
self.csbradiusstatsacsrejects = None
self.csbradiusstatsacsreqs = None
self.csbradiusstatsacsrtrns = None
self.csbradiusstatsactreqs = None
self.csbradiusstatsactretrans = None
self.csbradiusstatsactrsps = None
self.csbradiusstatsbadauths = None
self.csbradiusstatsclientname = None
self.csbradiusstatsclienttype = None
self.csbradiusstatsdropped = None
self.csbradiusstatsmalformedrsps = None
self.csbradiusstatspending = None
self.csbradiusstatssrvrname = None
self.csbradiusstatstimeouts = None
self.csbradiusstatsunknowntype = None
@property
def _common_path(self):
if self.csbcallstatsinstanceindex is None:
raise YPYModelError('Key property csbcallstatsinstanceindex is None')
if self.csbcallstatsserviceindex is None:
raise YPYModelError('Key property csbcallstatsserviceindex is None')
if self.csbradiusstatsentindex is None:
raise YPYModelError('Key property csbradiusstatsentindex is None')
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbRadiusStatsTable/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbRadiusStatsEntry[CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsInstanceIndex = ' + str(self.csbcallstatsinstanceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsServiceIndex = ' + str(self.csbcallstatsserviceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbRadiusStatsEntIndex = ' + str(self.csbradiusstatsentindex) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbcallstatsinstanceindex is not None:
return True
if self.csbcallstatsserviceindex is not None:
return True
if self.csbradiusstatsentindex is not None:
return True
if self.csbradiusstatsacsaccpts is not None:
return True
if self.csbradiusstatsacschalls is not None:
return True
if self.csbradiusstatsacsrejects is not None:
return True
if self.csbradiusstatsacsreqs is not None:
return True
if self.csbradiusstatsacsrtrns is not None:
return True
if self.csbradiusstatsactreqs is not None:
return True
if self.csbradiusstatsactretrans is not None:
return True
if self.csbradiusstatsactrsps is not None:
return True
if self.csbradiusstatsbadauths is not None:
return True
if self.csbradiusstatsclientname is not None:
return True
if self.csbradiusstatsclienttype is not None:
return True
if self.csbradiusstatsdropped is not None:
return True
if self.csbradiusstatsmalformedrsps is not None:
return True
if self.csbradiusstatspending is not None:
return True
if self.csbradiusstatssrvrname is not None:
return True
if self.csbradiusstatstimeouts is not None:
return True
if self.csbradiusstatsunknowntype is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbradiusstatstable.Csbradiusstatsentry']['meta_info']
@property
def _common_path(self):
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbRadiusStatsTable'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbradiusstatsentry is not None:
for child_ref in self.csbradiusstatsentry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbradiusstatstable']['meta_info']
class Csbrfbillrealmstatstable(object):
"""
This table describes the Rf billing statistics information
which monitors the messages sent per\-realm by Rf billing
manager(SBC). SBC sends Rf billing data using Diameter as a
transport protocol. Rf billing uses only ACR and ACA Diameter
messages for the transport of billing data. The
Accounting\-Record\-Type AVP on the ACR message labels the type
of the accounting request. The following types are used by Rf
billing.
1. For session\-based charging, the types Start (session
begins), Interim (session is modified) and Stop (session ends)
are used.
2. For event\-based charging, the type Event is used when a
chargeable event occurs outside the scope of a session.
Each row of this table is identified by a value of
csbRfBillRealmStatsIndex and csbRfBillRealmStatsRealmName.
The other indices of this table are csbCallStatsInstanceIndex
defined in csbCallStatsInstanceTable and
csbCallStatsServiceIndex defined in csbCallStatsTable.
.. attribute:: csbrfbillrealmstatsentry
A conceptual row in the csbRfBillRealmStatsTable. There is an entry in this table for each realm, as identified by a value of csbRfBillRealmStatsIndex and csbRfBillRealmStatsRealmName. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: list of :py:class:`Csbrfbillrealmstatsentry <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbrfbillrealmstatstable.Csbrfbillrealmstatsentry>`
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbrfbillrealmstatsentry = YList()
self.csbrfbillrealmstatsentry.parent = self
self.csbrfbillrealmstatsentry.name = 'csbrfbillrealmstatsentry'
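# Illustrative sketch (assumes a 'stats' object read as in the comment above
# the RADIUS entry class; only the attribute names come from this model): the
# per-realm total ACR counters are the sum of the successful and failed ones,
# so a Start ACR success ratio can be derived per realm.
#
#   for realm in stats.csbrfbillrealmstatstable.csbrfbillrealmstatsentry:
#       total = realm.csbrfbillrealmstatstotalstartacrs or 0
#       succ = realm.csbrfbillrealmstatssuccstartacrs or 0
#       ratio = succ / total if total else None
#       print(realm.csbrfbillrealmstatsrealmname, ratio)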
class Csbrfbillrealmstatsentry(object):
"""
A conceptual row in the csbRfBillRealmStatsTable. There
is an entry in this table for each realm, as identified by a
value of csbRfBillRealmStatsIndex and
csbRfBillRealmStatsRealmName. The other indices of this
table are csbCallStatsInstanceIndex defined in
csbCallStatsInstanceTable and csbCallStatsServiceIndex defined
in csbCallStatsTable.
.. attribute:: csbcallstatsinstanceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsinstanceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatsinstancetable.Csbcallstatsinstanceentry>`
.. attribute:: csbcallstatsserviceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsserviceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatstable.Csbcallstatsentry>`
.. attribute:: csbrfbillrealmstatsindex <key>
This object indicates the billing method instance index. The range of valid values for this field is 0 \- 31
**type**\: int
**range:** 0..31
.. attribute:: csbrfbillrealmstatsrealmname <key>
This object indicates the realm for which these statistics are collected. The length of this object is zero when value is not assigned to it
**type**\: str
.. attribute:: csbrfbillrealmstatsfaileventacrs
This object indicates the total number of failed Event ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatsfailinterimacrs
This object indicates the total number of failed Interim ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatsfailstartacrs
This object indicates the total number of failed Start ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatsfailstopacrs
This object indicates the total number of failed Stop ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatssucceventacrs
This object indicates the total number of successful Event ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatssuccinterimacrs
This object indicates the total number of successful Interim ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatssuccstartacrs
This object indicates the total number of successful Start ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatssuccstopacrs
This object indicates the total number of successful Stop ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatstotaleventacrs
This object indicates the combined sum of successful and failed Event ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatstotalinterimacrs
This object indicates the combined sum of successful and failed Interim ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatstotalstartacrs
This object indicates the combined sum of successful and failed Start ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
.. attribute:: csbrfbillrealmstatstotalstopacrs
This object indicates the combined sum of successful and failed Stop ACRs since start of day or the last time the statistics were reset
**type**\: int
**range:** 0..4294967295
**units**\: ACRs
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbcallstatsinstanceindex = None
self.csbcallstatsserviceindex = None
self.csbrfbillrealmstatsindex = None
self.csbrfbillrealmstatsrealmname = None
self.csbrfbillrealmstatsfaileventacrs = None
self.csbrfbillrealmstatsfailinterimacrs = None
self.csbrfbillrealmstatsfailstartacrs = None
self.csbrfbillrealmstatsfailstopacrs = None
self.csbrfbillrealmstatssucceventacrs = None
self.csbrfbillrealmstatssuccinterimacrs = None
self.csbrfbillrealmstatssuccstartacrs = None
self.csbrfbillrealmstatssuccstopacrs = None
self.csbrfbillrealmstatstotaleventacrs = None
self.csbrfbillrealmstatstotalinterimacrs = None
self.csbrfbillrealmstatstotalstartacrs = None
self.csbrfbillrealmstatstotalstopacrs = None
@property
def _common_path(self):
if self.csbcallstatsinstanceindex is None:
raise YPYModelError('Key property csbcallstatsinstanceindex is None')
if self.csbcallstatsserviceindex is None:
raise YPYModelError('Key property csbcallstatsserviceindex is None')
if self.csbrfbillrealmstatsindex is None:
raise YPYModelError('Key property csbrfbillrealmstatsindex is None')
if self.csbrfbillrealmstatsrealmname is None:
raise YPYModelError('Key property csbrfbillrealmstatsrealmname is None')
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbRfBillRealmStatsTable/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbRfBillRealmStatsEntry[CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsInstanceIndex = ' + str(self.csbcallstatsinstanceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsServiceIndex = ' + str(self.csbcallstatsserviceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbRfBillRealmStatsIndex = ' + str(self.csbrfbillrealmstatsindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbRfBillRealmStatsRealmName = ' + str(self.csbrfbillrealmstatsrealmname) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbcallstatsinstanceindex is not None:
return True
if self.csbcallstatsserviceindex is not None:
return True
if self.csbrfbillrealmstatsindex is not None:
return True
if self.csbrfbillrealmstatsrealmname is not None:
return True
if self.csbrfbillrealmstatsfaileventacrs is not None:
return True
if self.csbrfbillrealmstatsfailinterimacrs is not None:
return True
if self.csbrfbillrealmstatsfailstartacrs is not None:
return True
if self.csbrfbillrealmstatsfailstopacrs is not None:
return True
if self.csbrfbillrealmstatssucceventacrs is not None:
return True
if self.csbrfbillrealmstatssuccinterimacrs is not None:
return True
if self.csbrfbillrealmstatssuccstartacrs is not None:
return True
if self.csbrfbillrealmstatssuccstopacrs is not None:
return True
if self.csbrfbillrealmstatstotaleventacrs is not None:
return True
if self.csbrfbillrealmstatstotalinterimacrs is not None:
return True
if self.csbrfbillrealmstatstotalstartacrs is not None:
return True
if self.csbrfbillrealmstatstotalstopacrs is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbrfbillrealmstatstable.Csbrfbillrealmstatsentry']['meta_info']
@property
def _common_path(self):
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbRfBillRealmStatsTable'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbrfbillrealmstatsentry is not None:
for child_ref in self.csbrfbillrealmstatsentry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbrfbillrealmstatstable']['meta_info']
class Csbsipmthdcurrentstatstable(object):
"""
This table reports count of SIP request and various SIP
responses for each SIP method on a SIP adjacency in a given
interval. Each entry in this table represents a SIP method, its
incoming and outgoing count, individual incoming and outgoing
count of various SIP responses for this method on a SIP
adjacency in a given interval. To understand the meaning of
interval please refer to the <Periodic Statistics> section in the
description of ciscoSbcStatsMIB.
This table is indexed on csbSIPMthdCurrentStatsAdjName,
csbSIPMthdCurrentStatsMethod and
csbSIPMthdCurrentStatsInterval. The other indices of this
table are csbCallStatsInstanceIndex defined in
csbCallStatsInstanceTable and csbCallStatsServiceIndex defined
in csbCallStatsTable.
.. attribute:: csbsipmthdcurrentstatsentry
A conceptual row in the csbSIPMthdCurrentStatsTable. Each row describes a SIP method and various responses count for this method on a given SIP adjacency and given interval. This table is indexed on csbSIPMthdCurrentStatsAdjName, csbSIPMthdCurrentStatsMethod and csbSIPMthdCurrentStatsInterval. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: list of :py:class:`Csbsipmthdcurrentstatsentry <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbsipmthdcurrentstatstable.Csbsipmthdcurrentstatsentry>`
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbsipmthdcurrentstatsentry = YList()
self.csbsipmthdcurrentstatsentry.parent = self
self.csbsipmthdcurrentstatsentry.name = 'csbsipmthdcurrentstatsentry'
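# Illustrative sketch (hedged; 'stats' is the placeholder read result used in
# the earlier comments): grouping the incoming response counters of one row by
# status class makes the per-method breakdown easier to inspect.
#
#   e = stats.csbsipmthdcurrentstatstable.csbsipmthdcurrentstatsentry[0]
#   incoming = {
#       '1xx': e.csbsipmthdcurrentstatsresp1xxin,
#       '2xx': e.csbsipmthdcurrentstatsresp2xxin,
#       '3xx': e.csbsipmthdcurrentstatsresp3xxin,
#       '4xx': e.csbsipmthdcurrentstatsresp4xxin,
#       '5xx': e.csbsipmthdcurrentstatsresp5xxin,
#       '6xx': e.csbsipmthdcurrentstatsresp6xxin,
#   }
#   print(e.csbsipmthdcurrentstatsadjname, e.csbsipmthdcurrentstatsmethodname,
#         incoming)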
class Csbsipmthdcurrentstatsentry(object):
"""
A conceptual row in the csbSIPMthdCurrentStatsTable. Each row
describes a SIP method and various responses count for this
method on a given SIP adjacency and given interval. This table
is indexed on csbSIPMthdCurrentStatsAdjName,
csbSIPMthdCurrentStatsMethod and
csbSIPMthdCurrentStatsInterval. The other indices of this
table are csbCallStatsInstanceIndex defined in
csbCallStatsInstanceTable and csbCallStatsServiceIndex defined
in csbCallStatsTable.
.. attribute:: csbcallstatsinstanceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsinstanceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatsinstancetable.Csbcallstatsinstanceentry>`
.. attribute:: csbcallstatsserviceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsserviceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatstable.Csbcallstatsentry>`
.. attribute:: csbsipmthdcurrentstatsadjname <key>
This object indicates the name of the SIP adjacency for which stats related with SIP request and all kind of corresponding SIP responses are reported. The object acts as an index of the table
**type**\: str
.. attribute:: csbsipmthdcurrentstatsmethod <key>
This object indicates the SIP method Request. The object acts as an index of the table
**type**\: :py:class:`CiscosbcsipmethodEnum <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscosbcsipmethodEnum>`
.. attribute:: csbsipmthdcurrentstatsinterval <key>
This object indicates the interval for which the periodic statistics information is to be displayed. The interval values can be 5 minutes, 15 minutes, 1 hour, 1 Day. This object acts as an index for the table
**type**\: :py:class:`CiscosbcperiodicstatsintervalEnum <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscosbcperiodicstatsintervalEnum>`
.. attribute:: csbsipmthdcurrentstatsmethodname
This object indicates the text representation of the SIP method request. E.g. INVITE, ACK, BYE etc
**type**\: str
.. attribute:: csbsipmthdcurrentstatsreqin
This object indicates the total incoming SIP message requests of this type on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: requests
.. attribute:: csbsipmthdcurrentstatsreqout
This object indicates the total outgoing SIP message requests of this type on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: requests
.. attribute:: csbsipmthdcurrentstatsresp1xxin
This object indicates the total 1xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp1xxout
This object indicates the total 1xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp2xxin
This object indicates the total 2xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp2xxout
This object indicates the total 2xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp3xxin
This object indicates the total 3xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp3xxout
This object indicates the total 3xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp4xxin
This object indicates the total 4xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp4xxout
This object indicates the total 4xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp5xxin
This object indicates the total 5xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp5xxout
This object indicates the total 5xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp6xxin
This object indicates the total 6xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdcurrentstatsresp6xxout
This object indicates the total 6xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbcallstatsinstanceindex = None
self.csbcallstatsserviceindex = None
self.csbsipmthdcurrentstatsadjname = None
self.csbsipmthdcurrentstatsmethod = None
self.csbsipmthdcurrentstatsinterval = None
self.csbsipmthdcurrentstatsmethodname = None
self.csbsipmthdcurrentstatsreqin = None
self.csbsipmthdcurrentstatsreqout = None
self.csbsipmthdcurrentstatsresp1xxin = None
self.csbsipmthdcurrentstatsresp1xxout = None
self.csbsipmthdcurrentstatsresp2xxin = None
self.csbsipmthdcurrentstatsresp2xxout = None
self.csbsipmthdcurrentstatsresp3xxin = None
self.csbsipmthdcurrentstatsresp3xxout = None
self.csbsipmthdcurrentstatsresp4xxin = None
self.csbsipmthdcurrentstatsresp4xxout = None
self.csbsipmthdcurrentstatsresp5xxin = None
self.csbsipmthdcurrentstatsresp5xxout = None
self.csbsipmthdcurrentstatsresp6xxin = None
self.csbsipmthdcurrentstatsresp6xxout = None
@property
def _common_path(self):
if self.csbcallstatsinstanceindex is None:
raise YPYModelError('Key property csbcallstatsinstanceindex is None')
if self.csbcallstatsserviceindex is None:
raise YPYModelError('Key property csbcallstatsserviceindex is None')
if self.csbsipmthdcurrentstatsadjname is None:
raise YPYModelError('Key property csbsipmthdcurrentstatsadjname is None')
if self.csbsipmthdcurrentstatsmethod is None:
raise YPYModelError('Key property csbsipmthdcurrentstatsmethod is None')
if self.csbsipmthdcurrentstatsinterval is None:
raise YPYModelError('Key property csbsipmthdcurrentstatsinterval is None')
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdCurrentStatsTable/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdCurrentStatsEntry[CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsInstanceIndex = ' + str(self.csbcallstatsinstanceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsServiceIndex = ' + str(self.csbcallstatsserviceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdCurrentStatsAdjName = ' + str(self.csbsipmthdcurrentstatsadjname) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdCurrentStatsMethod = ' + str(self.csbsipmthdcurrentstatsmethod) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdCurrentStatsInterval = ' + str(self.csbsipmthdcurrentstatsinterval) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbcallstatsinstanceindex is not None:
return True
if self.csbcallstatsserviceindex is not None:
return True
if self.csbsipmthdcurrentstatsadjname is not None:
return True
if self.csbsipmthdcurrentstatsmethod is not None:
return True
if self.csbsipmthdcurrentstatsinterval is not None:
return True
if self.csbsipmthdcurrentstatsmethodname is not None:
return True
if self.csbsipmthdcurrentstatsreqin is not None:
return True
if self.csbsipmthdcurrentstatsreqout is not None:
return True
if self.csbsipmthdcurrentstatsresp1xxin is not None:
return True
if self.csbsipmthdcurrentstatsresp1xxout is not None:
return True
if self.csbsipmthdcurrentstatsresp2xxin is not None:
return True
if self.csbsipmthdcurrentstatsresp2xxout is not None:
return True
if self.csbsipmthdcurrentstatsresp3xxin is not None:
return True
if self.csbsipmthdcurrentstatsresp3xxout is not None:
return True
if self.csbsipmthdcurrentstatsresp4xxin is not None:
return True
if self.csbsipmthdcurrentstatsresp4xxout is not None:
return True
if self.csbsipmthdcurrentstatsresp5xxin is not None:
return True
if self.csbsipmthdcurrentstatsresp5xxout is not None:
return True
if self.csbsipmthdcurrentstatsresp6xxin is not None:
return True
if self.csbsipmthdcurrentstatsresp6xxout is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbsipmthdcurrentstatstable.Csbsipmthdcurrentstatsentry']['meta_info']
@property
def _common_path(self):
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdCurrentStatsTable'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbsipmthdcurrentstatsentry is not None:
for child_ref in self.csbsipmthdcurrentstatsentry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbsipmthdcurrentstatstable']['meta_info']
class Csbsipmthdhistorystatstable(object):
"""
This table provide historical count of SIP request and various
SIP responses for each SIP method on a SIP adjacency in various
interval length defined by the csbSIPMthdHistoryStatsInterval
object. Each entry in this table represents a SIP method, its
incoming and outgoing count, individual incoming and outgoing
count of various SIP responses for this method on a SIP
adjacency in a given interval. The possible values of interval
will be previous 5 minutes, previous 15 minutes, previous 1 hour
and previous day. To understand the meaning of interval please
refer to the <Periodic Statistics> section in the description of ciscoSbcStatsMIB.
This table is indexed on csbSIPMthdHistoryStatsAdjName,
csbSIPMthdHistoryStatsMethod and
csbSIPMthdHistoryStatsInterval. The other indices of this
table are csbCallStatsInstanceIndex defined in
csbCallStatsInstanceTable and csbCallStatsServiceIndex defined
in csbCallStatsTable.
.. attribute:: csbsipmthdhistorystatsentry
A conceptual row in the csbSIPMthdHistoryStatsTable. The entries in this table are updated as interval completes in the csbSIPMthdCurrentStatsTable table and the data is moved from that table to this one. This table is indexed on csbSIPMthdHistoryStatsAdjName, csbSIPMthdHistoryStatsMethod and csbSIPMthdHistoryStatsInterval. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: list of :py:class:`Csbsipmthdhistorystatsentry <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbsipmthdhistorystatstable.Csbsipmthdhistorystatsentry>`
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbsipmthdhistorystatsentry = YList()
self.csbsipmthdhistorystatsentry.parent = self
self.csbsipmthdhistorystatsentry.name = 'csbsipmthdhistorystatsentry'
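# Illustrative sketch (hedged assumption about typical use; only the attribute
# names are taken from this model, the adjacency name is a placeholder):
# history rows carry the same counters as the current table, so past INVITE
# traffic on one adjacency can be filtered out of a read result like this.
#
#   invites = [e for e in
#              stats.csbsipmthdhistorystatstable.csbsipmthdhistorystatsentry
#              if e.csbsipmthdhistorystatsmethodname == 'INVITE'
#              and e.csbsipmthdhistorystatsadjname == 'SIP-ADJ-1']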
class Csbsipmthdhistorystatsentry(object):
"""
A conceptual row in the csbSIPMthdHistoryStatsTable. The
entries in this table are updated as interval completes in
the csbSIPMthdCurrentStatsTable table and the data is
moved from that table to this one.
This table is indexed on csbSIPMthdHistoryStatsAdjName,
csbSIPMthdHistoryStatsMethod and
csbSIPMthdHistoryStatsInterval. The other indices of this
table are csbCallStatsInstanceIndex defined in
csbCallStatsInstanceTable and csbCallStatsServiceIndex
defined in csbCallStatsTable.
.. attribute:: csbcallstatsinstanceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsinstanceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatsinstancetable.Csbcallstatsinstanceentry>`
.. attribute:: csbcallstatsserviceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsserviceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatstable.Csbcallstatsentry>`
.. attribute:: csbsipmthdhistorystatsadjname <key>
This object indicates the name of the SIP adjacency for which stats related with SIP request and all kind of corresponding SIP responses are reported. The object acts as an index of the table
**type**\: str
.. attribute:: csbsipmthdhistorystatsmethod <key>
This object indicates the SIP method Request. The object acts as an index of the table
**type**\: :py:class:`CiscosbcsipmethodEnum <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscosbcsipmethodEnum>`
.. attribute:: csbsipmthdhistorystatsinterval <key>
This object indicates the interval for which the historical statistics information is to be displayed. The interval values can be previous 5 minutes, previous 15 minutes, previous 1 hour and previous 1 Day. This object acts as an index for the table
**type**\: :py:class:`CiscosbcperiodicstatsintervalEnum <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscosbcperiodicstatsintervalEnum>`
.. attribute:: csbsipmthdhistorystatsmethodname
This object indicates the text representation of the SIP method request. E.g. INVITE, ACK, BYE etc
**type**\: str
.. attribute:: csbsipmthdhistorystatsreqin
This object indicates the total incoming SIP message requests of this type on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: requests
.. attribute:: csbsipmthdhistorystatsreqout
This object indicates the total outgoing SIP message requests of this type on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: requests
.. attribute:: csbsipmthdhistorystatsresp1xxin
This object indicates the total 1xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp1xxout
This object indicates the total 1xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp2xxin
This object indicates the total 2xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp2xxout
This object indicates the total 2xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp3xxin
This object indicates the total 3xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp3xxout
This object indicates the total 3xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp4xxin
This object indicates the total 4xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp4xxout
This object indicates the total 4xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp5xxin
This object indicates the total 5xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp5xxout
This object indicates the total 5xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp6xxin
This object indicates the total 6xx responses for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdhistorystatsresp6xxout
This object indicates the total 6xx responses for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbcallstatsinstanceindex = None
self.csbcallstatsserviceindex = None
self.csbsipmthdhistorystatsadjname = None
self.csbsipmthdhistorystatsmethod = None
self.csbsipmthdhistorystatsinterval = None
self.csbsipmthdhistorystatsmethodname = None
self.csbsipmthdhistorystatsreqin = None
self.csbsipmthdhistorystatsreqout = None
self.csbsipmthdhistorystatsresp1xxin = None
self.csbsipmthdhistorystatsresp1xxout = None
self.csbsipmthdhistorystatsresp2xxin = None
self.csbsipmthdhistorystatsresp2xxout = None
self.csbsipmthdhistorystatsresp3xxin = None
self.csbsipmthdhistorystatsresp3xxout = None
self.csbsipmthdhistorystatsresp4xxin = None
self.csbsipmthdhistorystatsresp4xxout = None
self.csbsipmthdhistorystatsresp5xxin = None
self.csbsipmthdhistorystatsresp5xxout = None
self.csbsipmthdhistorystatsresp6xxin = None
self.csbsipmthdhistorystatsresp6xxout = None
@property
def _common_path(self):
if self.csbcallstatsinstanceindex is None:
raise YPYModelError('Key property csbcallstatsinstanceindex is None')
if self.csbcallstatsserviceindex is None:
raise YPYModelError('Key property csbcallstatsserviceindex is None')
if self.csbsipmthdhistorystatsadjname is None:
raise YPYModelError('Key property csbsipmthdhistorystatsadjname is None')
if self.csbsipmthdhistorystatsmethod is None:
raise YPYModelError('Key property csbsipmthdhistorystatsmethod is None')
if self.csbsipmthdhistorystatsinterval is None:
raise YPYModelError('Key property csbsipmthdhistorystatsinterval is None')
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdHistoryStatsTable/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdHistoryStatsEntry[CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsInstanceIndex = ' + str(self.csbcallstatsinstanceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsServiceIndex = ' + str(self.csbcallstatsserviceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdHistoryStatsAdjName = ' + str(self.csbsipmthdhistorystatsadjname) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdHistoryStatsMethod = ' + str(self.csbsipmthdhistorystatsmethod) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdHistoryStatsInterval = ' + str(self.csbsipmthdhistorystatsinterval) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbcallstatsinstanceindex is not None:
return True
if self.csbcallstatsserviceindex is not None:
return True
if self.csbsipmthdhistorystatsadjname is not None:
return True
if self.csbsipmthdhistorystatsmethod is not None:
return True
if self.csbsipmthdhistorystatsinterval is not None:
return True
if self.csbsipmthdhistorystatsmethodname is not None:
return True
if self.csbsipmthdhistorystatsreqin is not None:
return True
if self.csbsipmthdhistorystatsreqout is not None:
return True
if self.csbsipmthdhistorystatsresp1xxin is not None:
return True
if self.csbsipmthdhistorystatsresp1xxout is not None:
return True
if self.csbsipmthdhistorystatsresp2xxin is not None:
return True
if self.csbsipmthdhistorystatsresp2xxout is not None:
return True
if self.csbsipmthdhistorystatsresp3xxin is not None:
return True
if self.csbsipmthdhistorystatsresp3xxout is not None:
return True
if self.csbsipmthdhistorystatsresp4xxin is not None:
return True
if self.csbsipmthdhistorystatsresp4xxout is not None:
return True
if self.csbsipmthdhistorystatsresp5xxin is not None:
return True
if self.csbsipmthdhistorystatsresp5xxout is not None:
return True
if self.csbsipmthdhistorystatsresp6xxin is not None:
return True
if self.csbsipmthdhistorystatsresp6xxout is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbsipmthdhistorystatstable.Csbsipmthdhistorystatsentry']['meta_info']
@property
def _common_path(self):
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdHistoryStatsTable'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbsipmthdhistorystatsentry is not None:
for child_ref in self.csbsipmthdhistorystatsentry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbsipmthdhistorystatstable']['meta_info']
class Csbsipmthdrccurrentstatstable(object):
"""
This table reports SIP method request and response code
statistics for each method and response code combination on
given SIP adjacency in a given interval. To understand the
meaning of interval please refer to the <Periodic Statistics> section
in the description of ciscoSbcStatsMIB. An exact lookup will return
a row only if \-
1) detailed response code statistics are turned on in SBC
2) the count of response code messages sent or received is non\-zero for the
given SIP adjacency, method and interval.
Also an inexact lookup will only return rows for messages with
non\-zero counts, to protect the user from large numbers of rows
for response codes which have not been received or sent.
.. attribute:: csbsipmthdrccurrentstatsentry
A conceptual row in the csbSIPMthdRCCurrentStatsTable. Each entry in this table represents a method and response code combination. Each entry in this table is identified by a value of csbSIPMthdRCCurrentStatsAdjName, csbSIPMthdRCCurrentStatsMethod, csbSIPMthdRCCurrentStatsRespCode and csbSIPMthdRCCurrentStatsInterval. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: list of :py:class:`Csbsipmthdrccurrentstatsentry <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbsipmthdrccurrentstatstable.Csbsipmthdrccurrentstatsentry>`
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbsipmthdrccurrentstatsentry = YList()
self.csbsipmthdrccurrentstatsentry.parent = self
self.csbsipmthdrccurrentstatsentry.name = 'csbsipmthdrccurrentstatsentry'
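# Illustrative sketch (hedged; rows exist only when detailed response code
# statistics are enabled on the SBC and the counts are non-zero, as described
# above): building a response-code map for one method from a read result.
#
#   codes = {}
#   for e in stats.csbsipmthdrccurrentstatstable.csbsipmthdrccurrentstatsentry:
#       if e.csbsipmthdrccurrentstatsmethodname == 'INVITE':
#           codes[e.csbsipmthdrccurrentstatsrespcode] = (
#               e.csbsipmthdrccurrentstatsrespin,
#               e.csbsipmthdrccurrentstatsrespout)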
class Csbsipmthdrccurrentstatsentry(object):
"""
A conceptual row in the csbSIPMthdRCCurrentStatsTable. Each
entry in this table represents a method and response code
combination. Each entry in this table is identified by a value
of csbSIPMthdRCCurrentStatsAdjName,
csbSIPMthdRCCurrentStatsMethod,
csbSIPMthdRCCurrentStatsRespCode and
csbSIPMthdRCCurrentStatsInterval. The other indices of this
table are csbCallStatsInstanceIndex defined in
csbCallStatsInstanceTable and csbCallStatsServiceIndex defined
in csbCallStatsTable.
.. attribute:: csbcallstatsinstanceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsinstanceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatsinstancetable.Csbcallstatsinstanceentry>`
.. attribute:: csbcallstatsserviceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsserviceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatstable.Csbcallstatsentry>`
.. attribute:: csbsipmthdrccurrentstatsadjname <key>
This identifies the name of the adjacency for which statistics are reported. This object acts as an index for the table
**type**\: str
.. attribute:: csbsipmthdrccurrentstatsmethod <key>
This object indicates the SIP method request. This object acts as an index for the table
**type**\: :py:class:`CiscosbcsipmethodEnum <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscosbcsipmethodEnum>`
.. attribute:: csbsipmthdrccurrentstatsrespcode <key>
This object indicates the response code for the SIP message request. The range of valid values for SIP response codes is 100 \- 999. This object acts as an index for the table
**type**\: int
**range:** 0..4294967295
.. attribute:: csbsipmthdrccurrentstatsinterval <key>
This object identifies the interval for which the periodic statistics information is to be displayed. The interval values can be 5 minutes, 15 minutes, 1 hour, 1 Day. This object acts as an index for the table
**type**\: :py:class:`CiscosbcperiodicstatsintervalEnum <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscosbcperiodicstatsintervalEnum>`
.. attribute:: csbsipmthdrccurrentstatsmethodname
This object indicates the text representation of the SIP method request. E.g. INVITE, ACK, BYE etc
**type**\: str
.. attribute:: csbsipmthdrccurrentstatsrespin
This object indicates the total SIP messages with this response code for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdrccurrentstatsrespout
This object indicates the total SIP messages with this response code for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbcallstatsinstanceindex = None
self.csbcallstatsserviceindex = None
self.csbsipmthdrccurrentstatsadjname = None
self.csbsipmthdrccurrentstatsmethod = None
self.csbsipmthdrccurrentstatsrespcode = None
self.csbsipmthdrccurrentstatsinterval = None
self.csbsipmthdrccurrentstatsmethodname = None
self.csbsipmthdrccurrentstatsrespin = None
self.csbsipmthdrccurrentstatsrespout = None
@property
def _common_path(self):
if self.csbcallstatsinstanceindex is None:
raise YPYModelError('Key property csbcallstatsinstanceindex is None')
if self.csbcallstatsserviceindex is None:
raise YPYModelError('Key property csbcallstatsserviceindex is None')
if self.csbsipmthdrccurrentstatsadjname is None:
raise YPYModelError('Key property csbsipmthdrccurrentstatsadjname is None')
if self.csbsipmthdrccurrentstatsmethod is None:
raise YPYModelError('Key property csbsipmthdrccurrentstatsmethod is None')
if self.csbsipmthdrccurrentstatsrespcode is None:
raise YPYModelError('Key property csbsipmthdrccurrentstatsrespcode is None')
if self.csbsipmthdrccurrentstatsinterval is None:
raise YPYModelError('Key property csbsipmthdrccurrentstatsinterval is None')
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCCurrentStatsTable/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCCurrentStatsEntry[CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsInstanceIndex = ' + str(self.csbcallstatsinstanceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsServiceIndex = ' + str(self.csbcallstatsserviceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCCurrentStatsAdjName = ' + str(self.csbsipmthdrccurrentstatsadjname) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCCurrentStatsMethod = ' + str(self.csbsipmthdrccurrentstatsmethod) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCCurrentStatsRespCode = ' + str(self.csbsipmthdrccurrentstatsrespcode) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCCurrentStatsInterval = ' + str(self.csbsipmthdrccurrentstatsinterval) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbcallstatsinstanceindex is not None:
return True
if self.csbcallstatsserviceindex is not None:
return True
if self.csbsipmthdrccurrentstatsadjname is not None:
return True
if self.csbsipmthdrccurrentstatsmethod is not None:
return True
if self.csbsipmthdrccurrentstatsrespcode is not None:
return True
if self.csbsipmthdrccurrentstatsinterval is not None:
return True
if self.csbsipmthdrccurrentstatsmethodname is not None:
return True
if self.csbsipmthdrccurrentstatsrespin is not None:
return True
if self.csbsipmthdrccurrentstatsrespout is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbsipmthdrccurrentstatstable.Csbsipmthdrccurrentstatsentry']['meta_info']
@property
def _common_path(self):
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCCurrentStatsTable'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbsipmthdrccurrentstatsentry is not None:
for child_ref in self.csbsipmthdrccurrentstatsentry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbsipmthdrccurrentstatstable']['meta_info']
class Csbsipmthdrchistorystatstable(object):
"""
This table reports historical data for SIP method request and
response code statistics for each method and response code
combination in a given past interval. The possible values of
interval will be previous 5 minutes, previous 15 minutes,
previous 1 hour and previous day. To understand the
meaning of interval please refer to the <Periodic Statistics> section
in the description of ciscoSbcStatsMIB. An exact lookup will return
a row only if \-
1) detailed response code statistics are turned on in SBC
2) response code messages sent or received is non zero for
given SIP adjacency, method and interval.
Also an inexact lookup will only return rows for messages with
non\-zero counts, to protect the user from large numbers of rows
for response codes which have not been received or sent.
.. attribute:: csbsipmthdrchistorystatsentry
A conceptual row in the csbSIPMthdRCHistoryStatsTable. The entries in this table are updated as interval completes in the csbSIPMthdRCCurrentStatsTable table and the data is moved from that table to this one. Each entry in this table is identified by a value of csbSIPMthdRCHistoryStatsAdjName, csbSIPMthdRCHistoryStatsMethod, csbSIPMthdRCHistoryStatsRespCode and csbSIPMthdRCHistoryStatsInterval. The other indices of this table are csbCallStatsInstanceIndex defined in csbCallStatsInstanceTable and csbCallStatsServiceIndex defined in csbCallStatsTable
**type**\: list of :py:class:`Csbsipmthdrchistorystatsentry <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscoSessBorderCtrlrStatsMib.Csbsipmthdrchistorystatstable.Csbsipmthdrchistorystatsentry>`
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbsipmthdrchistorystatsentry = YList()
self.csbsipmthdrchistorystatsentry.parent = self
self.csbsipmthdrchistorystatsentry.name = 'csbsipmthdrchistorystatsentry'
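# Illustrative sketch (hedged; same caveats as the current-interval table
# above): pulling the historical rows for one response code, e.g. 503, to see
# how often a method was rejected with it in past intervals.
#
#   rejected = [e for e in
#               stats.csbsipmthdrchistorystatstable.csbsipmthdrchistorystatsentry
#               if e.csbsipmthdrchistorystatsrespcode == 503]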
class Csbsipmthdrchistorystatsentry(object):
"""
A conceptual row in the csbSIPMthdRCHistoryStatsTable. The
entries in this table are updated as interval completes in
the csbSIPMthdRCCurrentStatsTable table and the data is
moved from that table to this one. Each entry in this table
is identified by a value of csbSIPMthdRCHistoryStatsAdjName,
csbSIPMthdRCHistoryStatsMethod,
csbSIPMthdRCHistoryStatsRespCode and
csbSIPMthdRCHistoryStatsInterval. The other indices of this
table are csbCallStatsInstanceIndex defined in
csbCallStatsInstanceTable and csbCallStatsServiceIndex defined
in csbCallStatsTable.
.. attribute:: csbcallstatsinstanceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsinstanceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatsinstancetable.Csbcallstatsinstanceentry>`
.. attribute:: csbcallstatsserviceindex <key>
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`csbcallstatsserviceindex <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscoSessBorderCtrlrCallStatsMib.Csbcallstatstable.Csbcallstatsentry>`
.. attribute:: csbsipmthdrchistorystatsadjname <key>
This identifies the name of the adjacency for which statistics are reported. This object acts as an index for the table
**type**\: str
.. attribute:: csbsipmthdrchistorystatsmethod <key>
This object indicates the SIP method request. This object acts as an index for the table
**type**\: :py:class:`CiscosbcsipmethodEnum <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB.CiscosbcsipmethodEnum>`
.. attribute:: csbsipmthdrchistorystatsrespcode <key>
This object indicates the response code for the SIP message request. The range of valid values for SIP response codes is 100 \- 999. This object acts as an index for the table
**type**\: int
**range:** 0..4294967295
.. attribute:: csbsipmthdrchistorystatsinterval <key>
This object identifies the interval for which the periodic statistics information is to be displayed. The interval values can be previous 5 minutes, previous 15 minutes, previous 1 hour, previous 1 Day. This object acts as an index for the table
**type**\: :py:class:`CiscosbcperiodicstatsintervalEnum <ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB.CiscosbcperiodicstatsintervalEnum>`
.. attribute:: csbsipmthdrchistorystatsmethodname
This object indicates the text representation of the SIP method request. E.g. INVITE, ACK, BYE etc
**type**\: str
.. attribute:: csbsipmthdrchistorystatsrespin
This object indicates the total SIP messages with this response code for this method received on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
.. attribute:: csbsipmthdrchistorystatsrespout
This object indicates the total SIP messages with this response code for this method sent on this SIP adjacency
**type**\: int
**range:** 0..4294967295
**units**\: responses
"""
_prefix = 'CISCO-SESS-BORDER-CTRLR-STATS-MIB'
_revision = '2010-09-15'
def __init__(self):
self.parent = None
self.csbcallstatsinstanceindex = None
self.csbcallstatsserviceindex = None
self.csbsipmthdrchistorystatsadjname = None
self.csbsipmthdrchistorystatsmethod = None
self.csbsipmthdrchistorystatsrespcode = None
self.csbsipmthdrchistorystatsinterval = None
self.csbsipmthdrchistorystatsmethodname = None
self.csbsipmthdrchistorystatsrespin = None
self.csbsipmthdrchistorystatsrespout = None
@property
def _common_path(self):
if self.csbcallstatsinstanceindex is None:
raise YPYModelError('Key property csbcallstatsinstanceindex is None')
if self.csbcallstatsserviceindex is None:
raise YPYModelError('Key property csbcallstatsserviceindex is None')
if self.csbsipmthdrchistorystatsadjname is None:
raise YPYModelError('Key property csbsipmthdrchistorystatsadjname is None')
if self.csbsipmthdrchistorystatsmethod is None:
raise YPYModelError('Key property csbsipmthdrchistorystatsmethod is None')
if self.csbsipmthdrchistorystatsrespcode is None:
raise YPYModelError('Key property csbsipmthdrchistorystatsrespcode is None')
if self.csbsipmthdrchistorystatsinterval is None:
raise YPYModelError('Key property csbsipmthdrchistorystatsinterval is None')
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCHistoryStatsTable/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCHistoryStatsEntry[CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsInstanceIndex = ' + str(self.csbcallstatsinstanceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbCallStatsServiceIndex = ' + str(self.csbcallstatsserviceindex) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCHistoryStatsAdjName = ' + str(self.csbsipmthdrchistorystatsadjname) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCHistoryStatsMethod = ' + str(self.csbsipmthdrchistorystatsmethod) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCHistoryStatsRespCode = ' + str(self.csbsipmthdrchistorystatsrespcode) + '][CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCHistoryStatsInterval = ' + str(self.csbsipmthdrchistorystatsinterval) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbcallstatsinstanceindex is not None:
return True
if self.csbcallstatsserviceindex is not None:
return True
if self.csbsipmthdrchistorystatsadjname is not None:
return True
if self.csbsipmthdrchistorystatsmethod is not None:
return True
if self.csbsipmthdrchistorystatsrespcode is not None:
return True
if self.csbsipmthdrchistorystatsinterval is not None:
return True
if self.csbsipmthdrchistorystatsmethodname is not None:
return True
if self.csbsipmthdrchistorystatsrespin is not None:
return True
if self.csbsipmthdrchistorystatsrespout is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbsipmthdrchistorystatstable.Csbsipmthdrchistorystatsentry']['meta_info']
@property
def _common_path(self):
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB/CISCO-SESS-BORDER-CTRLR-STATS-MIB:csbSIPMthdRCHistoryStatsTable'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbsipmthdrchistorystatsentry is not None:
for child_ref in self.csbsipmthdrchistorystatsentry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib.Csbsipmthdrchistorystatstable']['meta_info']
@property
def _common_path(self):
return '/CISCO-SESS-BORDER-CTRLR-STATS-MIB:CISCO-SESS-BORDER-CTRLR-STATS-MIB'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.csbradiusstatstable is not None and self.csbradiusstatstable._has_data():
return True
if self.csbrfbillrealmstatstable is not None and self.csbrfbillrealmstatstable._has_data():
return True
if self.csbsipmthdcurrentstatstable is not None and self.csbsipmthdcurrentstatstable._has_data():
return True
if self.csbsipmthdhistorystatstable is not None and self.csbsipmthdhistorystatstable._has_data():
return True
if self.csbsipmthdrccurrentstatstable is not None and self.csbsipmthdrccurrentstatstable._has_data():
return True
if self.csbsipmthdrchistorystatstable is not None and self.csbsipmthdrchistorystatstable._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _CISCO_SESS_BORDER_CTRLR_STATS_MIB as meta
return meta._meta_table['CiscoSessBorderCtrlrStatsMib']['meta_info']
| apache-2.0 | 1,297,357,869,498,135,000 | 44.94701 | 947 | 0.6242 | false |
Bitcoin-ABC/bitcoin-abc | test/functional/mempool_updatefromblock.py | 1 | 6905 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool descendants/ancestors information update.
Test mempool update of transaction descendants/ancestors information (count, size)
when transactions have been re-added from a disconnected block to the mempool.
"""
import time
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class MempoolUpdateFromBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [
['-limitdescendantsize=5000', '-limitancestorsize=5000']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def transaction_graph_test(self, size, n_tx_to_mine=None,
start_input_txid='', end_address='',
fee=Decimal(1000)):
"""Create an acyclic tournament (a type of directed graph) of
transactions and use it for testing.
Keyword arguments:
size -- the order N of the tournament which is equal to the number
of the created transactions
        n_tx_to_mine -- a list of transaction counts; each time the number of
                        created transactions reaches one of these values, they
                        are mined into a block
If all of the N created transactions tx[0]..tx[N-1] reside in the mempool,
the following holds:
the tx[K] transaction:
- has N-K descendants (including this one), and
- has K+1 ancestors (including this one)
More details: https://en.wikipedia.org/wiki/Tournament_(graph_theory)
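
        For example (illustrative): with size=3 and all three transactions in
        the mempool, tx[0] has 3 descendants and 1 ancestor, tx[1] has 2
        descendants and 2 ancestors, and tx[2] has 1 descendant and 3
        ancestors (each count includes the transaction itself).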
"""
if not start_input_txid:
start_input_txid = self.nodes[0].getblock(
self.nodes[0].getblockhash(1))['tx'][0]
if not end_address:
end_address = self.nodes[0].getnewaddress()
first_block_hash = ''
tx_id = []
tx_size = []
self.log.info('Creating {} transactions...'.format(size))
for i in range(0, size):
self.log.debug('Preparing transaction #{}...'.format(i))
# Prepare inputs.
if i == 0:
inputs = [{'txid': start_input_txid, 'vout': 0}]
inputs_value = self.nodes[0].gettxout(
start_input_txid, 0)['value']
else:
inputs = []
inputs_value = 0
for j, tx in enumerate(tx_id[0:i]):
# Transaction tx[K] is a child of each of previous
# transactions tx[0]..tx[K-1] at their output K-1.
vout = i - j - 1
inputs.append({'txid': tx_id[j], 'vout': vout})
inputs_value += self.nodes[0].gettxout(tx, vout)['value']
self.log.debug('inputs={}'.format(inputs))
self.log.debug('inputs_value={}'.format(inputs_value))
# Prepare outputs.
tx_count = i + 1
if tx_count < size:
# Transaction tx[K] is an ancestor of each of subsequent
# transactions tx[K+1]..tx[N-1].
n_outputs = size - tx_count
output_value = (
(inputs_value -
fee) /
Decimal(n_outputs)).quantize(
Decimal('0.01'))
outputs = {}
for n in range(0, n_outputs):
outputs[self.nodes[0].getnewaddress()] = output_value
else:
output_value = (
inputs_value -
fee).quantize(
Decimal('0.01'))
outputs = {end_address: output_value}
self.log.debug('output_value={}'.format(output_value))
self.log.debug('outputs={}'.format(outputs))
# Create a new transaction.
unsigned_raw_tx = self.nodes[0].createrawtransaction(
inputs, outputs)
signed_raw_tx = self.nodes[0].signrawtransactionwithwallet(
unsigned_raw_tx)
tx_id.append(
self.nodes[0].sendrawtransaction(
signed_raw_tx['hex']))
tx_size.append(self.nodes[0].getrawmempool(
True)[tx_id[-1]]['size'])
if tx_count in n_tx_to_mine:
# The created transactions are mined into blocks by batches.
self.log.info('The batch of {} transactions has been accepted'
' into the mempool.'.format(len(self.nodes[0].getrawmempool())))
block_hash = self.nodes[0].generate(1)[0]
if not first_block_hash:
first_block_hash = block_hash
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.info(
'All of the transactions from the current batch have been'
' mined into a block.')
elif tx_count == size:
# At the end all of the mined blocks are invalidated, and all of the created
# transactions should be re-added from disconnected blocks to
# the mempool.
self.log.info('The last batch of {} transactions has been'
' accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
start = time.time()
self.nodes[0].invalidateblock(first_block_hash)
end = time.time()
assert_equal(len(self.nodes[0].getrawmempool()), size)
self.log.info(
'All of the recently mined transactions have been re-added'
' into the mempool in {} seconds.'.format(end - start))
self.log.info(
'Checking descendants/ancestors properties of all of the'
' in-mempool transactions...')
for k, tx in enumerate(tx_id):
self.log.debug('Check transaction #{}.'.format(k))
assert_equal(self.nodes[0].getrawmempool(True)[
tx]['descendantcount'], size - k)
assert_equal(self.nodes[0].getrawmempool(True)[
tx]['descendantsize'], sum(tx_size[k:size]))
assert_equal(self.nodes[0].getrawmempool(
True)[tx]['ancestorcount'], k + 1)
assert_equal(self.nodes[0].getrawmempool(True)[
tx]['ancestorsize'], sum(tx_size[0:(k + 1)]))
def run_test(self):
        # Limit the batch size to DEFAULT_ANCESTOR_LIMIT = 50 so that the
        # "too many unconfirmed parents" error is not triggered.
self.transaction_graph_test(size=200, n_tx_to_mine=[50, 100, 150])
if __name__ == '__main__':
MempoolUpdateFromBlockTest().main()
| mit | -3,708,750,498,633,921,000 | 42.702532 | 103 | 0.544388 | false |
noironetworks/horizon | horizon/workflows/base.py | 1 | 35452 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from importlib import import_module
import inspect
import logging
from django.conf import settings
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django import template
from django.template.defaultfilters import linebreaks
from django.template.defaultfilters import safe
from django.template.defaultfilters import slugify
from django import urls
from django.utils.encoding import force_text
from django.utils import module_loading
from django.utils.translation import ugettext_lazy as _
from openstack_auth import policy
import six
from horizon import base
from horizon import exceptions
from horizon.templatetags.horizon import has_permissions
from horizon.utils import html
LOG = logging.getLogger(__name__)
class WorkflowContext(dict):
def __init__(self, workflow, *args, **kwargs):
super(WorkflowContext, self).__init__(*args, **kwargs)
self._workflow = workflow
def __setitem__(self, key, val):
super(WorkflowContext, self).__setitem__(key, val)
return self._workflow._trigger_handlers(key)
def __delitem__(self, key):
return self.__setitem__(key, None)
def set(self, key, val):
return self.__setitem__(key, val)
def unset(self, key):
return self.__delitem__(key)
class ActionMetaclass(forms.forms.DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
# Pop Meta for later processing
opts = attrs.pop("Meta", None)
# Create our new class
cls = super(ActionMetaclass, mcs).__new__(mcs, name, bases, attrs)
# Process options from Meta
cls.name = getattr(opts, "name", name)
cls.slug = getattr(opts, "slug", slugify(name))
cls.permissions = getattr(opts, "permissions", ())
cls.policy_rules = getattr(opts, "policy_rules", ())
cls.progress_message = getattr(opts,
"progress_message",
_("Processing..."))
cls.help_text = getattr(opts, "help_text", "")
cls.help_text_template = getattr(opts, "help_text_template", None)
return cls
@six.python_2_unicode_compatible
@six.add_metaclass(ActionMetaclass)
class Action(forms.Form):
"""An ``Action`` represents an atomic logical interaction with the system.
This is easier to understand with a conceptual example: in the context of
a "launch instance" workflow, actions would include "naming the instance",
"selecting an image", and ultimately "launching the instance".
Because ``Actions`` are always interactive, they always provide form
controls, and thus inherit from Django's ``Form`` class. However, they
have some additional intelligence added to them:
* ``Actions`` are aware of the permissions required to complete them.
* ``Actions`` have a meta-level concept of "help text" which is meant to be
displayed in such a way as to give context to the action regardless of
where the action is presented in a site or workflow.
* ``Actions`` understand how to handle their inputs and produce outputs,
much like :class:`~horizon.forms.SelfHandlingForm` does now.
``Action`` classes may define the following attributes in a ``Meta``
class within them:
.. attribute:: name
The verbose name for this action. Defaults to the name of the class.
.. attribute:: slug
A semi-unique slug for this action. Defaults to the "slugified" name
of the class.
.. attribute:: permissions
A list of permission names which this action requires in order to be
completed. Defaults to an empty list (``[]``).
.. attribute:: policy_rules
list of scope and rule tuples to do policy checks on, the
composition of which is (scope, rule)
* scope: service type managing the policy for action
* rule: string representing the action to be checked
for a policy that requires a single rule check::
policy_rules should look like
"(("compute", "compute:create_instance"),)"
for a policy that requires multiple rule checks::
rules should look like
"(("identity", "identity:list_users"),
("identity", "identity:list_roles"))"
where two service-rule clauses are OR-ed.
.. attribute:: help_text
A string of simple help text to be displayed alongside the Action's
fields.
.. attribute:: help_text_template
A path to a template which contains more complex help text to be
displayed alongside the Action's fields. In conjunction with
:meth:`~horizon.workflows.Action.get_help_text` method you can
customize your help text template to display practically anything.
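
    Example (illustrative sketch only; the class, field and slug names below
    are hypothetical, not part of Horizon)::

        class SetDetailsAction(Action):
            server_name = forms.CharField(label=_("Server Name"))

            class Meta(object):
                name = _("Details")
                slug = "set_details"
                help_text = _("Describe the server you want to launch.")

            def handle(self, request, context):
                # Return data to be merged into the workflow context.
                return {"server_name": self.cleaned_data["server_name"]}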
"""
def __init__(self, request, context, *args, **kwargs):
if request.method == "POST":
super(Action, self).__init__(request.POST, initial=context)
else:
super(Action, self).__init__(initial=context)
if not hasattr(self, "handle"):
raise AttributeError("The action %s must define a handle method."
% self.__class__.__name__)
self.request = request
self._populate_choices(request, context)
self.required_css_class = 'required'
def __str__(self):
return force_text(self.name)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def _populate_choices(self, request, context):
for field_name, bound_field in self.fields.items():
meth = getattr(self, "populate_%s_choices" % field_name, None)
if meth is not None and callable(meth):
bound_field.choices = meth(request, context)
def get_help_text(self, extra_context=None):
"""Returns the help text for this step."""
text = ""
extra_context = extra_context or {}
if self.help_text_template:
tmpl = template.loader.get_template(self.help_text_template)
text += tmpl.render(extra_context, self.request)
else:
text += linebreaks(force_text(self.help_text))
return safe(text)
def add_action_error(self, message):
"""Adds an error to the Action's Step based on API issues."""
self.errors[NON_FIELD_ERRORS] = self.error_class([message])
def handle(self, request, context):
"""Handles any requisite processing for this action.
The method should return either ``None`` or a dictionary of data
to be passed to :meth:`~horizon.workflows.Step.contribute`.
Returns ``None`` by default, effectively making it a no-op.
"""
return None
class MembershipAction(Action):
"""An action that allows a user to add/remove members from a group.
Extend the Action class with additional helper method for membership
management.
"""
def get_default_role_field_name(self):
return "default_" + self.slug + "_role"
def get_member_field_name(self, role_id):
return self.slug + "_role_" + role_id
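    # Illustrative example (hypothetical slug and role id): for an action
    # whose slug is "update_members" and a role id of "admin", these helpers
    # return "default_update_members_role" and "update_members_role_admin"
    # respectively.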
@six.python_2_unicode_compatible
class Step(object):
"""A wrapper around an action which defines its context in a workflow.
It knows about details such as:
* The workflow's context data (data passed from step to step).
* The data which must be present in the context to begin this step (the
step's dependencies).
* The keys which will be added to the context data upon completion of the
step.
* The connections between this step's fields and changes in the context
data (e.g. if that piece of data changes, what needs to be updated in
this step).
A ``Step`` class has the following attributes:
.. attribute:: action_class
The :class:`~horizon.workflows.Action` class which this step wraps.
.. attribute:: depends_on
A list of context data keys which this step requires in order to
begin interaction.
.. attribute:: contributes
A list of keys which this step will contribute to the workflow's
context data. Optional keys should still be listed, even if their
values may be set to ``None``.
.. attribute:: connections
A dictionary which maps context data key names to lists of callbacks.
The callbacks may be functions, dotted python paths to functions
which may be imported, or dotted strings beginning with ``"self"``
to indicate methods on the current ``Step`` instance.
.. attribute:: before
Another ``Step`` class. This optional attribute is used to provide
control over workflow ordering when steps are dynamically added to
workflows. The workflow mechanism will attempt to place the current
step before the step specified in the attribute.
.. attribute:: after
Another ``Step`` class. This attribute has the same purpose as
:meth:`~horizon.workflows.Step.before` except that it will instead
attempt to place the current step after the given step.
.. attribute:: help_text
A string of simple help text which will be prepended to the ``Action``
class' help text if desired.
.. attribute:: template_name
A path to a template which will be used to render this step. In
general the default common template should be used. Default:
``"horizon/common/_workflow_step.html"``.
.. attribute:: has_errors
A boolean value which indicates whether or not this step has any
errors on the action within it or in the scope of the workflow. This
attribute will only accurately reflect this status after validation
has occurred.
.. attribute:: slug
Inherited from the ``Action`` class.
.. attribute:: name
Inherited from the ``Action`` class.
.. attribute:: permissions
Inherited from the ``Action`` class.
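
    Example (illustrative sketch; the step, action and key names below are
    hypothetical)::

        class SetDetails(Step):
            action_class = SetDetailsAction
            depends_on = ("project_id",)
            contributes = ("server_name",)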
"""
action_class = None
depends_on = ()
contributes = ()
connections = None
before = None
after = None
help_text = ""
template_name = "horizon/common/_workflow_step.html"
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __str__(self):
return force_text(self.name)
def __init__(self, workflow):
super(Step, self).__init__()
self.workflow = workflow
cls = self.__class__.__name__
if not (self.action_class and issubclass(self.action_class, Action)):
raise AttributeError("action_class not specified for %s." % cls)
self.slug = self.action_class.slug
self.name = self.action_class.name
self.permissions = self.action_class.permissions
self.policy_rules = self.action_class.policy_rules
self.has_errors = False
self._handlers = {}
if self.connections is None:
# We want a dict, but don't want to declare a mutable type on the
# class directly.
self.connections = {}
# Gather our connection handlers and make sure they exist.
for key, handlers in self.connections.items():
self._handlers[key] = []
# TODO(gabriel): This is a poor substitute for broader handling
if not isinstance(handlers, (list, tuple)):
raise TypeError("The connection handlers for %s must be a "
"list or tuple." % cls)
for possible_handler in handlers:
if callable(possible_handler):
# If it's callable we know the function exists and is valid
self._handlers[key].append(possible_handler)
continue
elif not isinstance(possible_handler, six.string_types):
raise TypeError("Connection handlers must be either "
"callables or strings.")
bits = possible_handler.split(".")
if bits[0] == "self":
root = self
for bit in bits[1:]:
try:
root = getattr(root, bit)
except AttributeError:
raise AttributeError("The connection handler %s "
"could not be found on %s."
% (possible_handler, cls))
handler = root
elif len(bits) == 1:
# Import by name from local module not supported
raise ValueError("Importing a local function as a string "
"is not supported for the connection "
"handler %s on %s."
% (possible_handler, cls))
else:
# Try a general import
module_name = ".".join(bits[:-1])
try:
mod = import_module(module_name)
handler = getattr(mod, bits[-1])
except ImportError:
raise ImportError("Could not import %s from the "
"module %s as a connection "
"handler on %s."
% (bits[-1], module_name, cls))
except AttributeError:
raise AttributeError("Could not import %s from the "
"module %s as a connection "
"handler on %s."
% (bits[-1], module_name, cls))
self._handlers[key].append(handler)
@property
def action(self):
if not getattr(self, "_action", None):
try:
# Hook in the action context customization.
workflow_context = dict(self.workflow.context)
context = self.prepare_action_context(self.workflow.request,
workflow_context)
self._action = self.action_class(self.workflow.request,
context)
except Exception:
LOG.exception("Problem instantiating action class.")
raise
return self._action
def prepare_action_context(self, request, context):
"""Hook to customize how the workflow context is passed to the action.
This is the reverse of what "contribute" does to make the
action outputs sane for the workflow. Changes to the context are not
saved globally here. They are localized to the action.
Simply returns the unaltered context by default.
"""
return context
def get_id(self):
"""Returns the ID for this step. Suitable for use in HTML markup."""
return "%s__%s" % (self.workflow.slug, self.slug)
def _verify_contributions(self, context):
for key in self.contributes:
# Make sure we don't skip steps based on weird behavior of
# POST query dicts.
field = self.action.fields.get(key, None)
if field and field.required and not context.get(key):
context.pop(key, None)
failed_to_contribute = set(self.contributes)
failed_to_contribute -= set(context.keys())
if failed_to_contribute:
raise exceptions.WorkflowError("The following expected data was "
"not added to the workflow context "
"by the step %s: %s."
% (self.__class__,
failed_to_contribute))
return True
def contribute(self, data, context):
"""Adds the data listed in ``contributes`` to the workflow's context.
By default, the context is simply updated with all the data
returned by the action.
Note that even if the value of one of the ``contributes`` keys is
not present (e.g. optional) the key should still be added to the
context with a value of ``None``.
"""
if data:
for key in self.contributes:
context[key] = data.get(key, None)
return context
def render(self):
"""Renders the step."""
step_template = template.loader.get_template(self.template_name)
extra_context = {"form": self.action,
"step": self}
return step_template.render(extra_context, self.workflow.request)
def get_help_text(self):
"""Returns the help text for this step."""
text = linebreaks(force_text(self.help_text))
text += self.action.get_help_text()
return safe(text)
def add_step_error(self, message):
"""Adds an error to the Step based on API issues."""
self.action.add_action_error(message)
def has_required_fields(self):
"""Returns True if action contains any required fields."""
return any(field.required for field in self.action.fields.values())
def allowed(self, request):
"""Determines whether or not the step is displayed.
Step instances can override this method to specify conditions under
        which this step should not be shown at all by returning ``False``.
The default behavior is to return ``True`` for all cases.
"""
return True
class WorkflowMetaclass(type):
def __new__(mcs, name, bases, attrs):
super(WorkflowMetaclass, mcs).__new__(mcs, name, bases, attrs)
attrs["_cls_registry"] = []
return type.__new__(mcs, name, bases, attrs)
class UpdateMembersStep(Step):
"""A step that allows a user to add/remove members from a group.
.. attribute:: show_roles
Set to False to disable the display of the roles dropdown.
.. attribute:: available_list_title
The title used for the available list column.
.. attribute:: members_list_title
The title used for the members list column.
.. attribute:: no_available_text
The placeholder text used when the available list is empty.
.. attribute:: no_members_text
The placeholder text used when the members list is empty.
"""
template_name = "horizon/common/_workflow_step_update_members.html"
show_roles = True
available_list_title = _("All available")
members_list_title = _("Members")
no_available_text = _("None available.")
no_members_text = _("No members.")
def get_member_field_name(self, role_id):
if issubclass(self.action_class, MembershipAction):
return self.action.get_member_field_name(role_id)
else:
return self.slug + "_role_" + role_id
@six.python_2_unicode_compatible
@six.add_metaclass(WorkflowMetaclass)
class Workflow(html.HTMLElement):
"""A Workflow is a collection of Steps.
Its interface is very straightforward, but it is responsible for handling
some very important tasks such as:
* Handling the injection, removal, and ordering of arbitrary steps.
* Determining if the workflow can be completed by a given user at runtime
based on all available information.
* Dispatching connections between steps to ensure that when context data
changes all the applicable callback functions are executed.
* Verifying/validating the overall data integrity and subsequently
triggering the final method to complete the workflow.
The ``Workflow`` class has the following attributes:
.. attribute:: name
The verbose name for this workflow which will be displayed to the user.
Defaults to the class name.
.. attribute:: slug
The unique slug for this workflow. Required.
.. attribute:: steps
Read-only access to the final ordered set of step instances for
this workflow.
.. attribute:: default_steps
A list of :class:`~horizon.workflows.Step` classes which serve as the
starting point for this workflow's ordered steps. Defaults to an empty
list (``[]``).
.. attribute:: finalize_button_name
The name which will appear on the submit button for the workflow's
form. Defaults to ``"Save"``.
.. attribute:: success_message
A string which will be displayed to the user upon successful completion
of the workflow. Defaults to
``"{{ workflow.name }} completed successfully."``
.. attribute:: failure_message
A string which will be displayed to the user upon failure to complete
the workflow. Defaults to ``"{{ workflow.name }} did not complete."``
.. attribute:: depends_on
A roll-up list of all the ``depends_on`` values compiled from the
workflow's steps.
.. attribute:: contributions
A roll-up list of all the ``contributes`` values compiled from the
workflow's steps.
.. attribute:: template_name
Path to the template which should be used to render this workflow.
In general the default common template should be used. Default:
``"horizon/common/_workflow.html"``.
.. attribute:: entry_point
The slug of the step which should initially be active when the
workflow is rendered. This can be passed in upon initialization of
the workflow, or set anytime after initialization but before calling
either ``get_entry_point`` or ``render``.
.. attribute:: redirect_param_name
The name of a parameter used for tracking the URL to redirect to upon
completion of the workflow. Defaults to ``"next"``.
.. attribute:: object
The object (if any) which this workflow relates to. In the case of
a workflow which creates a new resource the object would be the created
resource after the relevant creation steps have been undertaken. In
the case of a workflow which updates a resource it would be the
resource being updated after it has been retrieved.
.. attribute:: wizard
Whether to present the workflow as a wizard, with "prev" and "next"
buttons and validation after every step.
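
    Example (illustrative sketch; the slug, URL and step names below are
    hypothetical)::

        class LaunchServer(Workflow):
            slug = "launch_server"
            name = _("Launch Server")
            finalize_button_name = _("Launch")
            success_url = "horizon:project:instances:index"
            default_steps = (SetDetails,)

            def handle(self, request, context):
                # The assembled context would be passed to the relevant
                # API call here; return True on success.
                return True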
"""
slug = None
default_steps = ()
template_name = "horizon/common/_workflow.html"
finalize_button_name = _("Save")
success_message = _("%s completed successfully.")
failure_message = _("%s did not complete.")
redirect_param_name = "next"
multipart = False
wizard = False
_registerable_class = Step
def __str__(self):
return self.name
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
super(Workflow, self).__init__(*args, **kwargs)
if self.slug is None:
raise AttributeError("The workflow %s must have a slug."
% self.__class__.__name__)
self.name = getattr(self, "name", self.__class__.__name__)
self.request = request
self.depends_on = set([])
self.contributions = set([])
self.entry_point = entry_point
self.object = None
self._register_steps_from_config()
# Put together our steps in order. Note that we pre-register
# non-default steps so that we can identify them and subsequently
# insert them in order correctly.
self._registry = collections.OrderedDict(
[(step_class, step_class(self)) for step_class
in self.__class__._cls_registry
if step_class not in self.default_steps])
self._gather_steps()
# Determine all the context data we need to end up with.
for step in self.steps:
self.depends_on = self.depends_on | set(step.depends_on)
self.contributions = self.contributions | set(step.contributes)
# Initialize our context. For ease we can preseed it with a
# regular dictionary. This should happen after steps have been
# registered and ordered.
self.context = WorkflowContext(self)
context_seed = context_seed or {}
clean_seed = dict([(key, val)
for key, val in context_seed.items()
if key in self.contributions | self.depends_on])
self.context_seed = clean_seed
self.context.update(clean_seed)
if request and request.method == "POST":
for step in self.steps:
valid = step.action.is_valid()
# Be sure to use the CLEANED data if the workflow is valid.
if valid:
data = step.action.cleaned_data
else:
data = request.POST
self.context = step.contribute(data, self.context)
@property
def steps(self):
if getattr(self, "_ordered_steps", None) is None:
self._gather_steps()
return self._ordered_steps
def get_step(self, slug):
"""Returns the instantiated step matching the given slug."""
for step in self.steps:
if step.slug == slug:
return step
def _register_steps_from_config(self):
my_name = '.'.join([self.__class__.__module__,
self.__class__.__name__])
horizon_config = settings.HORIZON_CONFIG.get('extra_steps', {})
extra_steps = horizon_config.get(my_name, [])
for step in extra_steps:
self._register_step_from_config(step, my_name)
def _register_step_from_config(self, step_config, my_name):
if not isinstance(step_config, str):
LOG.error('Extra step definition must be a string '
                      '(workflow "%s")', my_name)
return
try:
class_ = module_loading.import_string(step_config)
except ImportError:
LOG.error('Step class "%s" is not found (workflow "%s")',
step_config, my_name)
return
self.register(class_)
def _gather_steps(self):
ordered_step_classes = self._order_steps()
for default_step in self.default_steps:
self.register(default_step)
self._registry[default_step] = default_step(self)
self._ordered_steps = []
for step_class in ordered_step_classes:
cls = self._registry[step_class]
if (has_permissions(self.request.user, cls) and
policy.check(cls.policy_rules, self.request) and
cls.allowed(self.request)):
self._ordered_steps.append(cls)
def _order_steps(self):
steps = list(copy.copy(self.default_steps))
additional = self._registry.keys()
for step in additional:
try:
min_pos = steps.index(step.after)
except ValueError:
min_pos = 0
try:
max_pos = steps.index(step.before)
except ValueError:
max_pos = len(steps)
if min_pos > max_pos:
raise exceptions.WorkflowError("The step %(new)s can't be "
"placed between the steps "
"%(after)s and %(before)s; the "
"step %(before)s comes before "
"%(after)s."
% {"new": additional,
"after": step.after,
"before": step.before})
steps.insert(max_pos, step)
return steps
def get_entry_point(self):
"""Returns the slug of the step which the workflow should begin on.
This method takes into account both already-available data and errors
within the steps.
"""
# If we have a valid specified entry point, use it.
if self.entry_point:
if self.get_step(self.entry_point):
return self.entry_point
# Otherwise fall back to calculating the appropriate entry point.
for step in self.steps:
if step.has_errors:
return step.slug
try:
step._verify_contributions(self.context)
except exceptions.WorkflowError:
return step.slug
# If nothing else, just return the first step.
return self.steps[0].slug
def _trigger_handlers(self, key):
responses = []
handlers = [(step.slug, f) for step in self.steps
for f in step._handlers.get(key, [])]
for slug, handler in handlers:
responses.append((slug, handler(self.request, self.context)))
return responses
@classmethod
def register(cls, step_class):
"""Registers a :class:`~horizon.workflows.Step` with the workflow."""
if not inspect.isclass(step_class):
raise ValueError('Only classes may be registered.')
elif not issubclass(step_class, cls._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% cls._registerable_class.__name__)
if step_class in cls._cls_registry:
return False
else:
cls._cls_registry.append(step_class)
return True
@classmethod
def unregister(cls, step_class):
"""Unregisters a :class:`~horizon.workflows.Step` from the workflow."""
try:
cls._cls_registry.remove(step_class)
except ValueError:
raise base.NotRegistered('%s is not registered' % cls)
return cls._unregister(step_class)
def validate(self, context):
"""Hook for custom context data validation.
        Should return a boolean value or
raise :class:`~horizon.exceptions.WorkflowValidationError`.
"""
return True
def is_valid(self):
"""Verifies that all required data is present in the context.
It also calls the ``validate`` method to allow for finer-grained checks
on the context data.
"""
missing = self.depends_on - set(self.context.keys())
if missing:
raise exceptions.WorkflowValidationError(
"Unable to complete the workflow. The values %s are "
"required but not present." % ", ".join(missing))
# Validate each step. Cycle through all of them to catch all errors
# in one pass before returning.
steps_valid = True
for step in self.steps:
if not step.action.is_valid():
steps_valid = False
step.has_errors = True
if not steps_valid:
return steps_valid
return self.validate(self.context)
def finalize(self):
"""Finalizes a workflow by running through all the actions.
        It runs all the actions in order, calling their ``handle`` methods.
Returns ``True`` on full success, or ``False`` for a partial success,
e.g. there were non-critical errors.
(If it failed completely the function wouldn't return.)
"""
partial = False
for step in self.steps:
try:
data = step.action.handle(self.request, self.context)
if data is True or data is None:
continue
elif data is False:
partial = True
else:
self.context = step.contribute(data or {}, self.context)
except Exception:
partial = True
exceptions.handle(self.request)
if not self.handle(self.request, self.context):
partial = True
return not partial
def handle(self, request, context):
"""Handles any final processing for this workflow.
Should return a boolean value indicating success.
"""
return True
def get_success_url(self):
"""Returns a URL to redirect the user to upon completion.
By default it will attempt to parse a ``success_url`` attribute on the
workflow, which can take the form of a reversible URL pattern name,
or a standard HTTP URL.
"""
try:
return urls.reverse(self.success_url)
except urls.NoReverseMatch:
return self.success_url
def format_status_message(self, message):
"""Hook to allow customization of the message returned to the user.
        This is called upon both successful and unsuccessful completion of
the workflow.
By default it simply inserts the workflow's name into the message
string.
"""
if "%s" in message:
return message % self.name
else:
return message
def verify_integrity(self):
provided_keys = self.contributions | set(self.context_seed.keys())
if len(self.depends_on - provided_keys):
raise exceptions.NotAvailable(
_("The current user has insufficient permission to complete "
"the requested task."))
def render(self):
"""Renders the workflow."""
workflow_template = template.loader.get_template(self.template_name)
extra_context = {"workflow": self}
if self.request.is_ajax():
extra_context['modal'] = True
return workflow_template.render(extra_context, self.request)
def get_absolute_url(self):
"""Returns the canonical URL for this workflow.
This is used for the POST action attribute on the form element
wrapping the workflow.
For convenience it defaults to the value of
``request.get_full_path()`` with any query string stripped off,
e.g. the path at which the workflow was requested.
"""
return self.request.get_full_path().partition('?')[0]
def add_error_to_step(self, message, slug):
"""Adds an error message to the workflow's Step.
This is useful when you wish for API errors to appear as errors
on the form rather than using the messages framework.
The workflow's Step is specified by its slug.
"""
step = self.get_step(slug)
if step:
step.add_step_error(message)
| apache-2.0 | 3,305,374,578,553,780,700 | 36.555085 | 79 | 0.596581 | false |
saleemjaveds/https-github.com-openstack-nova | nova/db/sqlalchemy/api.py | 1 | 221580 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import sys
import time
import uuid
from oslo.config import cfg
import six
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import DataError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true
from sqlalchemy import String
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import quota
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
connection_opts = [
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
'slave database'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(connection_opts, group='database')
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('connection',
'nova.openstack.common.db.options',
group='database')
LOG = logging.getLogger(__name__)
_MASTER_FACADE = None
_SLAVE_FACADE = None
def _create_facade_lazily(use_slave=False):
global _MASTER_FACADE
global _SLAVE_FACADE
return_slave = use_slave and CONF.database.slave_connection
if not return_slave:
if _MASTER_FACADE is None:
_MASTER_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database.iteritems())
)
return _MASTER_FACADE
else:
if _SLAVE_FACADE is None:
_SLAVE_FACADE = db_session.EngineFacade(
CONF.database.slave_connection,
**dict(CONF.database.iteritems())
)
return _SLAVE_FACADE
def get_engine(use_slave=False):
facade = _create_facade_lazily(use_slave)
return facade.get_engine()
def get_session(use_slave=False, **kwargs):
facade = _create_facade_lazily(use_slave)
return facade.get_session(**kwargs)
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_admin_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
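# Illustrative usage of the decorators above (the function below is
# hypothetical, not part of this module):
#
#     @require_admin_context
#     def example_service_get_all(context):
#         return model_query(context, models.Service).all()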
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warn(_("Deadlock detected when running "
"'%(func_name)s': Retrying..."),
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param use_slave: If true, use slave_connection
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id. If set to 'allow_none',
restriction includes project_id = None.
    :param base_model: When model_query is passed a "model" parameter which is
not a subclass of NovaBase, we should pass an extra base_model
parameter that is a subclass of NovaBase and corresponds to the
model parameter.
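
    Usage example (illustrative only; the model and filters are arbitrary)::

        # All non-deleted instances belonging to the context's project:
        instances = model_query(context, models.Instance,
                                project_only=True).all()

        # Include soft-deleted rows as well:
        rows = model_query(context, models.Instance,
                           read_deleted="yes").filter_by(host="node1").all()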
"""
use_slave = kwargs.get('use_slave') or False
if CONF.database.slave_connection == '':
use_slave = False
session = kwargs.get('session') or get_session(use_slave=use_slave)
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only', False)
def issubclassof_nova_base(obj):
return isinstance(obj, type) and issubclass(obj, models.NovaBase)
base_model = model
if not issubclassof_nova_base(base_model):
base_model = kwargs.get('base_model', None)
if not issubclassof_nova_base(base_model):
raise Exception(_("model or base_model parameter should be "
"subclass of NovaBase"))
query = session.query(model, *args)
default_deleted_value = base_model.__mapper__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(base_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(base_model.deleted != default_deleted_value)
else:
raise Exception(_("Unrecognized read_deleted value '%s'")
% read_deleted)
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(base_model.project_id == context.project_id,
base_model.project_id == null()))
else:
query = query.filter_by(project_id=context.project_id)
return query
def exact_filter(query, model, filters, legal_keys):
"""Applies exact match filtering to a query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param model: model object the query applies to, for IN-style
filtering
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
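
    Usage example (illustrative only; the keys and values are arbitrary)::

        filters = {'vm_state': 'active', 'host': ['node1', 'node2']}
        query = model_query(context, models.Instance)
        query = exact_filter(query, models.Instance, filters,
                             ['vm_state', 'host'])
        # 'vm_state' is matched with '==', 'host' with an IN clause, and
        # both keys are popped from the filters dict.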
"""
filter_dict = {}
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key in ('metadata', 'system_metadata'):
column_attr = getattr(model, key)
if isinstance(value, list):
for item in value:
for k, v in item.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
else:
for k, v in value.iteritems():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
def convert_objects_related_datetimes(values, *datetime_keys):
for key in datetime_keys:
if key in values and values[key]:
if isinstance(values[key], six.string_types):
values[key] = timeutils.parse_strtime(values[key])
# NOTE(danms): Strip UTC timezones from datetimes, since they're
# stored that way in the database
values[key] = values[key].replace(tzinfo=None)
return values
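# Illustrative example (values are arbitrary): ISO-formatted strings are
# parsed and any timezone info is dropped, e.g.
#
#     values = {'created_at': '2014-01-01T12:00:00.000000'}
#     convert_objects_related_datetimes(values, 'created_at')
#     # values['created_at'] -> datetime.datetime(2014, 1, 1, 12, 0)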
def _sync_instances(context, project_id, user_id, session):
return dict(zip(('instances', 'cores', 'ram'),
_instance_data_get_for_user(
context, project_id, user_id, session)))
def _sync_floating_ips(context, project_id, user_id, session):
return dict(floating_ips=_floating_ip_count_by_project(
context, project_id, session))
def _sync_fixed_ips(context, project_id, user_id, session):
return dict(fixed_ips=_fixed_ip_count_by_project(
context, project_id, session))
def _sync_security_groups(context, project_id, user_id, session):
return dict(security_groups=_security_group_count_by_project_and_user(
context, project_id, user_id, session))
QUOTA_SYNC_FUNCTIONS = {
'_sync_instances': _sync_instances,
'_sync_floating_ips': _sync_floating_ips,
'_sync_fixed_ips': _sync_fixed_ips,
'_sync_security_groups': _sync_security_groups,
}
###################
def constraint(**conditions):
return Constraint(conditions)
def equal_any(*values):
return EqualityCondition(values)
def not_equal(*values):
return InequalityCondition(values)
class Constraint(object):
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.iteritems():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
        # The method signature requires us to return an iterable even though,
        # for the OR operator, this will actually be a single clause.
return [or_(*[field == value for value in self.values])]
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
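# Illustrative sketch (not part of the original module): the helpers above
# build a Constraint that can be applied to a query, e.g.
#
#     cons = constraint(task_state=equal_any(None, 'deleting'),
#                       vm_state=not_equal('building'))
#     query = cons.apply(models.Instance, query)
#
# The column names used here are arbitrary examples.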
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
count = model_query(context, models.Service, session=session).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.ServiceNotFound(service_id=service_id)
model_query(context, models.ComputeNode, session=session).\
filter_by(service_id=service_id).\
soft_delete(synchronize_session=False)
def _service_get(context, service_id, with_compute_node=True, session=None):
query = model_query(context, models.Service, session=session).\
filter_by(id=service_id)
if with_compute_node:
query = query.options(joinedload('compute_node'))
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get(context, service_id, with_compute_node=False):
return _service_get(context, service_id,
with_compute_node=with_compute_node)
@require_admin_context
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@require_admin_context
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@require_admin_context
def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic=CONF.compute_topic).\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
service_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return service_ref
@require_admin_context
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id,
with_compute_node=False, session=session)
service_ref.update(values)
return service_ref
###################
def compute_node_get(context, compute_id):
return _compute_node_get(context, compute_id)
def _compute_node_get(context, compute_id, session=None):
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
options(joinedload('service')).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@require_admin_context
def compute_node_get_by_service_id(context, service_id):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(service_id=service_id).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def compute_node_get_all(context, no_date_fields):
# NOTE(msdubov): Using lower-level 'select' queries and joining the tables
    #                manually here gives roughly a 3x speed-up and 5x less
    #                network load / memory usage compared to the sqla ORM.
engine = get_engine()
# Retrieve ComputeNode, Service
compute_node = models.ComputeNode.__table__
service = models.Service.__table__
with engine.begin() as conn:
redundant_columns = set(['deleted_at', 'created_at', 'updated_at',
'deleted']) if no_date_fields else set([])
def filter_columns(table):
return [c for c in table.c if c.name not in redundant_columns]
compute_node_query = sql.select(filter_columns(compute_node)).\
where(compute_node.c.deleted == 0).\
order_by(compute_node.c.service_id)
compute_node_rows = conn.execute(compute_node_query).fetchall()
service_query = sql.select(filter_columns(service)).\
where((service.c.deleted == 0) &
(service.c.binary == 'nova-compute')).\
order_by(service.c.id)
service_rows = conn.execute(service_query).fetchall()
# Join ComputeNode & Service manually.
services = {}
for proxy in service_rows:
services[proxy['id']] = dict(proxy.items())
compute_nodes = []
for proxy in compute_node_rows:
node = dict(proxy.items())
node['service'] = services.get(proxy['service_id'])
compute_nodes.append(node)
return compute_nodes
@require_admin_context
def compute_node_search_by_hypervisor(context, hypervisor_match):
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
options(joinedload('service')).\
filter(field.like('%%%s%%' % hypervisor_match)).\
all()
@require_admin_context
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
"""
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save()
return compute_node_ref
@require_admin_context
@_retry_on_deadlock
def compute_node_update(context, compute_id, values):
"""Updates the ComputeNode record with the most recent data."""
session = get_session()
with session.begin():
compute_ref = _compute_node_get(context, compute_id, session=session)
        # Always update this, even if there are going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
values['updated_at'] = timeutils.utcnow()
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
convert_objects_related_datetimes(values, *datetime_keys)
compute_ref.update(values)
return compute_ref
@require_admin_context
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record."""
session = get_session()
with session.begin():
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
soft_delete(synchronize_session=False)
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
def compute_node_statistics(context):
"""Compute statistics over all compute nodes."""
result = model_query(context,
func.count(models.ComputeNode.id),
func.sum(models.ComputeNode.vcpus),
func.sum(models.ComputeNode.memory_mb),
func.sum(models.ComputeNode.local_gb),
func.sum(models.ComputeNode.vcpus_used),
func.sum(models.ComputeNode.memory_mb_used),
func.sum(models.ComputeNode.local_gb_used),
func.sum(models.ComputeNode.free_ram_mb),
func.sum(models.ComputeNode.free_disk_gb),
func.sum(models.ComputeNode.current_workload),
func.sum(models.ComputeNode.running_vms),
func.sum(models.ComputeNode.disk_available_least),
base_model=models.ComputeNode,
read_deleted="no").\
filter(models.Service.disabled == false()).\
filter(
models.Service.id ==
models.ComputeNode.service_id).\
first()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
return dict((field, int(result[idx] or 0))
for idx, field in enumerate(fields))
###################
@require_admin_context
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save()
return certificate_ref
@require_admin_context
def certificate_get_all_by_project(context, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_admin_context
def certificate_get_all_by_user(context, user_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@require_admin_context
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
###################
@require_context
def floating_ip_get(context, id):
try:
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(id=id)
except DataError:
msg = _("Invalid floating ip id %s in request") % id
LOG.warn(msg)
raise exception.InvalidID(id=id)
return result
@require_context
def floating_ip_get_pools(context):
pools = []
for result in model_query(context, models.FloatingIp.pool,
base_model=models.FloatingIp).distinct():
pools.append({'name': result[0]})
return pools
@require_context
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
nova.context.authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = model_query(context, models.FloatingIp,
session=session, read_deleted="no").\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not floating_ip_ref:
raise exception.NoMoreFloatingIps()
floating_ip_ref['project_id'] = project_id
floating_ip_ref['auto_assigned'] = auto_assigned
session.add(floating_ip_ref)
return floating_ip_ref['address']
@require_context
def floating_ip_bulk_create(context, ips):
session = get_session()
result = []
with session.begin():
for ip in ips:
model = models.FloatingIp()
model.update(ip)
result.append(model)
try:
            # NOTE(boris-42): To detect an already existing address we
            #                 have to call session.flush() after each add.
session.add(model)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=ip['address'])
return result
def _ip_range_splitter(ips, block_size=256):
"""Yields blocks of IPs no more than block_size elements long."""
out = []
count = 0
for ip in ips:
out.append(ip['address'])
count += 1
if count > block_size - 1:
yield out
out = []
count = 0
if out:
yield out
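# Hypothetical usage sketch for _ip_range_splitter(): given an iterable of
# {'address': ...} dicts it yields plain address lists of at most
# block_size entries, which keeps the IN() clauses used by the bulk
# operations below to a manageable size.  The numbers are illustrative.
def _example_split_addresses():
    ips = [{'address': '10.0.0.%d' % i} for i in range(600)]
    blocks = list(_ip_range_splitter(ips, block_size=256))
    # -> blocks of 256, 256 and 88 addresses respectively
    return [len(block) for block in blocks]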
@require_context
def floating_ip_bulk_destroy(context, ips):
session = get_session()
with session.begin():
project_id_to_quota_count = collections.defaultdict(int)
for ip_block in _ip_range_splitter(ips):
# Find any floating IPs that were not auto_assigned and
# thus need quota released.
query = model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
filter_by(auto_assigned=False)
rows = query.all()
for row in rows:
# The count is negative since we release quota by
# reserving negative quota.
project_id_to_quota_count[row['project_id']] -= 1
# Delete the floating IPs.
model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
soft_delete(synchronize_session='fetch')
# Delete the quotas, if needed.
for project_id, count in project_id_to_quota_count.iteritems():
try:
reservations = quota.QUOTAS.reserve(context,
project_id=project_id,
floating_ips=count)
quota.QUOTAS.commit(context,
reservations,
project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update usages bulk "
"deallocating floating IP"))
@require_context
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
try:
floating_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return floating_ip_ref
def _floating_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@require_context
@_retry_on_deadlock
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
session = get_session()
with session.begin():
floating_ip_ref = _floating_ip_get_by_address(context,
floating_address,
session=session)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(address=fixed_address).\
options(joinedload('network')).\
first()
if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]:
return None
floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
floating_ip_ref.host = host
return fixed_ip_ref
@require_context
@_retry_on_deadlock
def floating_ip_deallocate(context, address):
session = get_session()
with session.begin():
return model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
filter(models.FloatingIp.project_id != null()).\
update({'project_id': None,
'host': None,
'auto_assigned': False},
synchronize_session=False)
@require_context
def floating_ip_destroy(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
delete()
@require_context
def floating_ip_disassociate(context, address):
session = get_session()
with session.begin():
floating_ip_ref = model_query(context,
models.FloatingIp,
session=session).\
filter_by(address=address).\
first()
if not floating_ip_ref:
raise exception.FloatingIpNotFoundForAddress(address=address)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(id=floating_ip_ref['fixed_ip_id']).\
options(joinedload('network')).\
first()
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
return fixed_ip_ref
@require_context
def floating_ip_set_auto_assigned(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
update({'auto_assigned': True})
def _floating_ip_get_all(context, session=None):
return model_query(context, models.FloatingIp, read_deleted="no",
session=session)
@require_admin_context
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).all()
if not floating_ip_refs:
raise exception.NoFloatingIpsDefined()
return floating_ip_refs
@require_admin_context
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
all()
if not floating_ip_refs:
raise exception.FloatingIpNotFoundForHost(host=host)
return floating_ip_refs
@require_context
def floating_ip_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
options(joinedload_all('fixed_ip.instance')).\
all()
@require_context
def floating_ip_get_by_address(context, address):
return _floating_ip_get_by_address(context, address)
def _floating_ip_get_by_address(context, address, session=None):
# if address string is empty explicitly set it to None
if not address:
address = None
try:
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid floating IP %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
if result.project_id and nova.context.is_user_context(context):
nova.context.authorize_project_context(context, result.project_id)
return result
@require_context
def floating_ip_get_by_fixed_address(context, fixed_address):
return model_query(context, models.FloatingIp).\
outerjoin(models.FixedIp,
models.FixedIp.id ==
models.FloatingIp.fixed_ip_id).\
filter(models.FixedIp.address == fixed_address).\
all()
@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return model_query(context, models.FloatingIp).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
@require_context
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
float_ip_ref = _floating_ip_get_by_address(context, address, session)
float_ip_ref.update(values)
try:
float_ip_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return float_ip_ref
def _dnsdomain_get(context, session, fqdomain):
return model_query(context, models.DNSDomain,
session=session, read_deleted="no").\
filter_by(domain=fqdomain).\
with_lockmode('update').\
first()
@require_context
def dnsdomain_get(context, fqdomain):
session = get_session()
with session.begin():
return _dnsdomain_get(context, session, fqdomain)
def _dnsdomain_get_or_create(context, session, fqdomain):
domain_ref = _dnsdomain_get(context, session, fqdomain)
if not domain_ref:
dns_ref = models.DNSDomain()
dns_ref.update({'domain': fqdomain,
'availability_zone': None,
'project_id': None})
return dns_ref
return domain_ref
@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'private'
domain_ref.availability_zone = zone
session.add(domain_ref)
@require_admin_context
def dnsdomain_register_for_project(context, fqdomain, project):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'public'
domain_ref.project_id = project
session.add(domain_ref)
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
model_query(context, models.DNSDomain).\
filter_by(domain=fqdomain).\
delete()
@require_context
def dnsdomain_list(context):
query = model_query(context, models.DNSDomain, read_deleted="no")
return [row.domain for row in query.all()]
def dnsdomain_get_all(context):
return model_query(context, models.DNSDomain, read_deleted="no").all()
###################
@require_admin_context
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Keyword arguments:
    reserved -- should be a boolean value (True or False); the exact value
    is used to filter on the fixed IP address
"""
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_id)
if fixed_ip_ref.instance_uuid:
raise exception.FixedIpAlreadyInUse(address=address,
instance_uuid=instance_uuid)
if not fixed_ip_ref.network_id:
fixed_ip_ref.network_id = network_id
fixed_ip_ref.instance_uuid = instance_uuid
session.add(fixed_ip_ref)
return fixed_ip_ref
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
raise exception.NoMoreFixedIps()
if fixed_ip_ref['network_id'] is None:
            fixed_ip_ref['network_id'] = network_id
if instance_uuid:
fixed_ip_ref['instance_uuid'] = instance_uuid
if host:
fixed_ip_ref['host'] = host
session.add(fixed_ip_ref)
return fixed_ip_ref
@require_context
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
fixed_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
def fixed_ip_bulk_create(context, ips):
session = get_session()
with session.begin():
for ip in ips:
model = models.FixedIp()
model.update(ip)
try:
            # NOTE (vsergeyev): To detect an already existing address we
            #                   have to call session.flush() after each add.
# See related note at line 697.
session.add(model)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=ip['address'])
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update({'instance_uuid': None,
'virtual_interface_id': None})
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(context, host, time):
session = get_session()
# NOTE(vish): only update fixed ips that "belong" to this
# host; i.e. the network host or the instance
# host matches. Two queries necessary because
# join with update doesn't work.
with session.begin():
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == true()),
models.Network.host == host)
result = model_query(context, models.FixedIp.id,
base_model=models.FixedIp, read_deleted="no",
session=session).\
filter(models.FixedIp.allocated == false()).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(host_filter).\
all()
fixed_ip_ids = [fip[0] for fip in result]
if not fixed_ip_ids:
return 0
result = model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_uuid': None,
'leased': False,
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@require_context
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
query = query.options(joinedload('network'))
result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context, instance.project_id)
return result
@require_admin_context
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
def fixed_ip_get_by_address(context, address, columns_to_join=None):
return _fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def _fixed_ip_get_by_address(context, address, session=None,
columns_to_join=None):
if session is None:
session = get_session()
if columns_to_join is None:
columns_to_join = []
with session.begin(subtransactions=True):
try:
result = model_query(context, models.FixedIp, session=session)
for column in columns_to_join:
result = result.options(joinedload_all(column))
result = result.filter_by(address=address).first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(
context.elevated(read_deleted='yes'),
result['instance_uuid'],
session
)
nova.context.authorize_project_context(context,
instance.project_id)
return result
@require_admin_context
def fixed_ip_get_by_address_detailed(context, address):
""":returns: a tuple of (models.FixedIp, models.Network, models.Instance)
"""
try:
result = model_query(context, models.FixedIp,
models.Network, models.Instance).\
filter_by(address=address).\
outerjoin((models.Network,
models.Network.id ==
models.FixedIp.network_id)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.FixedIp.instance_uuid)).\
first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except DataError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
return result
@require_context
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
outerjoin(models.FloatingIp,
models.FloatingIp.fixed_ip_id ==
models.FixedIp.id).\
filter(models.FloatingIp.address == floating_address).\
first()
# NOTE(tr3buchet) please don't invent an exception here, empty list is fine
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
return result
@require_admin_context
def fixed_ip_get_by_host(context, host):
session = get_session()
with session.begin():
instance_uuids = _instance_get_all_uuids_by_host(context, host,
session=session)
if not instance_uuids:
return []
return model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
all()
@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
all()
return result
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update(values)
def _fixed_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp.id,
base_model=models.FixedIp, read_deleted="no",
session=session).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
count()
###################
@require_context
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context, session=None, use_slave=False):
return model_query(context, models.VirtualInterface, session=session,
read_deleted="no", use_slave=use_slave)
@require_context
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
:param address: = the address of the interface you're looking to get
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except DataError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists_using_uuid
def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
"""Gets all virtual interfaces for instance.
:param instance_uuid: = uuid of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\
all()
return vif_refs
@require_context
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
    with the instance given by instance_uuid.
:param instance_uuid: = uuid of instance
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _validate_unique_server_name(context, session, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, session=session,
read_deleted=False).\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
' Flag must be empty, "global" or'
' "project"') % CONF.osapi_compute_unique_server_name_scope
LOG.warn(msg)
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
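# Minimal caller sketch for _validate_unique_server_name(), assuming `ctxt`
# and an open `session` are provided elsewhere.  With
# osapi_compute_unique_server_name_scope = 'project' only instances in the
# caller's project are compared, with 'global' every non-deleted instance
# is compared, and any other non-empty value is logged and ignored.
def _example_check_hostname(ctxt, session):
    # Raises exception.InstanceExists if another instance in scope already
    # uses the hostname; the comparison is case-insensitive, so 'web-01'
    # and 'Web-01' collide.  A no-op when the scope option is unset.
    _validate_unique_server_name(ctxt, session, 'web-01')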
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
datetime_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
convert_objects_related_datetimes(values, *datetime_keys)
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref.update(values)
def _get_sec_group_models(session, security_groups):
models = []
default_group = security_group_ensure_default(context)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
session, context.project_id, security_groups))
return models
session = get_session()
with session.begin():
if 'hostname' in values:
_validate_unique_server_name(context, session, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(session,
security_groups)
session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
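# Sketch of a minimal `values` dict accepted by instance_create(); the keys
# and values are illustrative only, not a complete schema, and `ctxt` is an
# assumed RequestContext.
def _example_instance_values(ctxt):
    return {'project_id': ctxt.project_id,
            'user_id': ctxt.user_id,
            'hostname': 'demo-1',
            'vcpus': 1,
            'memory_mb': 512,
            'metadata': {'role': 'demo'},    # becomes InstanceMetadata rows
            'system_metadata': {},           # becomes InstanceSystemMetadata
            'security_groups': ['default']}  # resolved to SecurityGroup models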
def _instance_data_get_for_user(context, project_id, user_id, session=None):
result = model_query(context,
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb),
base_model=models.Instance,
session=session).\
filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
result = result.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
@_retry_on_deadlock
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session)
else:
raise exception.InvalidUUID(instance_uuid)
query = model_query(context, models.Instance, session=session).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceInfoCache, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceMetadata, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceFault, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceExtra, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
return instance_ref
@require_context
def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join, use_slave=use_slave)
def _instance_get_by_uuid(context, uuid, session=None,
columns_to_join=None, use_slave=False):
result = _build_instance_get(context, session=session,
columns_to_join=columns_to_join,
use_slave=use_slave).\
filter_by(uuid=uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
def instance_get(context, instance_id, columns_to_join=None):
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
).filter_by(id=instance_id).first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
except DataError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
msg = _("Invalid instance id %s in request") % instance_id
LOG.warn(msg)
raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, session=None,
columns_to_join=None, use_slave=False):
query = model_query(context, models.Instance, session=session,
project_only=True, use_slave=use_slave).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
if column in ['info_cache', 'security_groups']:
# Already always joined above
continue
query = query.options(joinedload(column))
# NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
return query
def _instances_fill_metadata(context, instances,
manual_joins=None, use_slave=False):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
:param context: security context
:param instances: list of instances to fill
:param manual_joins: list of tables to manually join (can be any
combination of 'metadata' and 'system_metadata' or
None to take the default of both)
"""
uuids = [inst['uuid'] for inst in instances]
if manual_joins is None:
manual_joins = ['metadata', 'system_metadata']
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids,
use_slave=use_slave):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids,
use_slave=use_slave):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
if 'pci_devices' in manual_joins:
for row in _instance_pcidevs_get_multi(context, uuids):
pcidevs[row['instance_uuid']].append(row)
filled_instances = []
for inst in instances:
inst = dict(inst.iteritems())
inst['system_metadata'] = sys_meta[inst['uuid']]
inst['metadata'] = meta[inst['uuid']]
if 'pci_devices' in manual_joins:
inst['pci_devices'] = pcidevs[inst['uuid']]
filled_instances.append(inst)
return filled_instances
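# Sketch (assuming `ctxt` is a RequestContext and the instances were loaded
# without the metadata relationships): _instances_fill_metadata() returns
# plain dicts, so callers read metadata as lists of rows rather than
# lazy-loaded relationships.
def _example_fill_only_system_metadata(ctxt, instances):
    filled = _instances_fill_metadata(ctxt, instances,
                                      manual_joins=['system_metadata'])
    # each element is a dict; 'metadata' comes back as an empty list here
    # because it was not requested in manual_joins
    return [(inst['uuid'], inst['system_metadata']) for inst in filled]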
def _manual_join_columns(columns_to_join):
manual_joins = []
for column in ('metadata', 'system_metadata', 'pci_devices'):
if column in columns_to_join:
columns_to_join.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join
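# Worked example for _manual_join_columns(): metadata-style columns are
# split out for manual joining while everything else stays in the
# SQL-level join list.  Input values are illustrative.
def _example_split_join_columns():
    manual, sql_joins = _manual_join_columns(
        ['info_cache', 'metadata', 'pci_devices'])
    # manual    -> ['metadata', 'pci_devices']
    # sql_joins -> ['info_cache']
    return manual, sql_joins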
@require_context
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join = _manual_join_columns(columns_to_join)
query = model_query(context, models.Instance)
for column in columns_to_join:
query = query.options(joinedload(column))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None,
use_slave=False):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
otherwise.
Depending on the name of a filter, matching for that filter is
performed using either exact matching or as regular expression
matching. Exact matching is applied for the following filters::
| ['project_id', 'user_id', 'image_ref',
| 'vm_state', 'instance_type_id', 'uuid',
| 'metadata', 'host', 'system_metadata']
A third type of filter (also using exact matching), filters
based on instance metadata tags when supplied under a special
key named 'filter'::
| filters = {
| 'filter': [
| {'name': 'tag-key', 'value': '<metakey>'},
| {'name': 'tag-value', 'value': '<metaval>'},
| {'name': 'tag:<metakey>', 'value': '<metaval>'}
| ]
| }
    Special keys are used to tweak the query further::
| 'changes-since' - only return instances updated after
| 'deleted' - only return (or exclude) deleted instances
| 'soft_deleted' - modify behavior of 'deleted' to either
| include or exclude instances whose
| vm_state is SOFT_DELETED.
"""
# NOTE(mriedem): If the limit is 0 there is no point in even going
# to the database since nothing is going to be returned anyway.
if limit == 0:
return []
sort_fn = {'desc': desc, 'asc': asc}
if CONF.database.slave_connection == '':
use_slave = False
session = get_session(use_slave=use_slave)
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join = _manual_join_columns(columns_to_join)
query_prefix = session.query(models.Instance)
for column in columns_to_join:
query_prefix = query_prefix.options(joinedload(column))
query_prefix = query_prefix.order_by(sort_fn[sort_dir](
getattr(models.Instance, sort_key)))
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
if 'changes-since' in filters:
changes_since = timeutils.normalize_time(filters['changes-since'])
query_prefix = query_prefix.\
filter(models.Instance.updated_at >= changes_since)
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
if filters.pop('deleted'):
if filters.pop('soft_deleted', True):
deleted = or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
query_prefix = query_prefix.\
filter(deleted)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
query_prefix = query_prefix.\
filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null()
)
query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters:
if filters.pop('cleaned'):
query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
else:
query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state',
'system_metadata']
# Filter the query
query_prefix = exact_filter(query_prefix, models.Instance,
filters, exact_match_filter_names)
query_prefix = regex_filter(query_prefix, models.Instance, filters)
query_prefix = tag_filter(context, query_prefix, models.Instance,
models.InstanceMetadata,
models.InstanceMetadata.instance_uuid,
filters)
# paginate query
if marker is not None:
try:
marker = _instance_get_by_uuid(context, marker, session=session)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
query_prefix = sqlalchemyutils.paginate_query(query_prefix,
models.Instance, limit,
[sort_key, 'created_at', 'id'],
marker=marker,
sort_dir=sort_dir)
return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
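# Hypothetical caller sketch for instance_get_all_by_filters().  The filter
# names are taken from the docstring above and `ctxt` is an assumed
# RequestContext; exact-match filters go straight into SQL, anything else
# falls through to regex_filter().
def _example_list_active_instances(ctxt, host):
    filters = {'deleted': False,
               'vm_state': vm_states.ACTIVE,
               'host': host}
    return instance_get_all_by_filters(ctxt, filters,
                                       sort_key='created_at',
                                       sort_dir='desc',
                                       limit=50)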
def tag_filter(context, query, model, model_metadata,
model_uuid, filters):
"""Applies tag filtering to a query.
    Returns the updated query. This method alters the filters dictionary to
    remove keys that are tags. It filters resources by their tags and
    assumes that the caller takes care of access control.
:param query: query to apply filters to
:param model: model object the query applies to
:param filters: dictionary of filters
"""
if filters.get('filter') is None:
return query
or_query = None
def _to_list(val):
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
return val
for filter_block in filters['filter']:
if not isinstance(filter_block, dict):
continue
filter_name = filter_block.get('name')
if filter_name is None:
continue
tag_name = filter_name[4:]
tag_val = _to_list(filter_block.get('value'))
if filter_name.startswith('tag-'):
if tag_name not in ['key', 'value']:
msg = _("Invalid field name: %s") % tag_name
raise exception.InvalidParameterValue(err=msg)
subq = getattr(model_metadata, tag_name).in_(tag_val)
or_query = subq if or_query is None else or_(or_query, subq)
elif filter_name.startswith('tag:'):
subq = model_query(context, model_uuid,
session=query.session, base_model=model_metadata).\
filter_by(key=tag_name).\
filter(model_metadata.value.in_(tag_val))
query = query.filter(model.uuid.in_(subq))
if or_query is not None:
subq = model_query(context, model_uuid,
session=query.session, base_model=model_metadata).\
filter(or_query)
query = query.filter(model.uuid.in_(subq))
return query
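# Illustrative 'filter' structure consumed by tag_filter(), mirroring the
# special key documented in instance_get_all_by_filters() above.  This is a
# sketch of the expected input shape, not an API guarantee.
def _example_tag_filters():
    return {
        'filter': [
            # any instance that has a metadata key named 'env'
            {'name': 'tag-key', 'value': 'env'},
            # any instance whose metadata key 'env' has the value 'prod'
            {'name': 'tag:env', 'value': 'prod'},
        ]
    }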
def regex_filter(query, model, filters):
"""Applies regular expression filtering to a query.
Returns the updated query.
:param query: query to apply filters to
:param model: model object the query applies to
:param filters: dictionary of filters with regex values
"""
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP',
'sqlite': 'REGEXP'
}
db_string = CONF.database.connection.split(':')[0].split('+')[0]
db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
for filter_name in filters.iterkeys():
try:
column_attr = getattr(model, filter_name)
except AttributeError:
continue
if 'property' == type(column_attr).__name__:
continue
if db_regexp_op == 'LIKE':
query = query.filter(column_attr.op(db_regexp_op)(
'%' + str(filters[filter_name]) + '%'))
else:
query = query.filter(column_attr.op(db_regexp_op)(
str(filters[filter_name])))
return query
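# Minimal sketch of how the backend-specific operator is chosen above: the
# scheme of CONF.database.connection picks the operator and unknown
# backends fall back to a substring LIKE match.  The connection strings in
# the comments are examples only.
def _example_regexp_op_for(connection):
    # 'mysql+pymysql://...' -> 'REGEXP', 'postgresql://...' -> '~',
    # 'sqlite:///nova.sqlite' -> 'REGEXP', anything else -> 'LIKE'
    regexp_op_map = {'postgresql': '~', 'mysql': 'REGEXP', 'sqlite': 'REGEXP'}
    db_string = connection.split(':')[0].split('+')[0]
    return regexp_op_map.get(db_string, 'LIKE')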
def process_sort_params(sort_keys, sort_dirs,
default_keys=['created_at', 'id'],
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
the default in sqlalchemy.utils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified
"""
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs) != 0:
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys
if sort_dirs:
result_dirs = list(sort_dirs)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
    # Unless more directions are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction size exceeds sort key size")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
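# Worked example for process_sort_params(), matching the rules in its
# docstring; the key names are illustrative.
def _example_sort_params():
    keys, dirs = process_sort_params(['display_name'], ['desc'])
    # keys -> ['display_name', 'created_at', 'id']
    # dirs -> ['desc', 'desc', 'desc']  (default keys inherit sort_dirs[0])
    return keys, dirs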
@require_context
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None,
use_slave=False):
"""Return instances and joins that were active during window."""
session = get_session(use_slave=use_slave)
query = session.query(models.Instance)
query = query.options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
filter(or_(models.Instance.terminated_at == null(),
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
if host:
query = query.filter_by(host=host)
return _instances_fill_metadata(context, query.all())
def _instance_get_all_query(context, project_only=False,
joins=None, use_slave=False):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
project_only=project_only,
use_slave=use_slave)
for join in joins:
query = query.options(joinedload(join))
return query
@require_admin_context
def instance_get_all_by_host(context, host,
columns_to_join=None,
use_slave=False):
return _instances_fill_metadata(context,
_instance_get_all_query(context,
use_slave=use_slave).filter_by(host=host).all(),
manual_joins=columns_to_join,
use_slave=use_slave)
def _instance_get_all_uuids_by_host(context, host, session=None):
"""Return a list of the instance uuids on a given host.
Returns a list of UUIDs, not Instance model objects. This internal version
allows you to specify a session object as a kwarg.
"""
uuids = []
    for row in model_query(context, models.Instance.uuid, read_deleted="no",
                           base_model=models.Instance, session=session).\
                    filter_by(host=host).\
                    all():
        uuids.append(row[0])
return uuids
@require_admin_context
def instance_get_all_by_host_and_node(context, host, node):
return _instances_fill_metadata(context,
_instance_get_all_query(context, joins=[]).filter_by(host=host).
filter_by(node=node).all(), manual_joins=[])
@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).
filter(models.Instance.instance_type_id != type_id).all())
# NOTE(jkoelker) This is only being left here for compat with floating
#                ips. Currently the network_api doesn't return floaters
#                in network_info. Once it starts returning the model, this
#                function and its call in compute/manager.py on 1829 can
#                go away.
@require_context
def instance_get_floating_address(context, instance_id):
instance = instance_get(context, instance_id)
fixed_ips = fixed_ip_get_by_instance(context, instance['uuid'])
if not fixed_ips:
return None
# NOTE(tr3buchet): this only gets the first fixed_ip
# won't find floating ips associated with other fixed_ips
floating_ips = floating_ip_get_by_fixed_address(context,
fixed_ips[0]['address'])
if not floating_ips:
return None
# NOTE(vish): this just returns the first floating ip
return floating_ips[0]['address']
@require_context
def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
floating_ips = model_query(context,
models.FloatingIp.address,
base_model=models.FloatingIp).\
join(models.FloatingIp.fixed_ip).\
filter_by(instance_uuid=instance_uuid)
return [floating_ip.address for floating_ip in floating_ips]
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
return _instances_fill_metadata(context,
model_query(context, models.Instance).
filter(models.Instance.updated_at <= reboot_window).
filter_by(task_state=task_states.REBOOTING).all(),
manual_joins=[])
@require_context
def instance_update(context, instance_uuid, values):
instance_ref = _instance_update(context, instance_uuid, values)[1]
return instance_ref
@require_context
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance uuid
:param values: = dict containing column values
If "expected_task_state" exists in values, the update can only happen
when the task state before update matches expected_task_state. Otherwise
    an UnexpectedTaskStateError is thrown.
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
return _instance_update(context, instance_uuid, values,
copy_old_instance=True,
columns_to_join=columns_to_join)
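# Hypothetical caller sketch showing the expected_task_state contract
# described in the docstring above; `ctxt` and `instance_uuid` are assumed
# to be supplied by the caller.
def _example_guarded_update(ctxt, instance_uuid):
    try:
        old_ref, new_ref = instance_update_and_get_original(
            ctxt, instance_uuid,
            {'task_state': task_states.REBOOTING,
             'expected_task_state': [None]})
    except exception.UnexpectedTaskStateError:
        # another worker changed task_state first; the caller decides how
        # to recover
        raise
    return old_ref, new_ref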
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
metadata, session):
metadata = dict(metadata)
to_delete = []
for keyvalue in instance[metadata_type]:
key = keyvalue['key']
if key in metadata:
keyvalue['value'] = metadata.pop(key)
elif key not in metadata:
to_delete.append(keyvalue)
for condemned in to_delete:
condemned.soft_delete(session=session)
for key, value in metadata.iteritems():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
session.add(newitem)
instance[metadata_type].append(newitem)
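# Behavioural sketch of _instance_metadata_update_in_place(); the values
# are illustrative.  Starting from rows {'a': '1', 'b': '2'}, passing
# metadata={'a': '3', 'c': '4'} updates 'a' in place, soft-deletes 'b' and
# inserts a new row for 'c', which is the delete=True behaviour of
# instance_metadata_update() mentioned in the note above.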
def _instance_update(context, instance_uuid, values, copy_old_instance=False,
columns_to_join=None):
session = get_session()
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
with session.begin():
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session,
columns_to_join=columns_to_join)
if "expected_task_state" in values:
# it is not a db column so always pop out
expected = values.pop("expected_task_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["task_state"]
if actual_state not in expected:
if actual_state == task_states.DELETING:
raise exception.UnexpectedDeletingTaskStateError(
actual=actual_state, expected=expected)
else:
raise exception.UnexpectedTaskStateError(
actual=actual_state, expected=expected)
if "expected_vm_state" in values:
expected = values.pop("expected_vm_state")
if not isinstance(expected, (tuple, list, set)):
expected = (expected,)
actual_state = instance_ref["vm_state"]
if actual_state not in expected:
raise exception.UnexpectedVMStateError(actual=actual_state,
expected=expected)
instance_hostname = instance_ref['hostname'] or ''
if ("hostname" in values and
values["hostname"].lower() != instance_hostname.lower()):
_validate_unique_server_name(context,
session,
values['hostname'])
if copy_old_instance:
old_instance_ref = copy.copy(instance_ref)
else:
old_instance_ref = None
metadata = values.get('metadata')
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
values.pop('metadata'),
session)
system_metadata = values.get('system_metadata')
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
values.pop('system_metadata'),
session)
_handle_objects_related_type_conversions(values)
instance_ref.update(values)
session.add(instance_ref)
return (old_instance_ref, instance_ref)
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
sec_group_ref.save()
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
soft_delete()
###################
@require_context
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
session = get_session()
with session.begin():
info_cache = model_query(context, models.InstanceInfoCache,
session=session).\
filter_by(instance_uuid=instance_uuid).\
first()
if info_cache and info_cache['deleted']:
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance_uuid)
elif not info_cache:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry, re-create it.
info_cache = models.InstanceInfoCache()
values['instance_uuid'] = instance_uuid
try:
info_cache.update(values)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to
# recreate the instance cache entry at the same time. First one
# wins.
pass
return info_cache
@require_context
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
###################
def instance_extra_create(context, values):
inst_extra_ref = models.InstanceExtra()
inst_extra_ref.update(values)
inst_extra_ref.save()
return inst_extra_ref
def _instance_extra_get_by_instance_uuid_query(context, instance_uuid):
return (model_query(context, models.InstanceExtra)
.filter_by(instance_uuid=instance_uuid))
def instance_extra_get_by_instance_uuid(context, instance_uuid):
query = _instance_extra_get_by_instance_uuid_query(
context, instance_uuid)
instance_extra = query.first()
return instance_extra
###################
@require_context
def key_pair_create(context, values):
try:
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save()
return key_pair_ref
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
@require_context
def key_pair_destroy(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
soft_delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
@require_context
def key_pair_get(context, user_id, name):
nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
def key_pair_get_all_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
def key_pair_count_by_user(context, user_id):
nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
###################
@require_admin_context
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a project with a network.
    Called by project_get_networks under certain conditions
    and by the network manager's add_network_to_project().
    Only associates if the project doesn't already have a network
    or if force is True.
    force solves a race condition where a fresh project has multiple
    instance builds simultaneously picked up by multiple network hosts,
    which attempt to associate the project with multiple networks.
    force should only be used as a direct consequence of a user request;
    automated requests should never use force.
"""
session = get_session()
with session.begin():
def network_query(project_filter, id=None):
filter_kwargs = {'project_id': project_filter}
if id is not None:
filter_kwargs['id'] = id
return model_query(context, models.Network, session=session,
read_deleted="no").\
filter_by(**filter_kwargs).\
with_lockmode('update').\
first()
if not force:
# find out if project has a network
network_ref = network_query(project_id)
if force or not network_ref:
# in force mode or project doesn't have a network so associate
# with a new network
# get new network
network_ref = network_query(None, network_id)
if not network_ref:
raise exception.NoMoreNetworks()
# associate with network
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
network_ref['project_id'] = project_id
session.add(network_ref)
return network_ref
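# Sketch of the two call patterns described in the docstring above; `ctxt`,
# the project id and the network id are assumptions supplied by the caller.
def _example_associate_project_network(ctxt, project_id):
    # normal path: only associates if the project has no network yet
    net = network_associate(ctxt, project_id)
    # forced path: always associates the given unassociated network
    # net = network_associate(ctxt, project_id, network_id=42, force=True)
    return net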
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@require_admin_context
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
@require_admin_context
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
network_ref.save()
return network_ref
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
@require_admin_context
def network_delete_safe(context, network_id):
session = get_session()
with session.begin():
result = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(allocated=True).\
count()
if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = _network_get(context, network_id=network_id,
session=session)
model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
soft_delete()
session.delete(network_ref)
@require_admin_context
def network_disassociate(context, network_id, disassociate_host,
disassociate_project):
net_update = {}
if disassociate_project:
net_update['project_id'] = None
if disassociate_host:
net_update['host'] = None
network_update(context, network_id, net_update)
def _network_get(context, network_id, session=None, project_only='allow_none'):
result = model_query(context, models.Network, session=session,
project_only=project_only).\
filter_by(id=network_id).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_context
def network_get(context, network_id, project_only='allow_none'):
return _network_get(context, network_id, project_only=project_only)
@require_context
def network_get_all(context, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).all()
if not result:
raise exception.NoNetworksFound()
return result
@require_context
def network_get_all_by_uuids(context, network_uuids, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).\
filter(models.Network.uuid.in_(network_uuids)).\
all()
if not result:
raise exception.NoNetworksFound()
# check if the result contains all the networks
# we are looking for
for network_uuid in network_uuids:
for network in result:
if network['uuid'] == network_uuid:
break
else:
if project_only:
raise exception.NetworkNotFoundForProject(
network_uuid=network_uuid, project_id=context.project_id)
raise exception.NetworkNotFound(network_id=network_uuid)
return result
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
# pylint: disable=C0103
@require_admin_context
def network_get_associated_fixed_ips(context, network_id, host=None):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
# NOTE(vish): The ugly joins here are to solve a performance issue and
# should be removed once we can add and remove leases
# without regenerating the whole list
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
models.Instance.deleted == 0)
session = get_session()
query = session.query(models.FixedIp.address,
models.FixedIp.instance_uuid,
models.FixedIp.network_id,
models.FixedIp.virtual_interface_id,
models.VirtualInterface.address,
models.Instance.hostname,
models.Instance.updated_at,
models.Instance.created_at,
models.FixedIp.allocated,
models.FixedIp.leased).\
filter(models.FixedIp.deleted == 0).\
filter(models.FixedIp.network_id == network_id).\
join((models.VirtualInterface, vif_and)).\
join((models.Instance, inst_and)).\
filter(models.FixedIp.instance_uuid != null()).\
filter(models.FixedIp.virtual_interface_id != null())
if host:
query = query.filter(models.Instance.host == host)
result = query.all()
data = []
for datum in result:
cleaned = {}
cleaned['address'] = datum[0]
cleaned['instance_uuid'] = datum[1]
cleaned['network_id'] = datum[2]
cleaned['vif_id'] = datum[3]
cleaned['vif_address'] = datum[4]
cleaned['instance_hostname'] = datum[5]
cleaned['instance_updated'] = datum[6]
cleaned['instance_created'] = datum[7]
cleaned['allocated'] = datum[8]
cleaned['leased'] = datum[9]
data.append(cleaned)
return data
def network_in_use_on_host(context, network_id, host):
fixed_ips = network_get_associated_fixed_ips(context, network_id, host)
return len(fixed_ips) > 0
def _network_get_query(context, session=None):
return model_query(context, models.Network, session=session,
read_deleted="no")
@require_admin_context
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
if not result:
raise exception.NetworkNotFoundForUUID(uuid=uuid)
return result
@require_admin_context
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
models.Network.cidr_v6 == cidr)).\
first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@require_admin_context
def network_get_all_by_host(context, host):
session = get_session()
fixed_host_filter = or_(models.FixedIp.host == host,
models.Instance.host == host)
fixed_ip_query = model_query(context, models.FixedIp.network_id,
base_model=models.FixedIp,
session=session).\
outerjoin((models.VirtualInterface,
models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.VirtualInterface.instance_uuid)).\
filter(fixed_host_filter)
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
# or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
return _network_get_query(context, session=session).\
filter(host_filter).\
all()
@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
network_ref = _network_get_query(context, session=session).\
filter_by(id=network_id).\
with_lockmode('update').\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not network_ref['host']:
network_ref['host'] = host_id
session.add(network_ref)
return network_ref['host']
@require_context
def network_update(context, network_id, values):
session = get_session()
with session.begin():
network_ref = _network_get(context, network_id, session=session)
network_ref.update(values)
try:
network_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
###################
@require_context
def quota_get(context, project_id, resource, user_id=None):
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
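# Illustrative calls (ids are hypothetical):
#   quota_get(ctxt, 'proj-1', 'instances')            -> project-level Quota row
#   quota_get(ctxt, 'proj-1', 'instances', 'user-1')  -> ProjectUserQuota row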
@require_context
def quota_get_all_by_project_and_user(context, project_id, user_id):
nova.context.authorize_project_context(context, project_id)
user_quotas = model_query(context, models.ProjectUserQuota.resource,
models.ProjectUserQuota.hard_limit,
base_model=models.ProjectUserQuota).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for user_quota in user_quotas:
result[user_quota.resource] = user_quota.hard_limit
return result
@require_context
def quota_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_get_all(context, project_id):
nova.context.authorize_project_context(context, project_id)
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
return result
@require_admin_context
def quota_create(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
@require_admin_context
def quota_update(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
def quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
nova.context.authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
quota_class_ref.save()
return quota_class_ref
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
update({'hard_limit': limit})
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
def quota_usage_get(context, project_id, resource, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
if resource not in PER_PROJECT_QUOTAS:
result = query.filter_by(user_id=user_id).first()
else:
result = query.filter_by(user_id=None).first()
else:
result = query.first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
def _quota_usage_get_all(context, project_id, user_id=None):
nova.context.authorize_project_context(context, project_id)
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id)
result = {'project_id': project_id}
if user_id:
query = query.filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null()))
result['user_id'] = user_id
rows = query.all()
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved)
return result
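# Sketch of the returned dict's shape (values are hypothetical):
#   {'project_id': 'proj-1', 'user_id': 'user-1',   # 'user_id' only when given
#    'instances': {'in_use': 2, 'reserved': 0}, ...}
# Per-user rows and rows with a NULL user_id for the same resource are summed.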
@require_context
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
def quota_usage_get_all_by_project(context, project_id):
return _quota_usage_get_all(context, project_id)
def _quota_usage_create(context, project_id, user_id, resource, in_use,
reserved, until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
    # updated_at is needed to evaluate the max_age refresh window
quota_usage_ref.updated_at = timeutils.utcnow()
quota_usage_ref.save(session=session)
return quota_usage_ref
@require_admin_context
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
updates = {}
for key in ['in_use', 'reserved', 'until_refresh']:
if key in kwargs:
updates[key] = kwargs[key]
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null())).\
update(updates)
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
###################
def _reservation_create(context, uuid, usage, project_id, user_id, resource,
delta, expire, session=None):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under- or over-counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_project_user_quota_usages(context, session, project_id,
user_id):
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
with_lockmode('update').\
all()
proj_result = dict()
user_result = dict()
    # Accumulate per-project totals of in_use and reserved usage
for row in rows:
proj_result.setdefault(row.resource,
dict(in_use=0, reserved=0, total=0))
proj_result[row.resource]['in_use'] += row.in_use
proj_result[row.resource]['reserved'] += row.reserved
proj_result[row.resource]['total'] += (row.in_use + row.reserved)
if row.user_id is None or row.user_id == user_id:
user_result[row.resource] = row
return proj_result, user_result
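# Sketch of the two return values (resource names/values are hypothetical):
#   proj_result: {'instances': {'in_use': 3, 'reserved': 1, 'total': 4}, ...}
#                aggregated over every usage row of the project
#   user_result: {'instances': <QuotaUsage row>, ...}
#                only rows owned by the given user (or with no user_id)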
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
if user_id is None:
user_id = context.user_id
# Get the current usages
project_usages, user_usages = _get_project_user_quota_usages(
context, session, project_id, user_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
refresh = False
if ((resource not in PER_PROJECT_QUOTAS) and
(resource not in user_usages)):
user_usages[resource] = _quota_usage_create(elevated,
project_id,
user_id,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif ((resource in PER_PROJECT_QUOTAS) and
(resource not in user_usages)):
user_usages[resource] = _quota_usage_create(elevated,
project_id,
None,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif user_usages[resource].in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif user_usages[resource].until_refresh is not None:
user_usages[resource].until_refresh -= 1
if user_usages[resource].until_refresh <= 0:
refresh = True
elif max_age and (user_usages[resource].updated_at -
timeutils.utcnow()).seconds >= max_age:
refresh = True
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
updates = sync(elevated, project_id, user_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if ((res not in PER_PROJECT_QUOTAS) and
(res not in user_usages)):
user_usages[res] = _quota_usage_create(elevated,
project_id,
user_id,
res,
0, 0,
until_refresh or None,
session=session)
if ((res in PER_PROJECT_QUOTAS) and
(res not in user_usages)):
user_usages[res] = _quota_usage_create(elevated,
project_id,
None,
res,
0, 0,
until_refresh or None,
session=session)
if user_usages[res].in_use != in_use:
LOG.debug('quota_usages out of sync, updating. '
'project_id: %(project_id)s, '
'user_id: %(user_id)s, '
'resource: %(res)s, '
'tracked usage: %(tracked_use)s, '
'actual usage: %(in_use)s',
{'project_id': project_id,
'user_id': user_id,
'res': res,
'tracked_use': user_usages[res].in_use,
'in_use': in_use})
# Update the usage
user_usages[res].in_use = in_use
user_usages[res].until_refresh = until_refresh or None
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [res for res, delta in deltas.items()
if delta < 0 and
delta + user_usages[res].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
for key, value in user_usages.items():
if key not in project_usages:
project_usages[key] = value
overs = [res for res, delta in deltas.items()
if user_quotas[res] >= 0 and delta >= 0 and
(project_quotas[res] < delta +
project_usages[res]['total'] or
user_quotas[res] < delta +
user_usages[res].total)]
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for res, delta in deltas.items():
reservation = _reservation_create(elevated,
str(uuid.uuid4()),
user_usages[res],
project_id,
user_id,
res, delta, expire,
session=session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
user_usages[res].reserved += delta
# Apply updates to the usages table
for usage_ref in user_usages.values():
session.add(usage_ref)
if unders:
LOG.warning(_("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
if project_quotas == user_quotas:
usages = project_usages
else:
usages = user_usages
usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
for k, v in usages.items())
headroom = dict((res, user_quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in user_quotas.keys())
# If quota_cores is unlimited [-1]:
# - set cores headroom based on instances headroom:
if user_quotas.get('cores') == -1:
            if deltas.get('cores'):
hc = headroom['instances'] * deltas['cores']
headroom['cores'] = hc / deltas['instances']
else:
headroom['cores'] = headroom['instances']
# If quota_ram is unlimited [-1]:
# - set ram headroom based on instances headroom:
if user_quotas.get('ram') == -1:
            if deltas.get('ram'):
hr = headroom['instances'] * deltas['ram']
headroom['ram'] = hr / deltas['instances']
else:
headroom['ram'] = headroom['instances']
raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas,
usages=usages, headroom=headroom)
return reservations
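# Hypothetical usage sketch of quota_reserve (names/values are illustrative):
#   deltas = {'instances': 1, 'cores': 2, 'ram': 512}
#   reservations = quota_reserve(ctxt, resources, project_quotas, user_quotas,
#                                deltas, expire, until_refresh=None, max_age=0)
# On success it returns a list of reservation uuids that are later handed to
# reservation_commit() or reservation_rollback().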
def _quota_reservations_query(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update')
@require_context
@_retry_on_deadlock
def reservation_commit(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
_project_usages, user_usages = _get_project_user_quota_usages(
context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_context
@_retry_on_deadlock
def reservation_rollback(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
_project_usages, user_usages = _get_project_user_quota_usages(
context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation_query.soft_delete(synchronize_session=False)
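# Note: reservation_commit() and reservation_rollback() differ only in how the
# usage rows are settled -- commit moves each positive delta from 'reserved'
# into 'in_use', while rollback merely releases the reserved amount. Both then
# soft-delete the reservation rows.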
@require_admin_context
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
session = get_session()
with session.begin():
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
@require_admin_context
@_retry_on_deadlock
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
reservation_query = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter(models.Reservation.expire < current_time)
for reservation in reservation_query.join(models.QuotaUsage).all():
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
session.add(reservation.usage)
reservation_query.soft_delete(synchronize_session=False)
###################
def _ec2_volume_get_query(context, session=None):
return model_query(context, models.VolumeIdMapping,
session=session, read_deleted='yes')
def _ec2_snapshot_get_query(context, session=None):
return model_query(context, models.SnapshotIdMapping,
session=session, read_deleted='yes')
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
"""Create ec2 compatible volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
ec2_volume_ref.update({'id': id})
ec2_volume_ref.save()
return ec2_volume_ref
@require_context
def ec2_volume_get_by_uuid(context, volume_uuid):
result = _ec2_volume_get_query(context).\
filter_by(uuid=volume_uuid).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_uuid)
return result
@require_context
def ec2_volume_get_by_id(context, volume_id):
result = _ec2_volume_get_query(context).\
filter_by(id=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
"""Create ec2 compatible snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
ec2_snapshot_ref.update({'id': id})
ec2_snapshot_ref.save()
return ec2_snapshot_ref
@require_context
def ec2_snapshot_get_by_ec2_id(context, ec2_id):
result = _ec2_snapshot_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=ec2_id)
return result
@require_context
def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
result = _ec2_snapshot_get_query(context).\
filter_by(uuid=snapshot_uuid).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid)
return result
###################
def _block_device_mapping_get_query(context, session=None,
columns_to_join=None, use_slave=False):
if columns_to_join is None:
columns_to_join = []
query = model_query(context, models.BlockDeviceMapping,
session=session, use_slave=use_slave)
for column in columns_to_join:
query = query.options(joinedload(column))
return query
def _scrub_empty_str_values(dct, keys_to_scrub):
"""Remove any keys found in sequence keys_to_scrub from the dict
if they have the value ''.
"""
for key in keys_to_scrub:
if key in dct and dct[key] == '':
del dct[key]
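# Hypothetical example: _scrub_empty_str_values({'volume_size': '', 'a': 1},
# ['volume_size']) removes 'volume_size' in place, leaving {'a': 1}.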
def _from_legacy_values(values, legacy, allow_updates=False):
if legacy:
if allow_updates and block_device.is_safe_for_update(values):
return values
else:
return block_device.BlockDeviceDict.from_legacy(values)
else:
return values
@require_context
def block_device_mapping_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy)
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save()
return bdm_ref
@require_context
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
query.update(values)
return query.first()
def block_device_mapping_update_or_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
session = get_session()
with session.begin():
result = None
# NOTE(xqueralt): Only update a BDM when device_name was provided. We
# allow empty device names so they will be set later by the manager.
if values['device_name']:
query = _block_device_mapping_get_query(context, session=session)
result = query.filter_by(instance_uuid=values['instance_uuid'],
device_name=values['device_name']).first()
if result:
result.update(values)
else:
# Either the device_name doesn't exist in the database yet, or no
# device_name was provided. Both cases mean creating a new BDM.
result = models.BlockDeviceMapping(**values)
result.save(session=session)
# NOTE(xqueralt): Prevent from having multiple swap devices for the
# same instance. This will delete all the existing ones.
if block_device.new_format_is_swap(values):
query = _block_device_mapping_get_query(context, session=session)
query = query.filter_by(instance_uuid=values['instance_uuid'],
source_type='blank', guest_format='swap')
query = query.filter(models.BlockDeviceMapping.id != result.id)
query.soft_delete()
return result
@require_context
def block_device_mapping_get_all_by_instance(context, instance_uuid,
use_slave=False):
return _block_device_mapping_get_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
def block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
first()
@require_context
def block_device_mapping_destroy(context, bdm_id):
_block_device_mapping_get_query(context).\
filter_by(id=bdm_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
soft_delete()
###################
def _security_group_create(context, values, session=None):
security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless we touch this attribute, the 'rules' relationship
    # fails with a lazy-load exception once save() is called. This will get
    # cleaned up in the next ORM pass.
security_group_ref.rules
security_group_ref.update(values)
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=values['project_id'],
security_group_name=values['name'])
return security_group_ref
def _security_group_get_query(context, session=None, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup, session=session,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
query = query.options(joinedload_all('rules.grantee_group'))
return query
def _security_group_get_by_names(context, session, project_id, group_names):
"""Get security group models for a project by a list of names.
Raise SecurityGroupNotFoundForProject for a name not found.
"""
query = _security_group_get_query(context, session=session,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
if len(sg_models) == len(group_names):
return sg_models
# Find the first one missing and raise
group_names_from_models = [x.name for x in sg_models]
for group_name in group_names:
if group_name not in group_names_from_models:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
# Not Reached
@require_context
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
def security_group_get(context, security_group_id, columns_to_join=None):
query = _security_group_get_query(context, project_only=True).\
filter_by(id=security_group_id)
if columns_to_join is None:
columns_to_join = []
for column in columns_to_join:
if column.startswith('instances'):
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
query = _security_group_get_query(context,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
if columns_to_join is None:
columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
return result
@require_context
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_context
def security_group_get_by_instance(context, instance_uuid):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
filter_by(uuid=instance_uuid).\
all()
@require_context
def security_group_in_use(context, group_id):
session = get_session()
with session.begin():
# Are there any instances that haven't been deleted
# that include this group?
inst_assoc = model_query(context,
models.SecurityGroupInstanceAssociation,
read_deleted="no", session=session).\
filter_by(security_group_id=group_id).\
all()
for ia in inst_assoc:
num_instances = model_query(context, models.Instance,
session=session, read_deleted="no").\
filter_by(uuid=ia.instance_uuid).\
count()
if num_instances:
return True
return False
@require_context
def security_group_create(context, values):
return _security_group_create(context, values)
@require_context
def security_group_update(context, security_group_id, values,
columns_to_join=None):
session = get_session()
with session.begin():
query = model_query(context, models.SecurityGroup,
session=session).filter_by(id=security_group_id)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload_all(column))
security_group_ref = query.first()
if not security_group_ref:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
security_group_ref.update(values)
name = security_group_ref['name']
project_id = security_group_ref['project_id']
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=project_id,
security_group_name=name)
return security_group_ref
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
session = get_session()
with session.begin():
try:
default_group = _security_group_get_by_names(context,
session,
context.project_id,
['default'])[0]
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
default_group = _security_group_create(context, values,
session=session)
usage = model_query(context, models.QuotaUsage,
read_deleted="no", session=session).\
filter_by(project_id=context.project_id).\
filter_by(user_id=context.user_id).\
filter_by(resource='security_groups')
        # Create quota usage for the auto-created default security group
if not usage.first():
elevated = context.elevated()
_quota_usage_create(elevated,
context.project_id,
context.user_id,
'security_groups',
1, 0,
None,
session=session)
else:
usage.update({'in_use': int(usage.first().in_use) + 1})
default_rules = _security_group_rule_get_default_query(context,
session=session).all()
for default_rule in default_rules:
            # This is suboptimal; the fields copied from default_rule should be
            # determined programmatically rather than listed by hand.
rule_values = {'protocol': default_rule.protocol,
'from_port': default_rule.from_port,
'to_port': default_rule.to_port,
'cidr': default_rule.cidr,
'parent_group_id': default_group.id,
}
_security_group_rule_create(context,
rule_values,
session=session)
return default_group
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
model_query(context, models.SecurityGroup,
session=session).\
filter_by(id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(security_group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
def _security_group_count_by_project_and_user(context, project_id, user_id,
session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
###################
def _security_group_rule_create(context, values, session=None):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save(session=session)
return security_group_rule_ref
def _security_group_rule_get_query(context, session=None):
return model_query(context, models.SecurityGroupIngressRule,
session=session)
@require_context
def security_group_rule_get(context, security_group_rule_id):
result = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
first())
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['grantee_group.instances.system_metadata',
'grantee_group.instances.info_cache']
query = (_security_group_rule_get_query(context).
filter_by(parent_group_id=security_group_id))
for column in columns_to_join:
query = query.options(joinedload_all(column))
return query.all()
@require_context
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
return (_security_group_rule_get_query(context).
filter_by(group_id=security_group_id).
all())
@require_context
def security_group_rule_create(context, values):
return _security_group_rule_create(context, values)
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
count = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
soft_delete())
if count == 0:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
@require_context
def security_group_rule_count_by_group(context, security_group_id):
return (model_query(context, models.SecurityGroupIngressRule,
read_deleted="no").
filter_by(parent_group_id=security_group_id).
count())
###################
def _security_group_rule_get_default_query(context, session=None):
return model_query(context, models.SecurityGroupIngressDefaultRule,
session=session)
@require_context
def security_group_default_rule_get(context, security_group_rule_default_id):
result = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
first()
if not result:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
return result
@require_admin_context
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
session = get_session()
with session.begin():
count = _security_group_rule_get_default_query(context,
session=session).\
filter_by(id=security_group_rule_default_id).\
soft_delete()
if count == 0:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
@require_admin_context
def security_group_default_rule_create(context, values):
security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
security_group_default_rule_ref.update(values)
security_group_default_rule_ref.save()
return security_group_default_rule_ref
@require_context
def security_group_default_rule_list(context):
return _security_group_rule_get_default_query(context).\
all()
###################
@require_admin_context
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save()
return fw_rule_ref
@require_admin_context
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
session = get_session()
with session.begin():
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
soft_delete()
###################
@require_context
def project_get_networks(context, project_id, associate=True):
    # NOTE(tr3buchet): as before, this function will associate
    # a project with a network if it doesn't have one and
    # associate is True
result = model_query(context, models.Network, read_deleted="no").\
filter_by(project_id=project_id).\
all()
if not result:
if not associate:
return []
return [network_associate(context, project_id)]
return result
###################
@require_admin_context
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save()
return migration
@require_admin_context
def migration_update(context, id, values):
session = get_session()
with session.begin():
migration = _migration_get(context, id, session=session)
migration.update(values)
return migration
def _migration_get(context, id, session=None):
result = model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
@require_admin_context
def migration_get(context, id):
return _migration_get(context, id)
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
status=status)
return result
@require_admin_context
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute, use_slave=False):
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes",
use_slave=use_slave).\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
all()
@require_admin_context
def migration_get_in_progress_by_host_and_node(context, host, node):
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['confirmed', 'reverted',
'error'])).\
options(joinedload_all('instance.system_metadata')).\
all()
@require_admin_context
def migration_get_all_by_filters(context, filters):
query = model_query(context, models.Migration)
if "status" in filters:
query = query.filter(models.Migration.status == filters["status"])
if "host" in filters:
host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host,
models.Migration.dest_compute == host))
return query.all()
##################
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
try:
pool.save()
except db_exc.DBDuplicateEntry:
raise exception.ConsolePoolExists(
host=values["host"],
console_type=values["console_type"],
compute_host=values["compute_host"],
)
return pool
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(
host=host, console_type=console_type,
compute_host=compute_host)
return result
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
options(joinedload('consoles')).\
all()
def console_create(context, values):
console = models.Console()
console.update(values)
console.save()
return console
def console_delete(context, console_id):
session = get_session()
with session.begin():
# NOTE(mdragon): consoles are meant to be transient.
session.query(models.Console).\
filter_by(id=console_id).\
delete()
def console_get_by_pool_instance(context, pool_id, instance_uuid):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
filter_by(instance_uuid=instance_uuid).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(
pool_id=pool_id, instance_uuid=instance_uuid)
return result
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
return query.all()
def console_get(context, console_id, instance_uuid=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
options(joinedload('pool'))
if instance_uuid is not None:
query = query.filter_by(instance_uuid=instance_uuid)
result = query.first()
if not result:
if instance_uuid:
raise exception.ConsoleNotFoundForInstance(
console_id=console_id, instance_uuid=instance_uuid)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
@require_admin_context
def flavor_create(context, values, projects=None):
"""Create a new instance type. In order to pass in extra specs,
    the values dict should contain an 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.iteritems():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
if projects is None:
projects = []
session = get_session()
with session.begin():
try:
instance_type_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'flavorid' in e.columns:
raise exception.FlavorIdExists(flavor_id=values['flavorid'])
raise exception.FlavorExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_ref.id,
"project_id": project})
access_ref.save()
return _dict_with_extra_specs(instance_type_ref)
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value'])
for x in inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
def _flavor_get_query(context, session=None, read_deleted=None):
query = model_query(context, models.InstanceTypes, session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
the_filter = [models.InstanceTypes.is_public == true()]
the_filter.extend([
models.InstanceTypes.projects.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
@require_context
def flavor_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
"""Returns all flavors.
"""
filters = filters or {}
# FIXME(sirp): now that we have the `disabled` field for flavors, we
# should probably remove the use of `deleted` to mark inactive. `deleted`
# should mean truly deleted, e.g. we can safely purge the record out of the
# database.
read_deleted = "yes" if inactive else "no"
query = _flavor_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
if 'disabled' in filters:
query = query.filter(
models.InstanceTypes.disabled == filters['disabled'])
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.InstanceTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
the_filter.extend([
models.InstanceTypes.projects.any(
project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
marker_row = None
if marker is not None:
marker_row = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=marker).\
first()
if not marker_row:
raise exception.MarkerNotFound(marker)
query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
[sort_key, 'id'],
marker=marker_row,
sort_dir=sort_dir)
inst_types = query.all()
return [_dict_with_extra_specs(i) for i in inst_types]
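# Hedged example of a filtered, paginated call (values are illustrative):
#   flavor_get_all(ctxt, filters={'min_memory_mb': 512, 'is_public': True},
#                  sort_key='flavorid', limit=10, marker='prev-flavorid')
# The marker must match an existing flavorid, otherwise MarkerNotFound is
# raised before pagination.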
def _flavor_get_id_from_flavor_query(context, flavor_id, session=None):
return model_query(context, models.InstanceTypes.id, read_deleted="no",
session=session, base_model=models.InstanceTypes).\
filter_by(flavorid=flavor_id)
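# The query above is deliberately not executed here; callers either take its
# .first() result (_flavor_get_id_from_flavor) or embed it as a scalar
# subquery via filter_by(instance_type_id=<this query>), as done in
# flavor_access_get_by_flavor_id and _flavor_extra_specs_get_query.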
def _flavor_get_id_from_flavor(context, flavor_id, session=None):
result = _flavor_get_id_from_flavor_query(context, flavor_id,
session=session).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return result[0]
@require_context
def flavor_get(context, id):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(id=id).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=id)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_name(context, name):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(name=name).\
first()
if not result:
raise exception.FlavorNotFoundByName(flavor_name=name)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=flavor_id).\
order_by(asc("deleted"), asc("id")).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return _dict_with_extra_specs(result)
@require_admin_context
def flavor_destroy(context, name):
"""Marks specific flavor as deleted."""
session = get_session()
with session.begin():
ref = model_query(context, models.InstanceTypes, session=session,
read_deleted="no").\
filter_by(name=name).\
first()
if not ref:
raise exception.FlavorNotFoundByName(flavor_name=name)
ref.soft_delete(session=session)
model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
model_query(context, models.InstanceTypeProjects,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
def _flavor_access_query(context, session=None):
return model_query(context, models.InstanceTypeProjects, session=session,
read_deleted="no")
@require_admin_context
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access list by flavor id."""
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
access_refs = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id_subq).\
all()
return access_refs
@require_admin_context
def flavor_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_id,
"project_id": project_id})
try:
access_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
return access_ref
@require_admin_context
def flavor_access_remove(context, flavor_id, project_id):
"""Remove given tenant from the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
count = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id).\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
def _flavor_extra_specs_get_query(context, flavor_id, session=None):
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
return model_query(context, models.InstanceTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id_subq)
@require_context
def flavor_extra_specs_get(context, flavor_id):
rows = _flavor_extra_specs_get_query(context, flavor_id).all()
return dict([(row['key'], row['value']) for row in rows])
@require_context
def flavor_extra_specs_get_item(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
first()
if not result:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
return {result["key"]: result["value"]}
@require_context
def flavor_extra_specs_delete(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
soft_delete(synchronize_session=False)
# did not find the extra spec
if result == 0:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
@require_context
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
instance_type_id = _flavor_get_id_from_flavor(context,
flavor_id, session)
spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=instance_type_id).\
filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
all()
existing_keys = set()
for spec_ref in spec_refs:
key = spec_ref["key"]
existing_keys.add(key)
spec_ref.update({"value": specs[key]})
for key, value in specs.iteritems():
if key in existing_keys:
continue
spec_ref = models.InstanceTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id})
session.add(spec_ref)
return specs
except db_exc.DBDuplicateEntry:
            # A concurrent transaction has been committed; try again
            # unless this was the last attempt.
if attempt == max_retries - 1:
raise exception.FlavorExtraSpecUpdateCreateFailed(
id=flavor_id, retries=max_retries)
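# Hypothetical call sketch ('42' stands for a flavorid, not the flavor name):
#   flavor_extra_specs_update_or_create(ctxt, '42',
#                                       {'hw:cpu_policy': 'dedicated'})
# The retry loop above absorbs DBDuplicateEntry races from concurrent writers
# and raises FlavorExtraSpecUpdateCreateFailed after max_retries attempts.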
####################
@require_admin_context
def cell_create(context, values):
cell = models.Cell()
cell.update(values)
try:
cell.save()
except db_exc.DBDuplicateEntry:
raise exception.CellExists(name=values['name'])
return cell
def _cell_get_by_name_query(context, cell_name, session=None):
return model_query(context, models.Cell,
session=session).filter_by(name=cell_name)
@require_admin_context
def cell_update(context, cell_name, values):
session = get_session()
with session.begin():
cell_query = _cell_get_by_name_query(context, cell_name,
session=session)
if not cell_query.update(values):
raise exception.CellNotFound(cell_name=cell_name)
cell = cell_query.first()
return cell
@require_admin_context
def cell_delete(context, cell_name):
return _cell_get_by_name_query(context, cell_name).soft_delete()
@require_admin_context
def cell_get(context, cell_name):
result = _cell_get_by_name_query(context, cell_name).first()
if not result:
raise exception.CellNotFound(cell_name=cell_name)
return result
@require_admin_context
def cell_get_all(context):
return model_query(context, models.Cell, read_deleted="no").all()
########################
# User-provided metadata
def _instance_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceMetadata,
session=session, use_slave=use_slave).\
filter(
models.InstanceMetadata.instance_uuid.in_(instance_uuids))
def _instance_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceMetadata, session=session,
read_deleted="no").\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
@require_context
@_retry_on_deadlock
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
soft_delete()
@require_context
@_retry_on_deadlock
def instance_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
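# Illustrative behaviour of the delete flag (keys/values are hypothetical):
#   instance_metadata_update(ctxt, uuid, {'role': 'web'}, delete=True)
# soft-deletes every existing key not present in the new dict, so the
# instance's visible user metadata ends up as exactly {'role': 'web'}.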
#######################
# System-owned metadata
def _instance_system_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
session=session, use_slave=use_slave).\
filter(
models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceSystemMetadata,
session=session).\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return dict((row['key'], row['value']) for row in rows)
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
####################
@require_admin_context
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
try:
agent_build_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
os=values['os'], architecture=values['architecture'])
return agent_build_ref
@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture):
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
filter_by(os=os).\
filter_by(architecture=architecture).\
first()
@require_admin_context
def agent_build_get_all(context, hypervisor=None):
if hypervisor:
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
all()
else:
return model_query(context, models.AgentBuild, read_deleted="no").\
all()
@require_admin_context
def agent_build_destroy(context, agent_build_id):
rows_affected = model_query(context, models.AgentBuild).filter_by(
id=agent_build_id).soft_delete()
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
@require_admin_context
def agent_build_update(context, agent_build_id, values):
rows_affected = model_query(context, models.AgentBuild).\
filter_by(id=agent_build_id).\
update(values)
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
####################
@require_context
def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
return model_query(context, models.BandwidthUsage, read_deleted="yes",
use_slave=use_slave).\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
first()
@require_context
def bw_usage_get_by_uuids(context, uuids, start_period):
return model_query(context, models.BandwidthUsage, read_deleted="yes").\
filter(models.BandwidthUsage.uuid.in_(uuids)).\
filter_by(start_period=start_period).\
all()
@require_context
@_retry_on_deadlock
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None):
session = get_session()
if last_refreshed is None:
last_refreshed = timeutils.utcnow()
# NOTE(comstud): More often than not, we'll be updating records vs
# creating records. Optimize accordingly, trying to update existing
# records. Fall back to creation when no rows are updated.
with session.begin():
values = {'last_refreshed': last_refreshed,
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'bw_in': bw_in,
'bw_out': bw_out}
rows = model_query(context, models.BandwidthUsage,
session=session, read_deleted="yes").\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
update(values, synchronize_session=False)
if rows:
return
bwusage = models.BandwidthUsage()
bwusage.start_period = start_period
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = last_refreshed
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.last_ctr_in = last_ctr_in
bwusage.last_ctr_out = last_ctr_out
try:
bwusage.save(session=session)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to create
# the usage entry at the same time. First one wins.
pass
####################
@require_context
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == null(),
models.VolumeUsage.curr_last_refreshed > begin,
)).\
all()
@require_context
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
session = get_session()
refreshed = timeutils.utcnow()
with session.begin():
values = {}
# NOTE(dricco): We will be mostly updating current usage records vs
# updating total or creating records. Optimize accordingly.
if not update_totals:
values = {'curr_last_refreshed': refreshed,
'curr_reads': rd_req,
'curr_read_bytes': rd_bytes,
'curr_writes': wr_req,
'curr_write_bytes': wr_bytes,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
else:
values = {'tot_last_refreshed': refreshed,
'tot_reads': models.VolumeUsage.tot_reads + rd_req,
'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
rd_bytes,
'tot_writes': models.VolumeUsage.tot_writes + wr_req,
'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
wr_bytes,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
current_usage = model_query(context, models.VolumeUsage,
session=session, read_deleted="yes").\
filter_by(volume_id=id).\
first()
if current_usage:
if (rd_req < current_usage['curr_reads'] or
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
LOG.info(_("Volume(%s) has lower stats then what is in "
"the database. Instance must have been rebooted "
"or crashed. Updating totals.") % id)
if not update_totals:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'])
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'])
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'])
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'])
else:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'] +
rd_req)
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'] + rd_bytes)
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'] +
wr_req)
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
current_usage.save(session=session)
session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
vol_usage.volume_id = id
vol_usage.instance_uuid = instance_id
vol_usage.project_id = project_id
vol_usage.user_id = user_id
vol_usage.availability_zone = availability_zone
if not update_totals:
vol_usage.curr_last_refreshed = refreshed
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
else:
vol_usage.tot_last_refreshed = refreshed
vol_usage.tot_reads = rd_req
vol_usage.tot_read_bytes = rd_bytes
vol_usage.tot_writes = wr_req
vol_usage.tot_write_bytes = wr_bytes
vol_usage.save(session=session)
return vol_usage
####################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_id)
return result
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_uuid)
return result
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save()
except Exception as e:
raise db_exc.DBError(e)
return s3_image_ref
####################
def _aggregate_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def aggregate_create(context, values, metadata=None):
session = get_session()
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no')
aggregate = query.first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.save(session=session)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this aggregate.
aggregate._hosts = []
aggregate._metadata = []
else:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
return aggregate_get(context, aggregate.id)
def aggregate_get(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id)
aggregate = query.first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
return aggregate
def aggregate_get_by_host(context, host, key=None):
"""Return rows that match host (mandatory) and metadata key (optional).
    :param host: matches host, and is required.
    :param key: matches metadata key, if not None.
"""
query = model_query(context, models.Aggregate)
query = query.options(joinedload('_hosts'))
query = query.options(joinedload('_metadata'))
query = query.join('_hosts')
query = query.filter(models.AggregateHost.host == host)
if key:
query = query.join("_metadata").filter(
models.AggregateMetadata.key == key)
return query.all()
def aggregate_metadata_get_by_host(context, host, key=None):
query = model_query(context, models.Aggregate)
query = query.join("_hosts")
query = query.join("_metadata")
query = query.filter(models.AggregateHost.host == host)
query = query.options(contains_eager("_metadata"))
if key:
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key):
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.Aggregate.id == aggregate_id)
query = query.options(contains_eager("_metadata"))
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
def aggregate_host_get_by_metadata_key(context, key):
rows = aggregate_get_by_metadata_key(context, key)
metadata = collections.defaultdict(set)
for agg in rows:
for agghost in agg._hosts:
metadata[agghost.host].add(agg._metadata[0]['value'])
return dict(metadata)
def aggregate_get_by_metadata_key(context, key):
"""Return rows that match metadata key.
    :param key: matches metadata key.
"""
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.AggregateMetadata.key == key)
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
return query.all()
def aggregate_update(context, aggregate_id, values):
session = get_session()
if "name" in values:
aggregate_by_name = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no').first())
if aggregate_by_name and aggregate_by_name.id != aggregate_id:
# there is another aggregate with the new name
raise exception.AggregateNameExists(aggregate_name=values['name'])
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).first())
set_delete = True
if aggregate:
if "availability_zone" in values:
az = values.pop('availability_zone')
if 'metadata' not in values:
values['metadata'] = {'availability_zone': az}
set_delete = False
else:
values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
aggregate_id,
values.pop('metadata'),
set_delete=set_delete)
aggregate.update(values)
aggregate.save(session=session)
values['metadata'] = metadata
return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
def aggregate_delete(context, aggregate_id):
session = get_session()
with session.begin():
count = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).\
soft_delete()
if count == 0:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
# Delete Metadata
model_query(context,
models.AggregateMetadata, session=session).\
filter_by(aggregate_id=aggregate_id).\
soft_delete()
def aggregate_get_all(context):
return _aggregate_get_query(context, models.Aggregate).all()
def _aggregate_metadata_get_query(context, aggregate_id, session=None,
read_deleted="yes"):
return model_query(context,
models.AggregateMetadata,
read_deleted=read_deleted,
session=session).\
filter_by(aggregate_id=aggregate_id)
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
return dict([(r['key'], r['value']) for r in rows])
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
count = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
all_keys = metadata.keys()
for attempt in xrange(max_retries):
try:
session = get_session()
with session.begin():
query = _aggregate_metadata_get_query(context, aggregate_id,
read_deleted='no',
session=session)
if set_delete:
query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = \
query.filter(models.AggregateMetadata.key.in_(all_keys))
already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({"value": metadata[key]})
already_existing_keys.add(key)
new_entries = []
for key, value in metadata.iteritems():
if key in already_existing_keys:
continue
new_entries.append({"key": key,
"value": value,
"aggregate_id": aggregate_id})
if new_entries:
session.execute(
models.AggregateMetadata.__table__.insert(),
new_entries)
return metadata
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
with excutils.save_and_reraise_exception() as ctxt:
if attempt < max_retries - 1:
ctxt.reraise = False
else:
msg = _("Add metadata failed for aggregate %(id)s after "
"%(retries)s retries") % {"id": aggregate_id,
"retries": max_retries}
LOG.warn(msg)
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id):
rows = model_query(context,
models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
return [r.host for r in rows]
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
count = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id).\
filter_by(host=host).\
soft_delete()
if count == 0:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
host_ref = models.AggregateHost()
host_ref.update({"host": host, "aggregate_id": aggregate_id})
try:
host_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
return host_ref
################
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
fault_ref.save()
return dict(fault_ref.iteritems())
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
if not instance_uuids:
return {}
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
order_by(desc("created_at"), desc("id")).\
all()
output = {}
for instance_uuid in instance_uuids:
output[instance_uuid] = []
for row in rows:
data = dict(row.iteritems())
output[row['instance_uuid']].append(data)
return output
##################
def action_start(context, values):
convert_objects_related_datetimes(values, 'start_time')
action_ref = models.InstanceAction()
action_ref.update(values)
action_ref.save()
return action_ref
def action_finish(context, values):
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action_ref = model_query(context, models.InstanceAction,
session=session).\
filter_by(instance_uuid=values['instance_uuid']).\
filter_by(request_id=values['request_id']).\
first()
if not action_ref:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
action_ref.update(values)
return action_ref
def actions_get(context, instance_uuid):
"""Get all instance actions for the provided uuid."""
actions = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
order_by(desc("created_at"), desc("id")).\
all()
return actions
def action_get_by_request_id(context, instance_uuid, request_id):
"""Get the action by request_id and given instance."""
action = _action_get_by_request_id(context, instance_uuid, request_id)
return action
def _action_get_by_request_id(context, instance_uuid, request_id,
session=None):
result = model_query(context, models.InstanceAction, session=session).\
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
first()
return result
def action_event_start(context, values):
"""Start an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
values['action_id'] = action['id']
event_ref = models.InstanceActionEvent()
event_ref.update(values)
session.add(event_ref)
return event_ref
def action_event_finish(context, values):
"""Finish an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
event_ref = model_query(context, models.InstanceActionEvent,
session=session).\
filter_by(action_id=action['id']).\
filter_by(event=values['event']).\
first()
if not event_ref:
raise exception.InstanceActionEventNotFound(action_id=action['id'],
event=values['event'])
event_ref.update(values)
if values['result'].lower() == 'error':
action.update({'message': 'Error'})
return event_ref
def action_events_get(context, action_id):
events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
order_by(desc("created_at"), desc("id")).\
all()
return events
def action_event_get_by_id(context, action_id, event_id):
event = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
filter_by(id=event_id).\
first()
return event
##################
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
ec2_instance_ref.update({'id': id})
ec2_instance_ref.save()
return ec2_instance_ref
@require_context
def ec2_instance_get_by_uuid(context, instance_uuid):
result = _ec2_instance_get_query(context).\
filter_by(uuid=instance_uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_uuid)
return result
@require_context
def get_ec2_instance_id_by_uuid(context, instance_id):
result = ec2_instance_get_by_uuid(context, instance_id)
return result['id']
@require_context
def ec2_instance_get_by_id(context, instance_id):
result = _ec2_instance_get_query(context).\
filter_by(id=instance_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
@require_context
def get_instance_uuid_by_ec2_id(context, ec2_id):
result = ec2_instance_get_by_id(context, ec2_id)
return result['uuid']
def _ec2_instance_get_query(context, session=None):
return model_query(context,
models.InstanceIdMapping,
session=session,
read_deleted='yes')
def _task_log_get_query(context, task_name, period_beginning,
period_ending, host=None, state=None, session=None):
query = model_query(context, models.TaskLog, session=session).\
filter_by(task_name=task_name).\
filter_by(period_beginning=period_beginning).\
filter_by(period_ending=period_ending)
if host is not None:
query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
return query
@require_admin_context
def task_log_get(context, task_name, period_beginning, period_ending, host,
state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).first()
@require_admin_context
def task_log_get_all(context, task_name, period_beginning, period_ending,
host=None, state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).all()
@require_admin_context
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
task = models.TaskLog()
task.task_name = task_name
task.period_beginning = period_beginning
task.period_ending = period_ending
task.host = host
task.state = "RUNNING"
if message:
task.message = message
if task_items:
task.task_items = task_items
try:
task.save()
except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
@require_admin_context
def task_log_end_task(context, task_name, period_beginning, period_ending,
host, errors, message=None):
values = dict(state="DONE", errors=errors)
if message:
values["message"] = message
session = get_session()
with session.begin():
rows = _task_log_get_query(context, task_name, period_beginning,
period_ending, host, session=session).\
update(values)
if rows == 0:
# It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
def _get_default_deleted_value(table):
# TODO(dripton): It would be better to introspect the actual default value
# from the column, but I don't see a way to do that in the low-level APIs
# of SQLAlchemy 0.7. 0.8 has better introspection APIs, which we should
# use when Nova is ready to require 0.8.
# NOTE(mikal): this is a little confusing. This method returns the value
# that a _not_deleted_ row would have.
deleted_column_type = table.c.deleted.type
if isinstance(deleted_column_type, Integer):
return 0
elif isinstance(deleted_column_type, Boolean):
return False
elif isinstance(deleted_column_type, String):
return ""
else:
return None
@require_admin_context
def archive_deleted_rows_for_table(context, tablename, max_rows):
"""Move up to max_rows rows from one tables to the corresponding
shadow table. The context argument is only used for the decorator.
:returns: number of rows archived
"""
# NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils
# imports nova.db.sqlalchemy.api.
from nova.db.sqlalchemy import utils as db_utils
engine = get_engine()
conn = engine.connect()
metadata = MetaData()
metadata.bind = engine
table = Table(tablename, metadata, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
rows_archived = 0
try:
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
# No corresponding shadow table; skip it.
return rows_archived
if tablename == "dns_domains":
# We have one table (dns_domains) where the key is called
# "domain" rather than "id"
column = table.c.domain
else:
column = table.c.id
# NOTE(guochbo): Use InsertFromSelect and DeleteFromSelect to avoid
# database's limit of maximum parameter in one SQL statement.
query_insert = sql.select([table],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
query_delete = sql.select([column],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
insert_statement = sqlalchemyutils.InsertFromSelect(
shadow_table, query_insert)
delete_statement = db_utils.DeleteFromSelect(table, query_delete, column)
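    # An editorial sketch of what the two statements above roughly correspond
    # to (illustrative only; the exact SQL emitted by SQLAlchemy differs):
    #   INSERT INTO shadow_<tablename>
    #       SELECT * FROM <tablename> WHERE deleted != <default>
    #       ORDER BY <key> LIMIT <max_rows>;
    #   DELETE FROM <tablename> WHERE <key> IN
    #       (SELECT <key> FROM <tablename> WHERE deleted != <default>
    #        ORDER BY <key> LIMIT <max_rows>);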
try:
# Group the insert and delete in a transaction.
with conn.begin():
conn.execute(insert_statement)
result_delete = conn.execute(delete_statement)
except IntegrityError:
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
msg = _("IntegrityError detected when archiving table %s") % tablename
LOG.warn(msg)
return rows_archived
rows_archived = result_delete.rowcount
return rows_archived
@require_admin_context
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: Number of rows archived.
"""
# The context argument is only used for the decorator.
tablenames = []
for model_class in models.__dict__.itervalues():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
rows_archived = 0
for tablename in tablenames:
rows_archived += archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows - rows_archived)
if rows_archived >= max_rows:
break
return rows_archived
####################
def _instance_group_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.InstanceGroup: ['_policies', '_members']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def instance_group_create(context, values, policies=None,
members=None):
"""Create a new group."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
values['uuid'] = uuid
session = get_session()
with session.begin():
try:
group = models.InstanceGroup()
group.update(values)
group.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=uuid)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this instance group.
group._policies = []
group._members = []
if policies:
_instance_group_policies_add(context, group.id, policies,
session=session)
if members:
_instance_group_members_add(context, group.id, members,
session=session)
return instance_group_get(context, uuid)
def instance_group_get(context, group_uuid):
"""Get a specific group by uuid."""
group = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return group
def instance_group_update(context, group_uuid, values):
"""Update the attributes of an group.
If values contains a metadata key, it updates the aggregate metadata
too. Similarly for the policies and members.
"""
session = get_session()
with session.begin():
group = model_query(context,
models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
policies = values.get('policies')
if policies is not None:
_instance_group_policies_add(context,
group.id,
values.pop('policies'),
set_delete=True,
session=session)
members = values.get('members')
if members is not None:
_instance_group_members_add(context,
group.id,
values.pop('members'),
set_delete=True,
session=session)
group.update(values)
if policies:
values['policies'] = policies
if members:
values['members'] = members
def instance_group_delete(context, group_uuid):
"""Delete an group."""
session = get_session()
with session.begin():
group_id = _instance_group_id(context, group_uuid, session=session)
count = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid,
session=session).soft_delete()
if count == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies, metadata and members
instance_models = [models.InstanceGroupPolicy,
models.InstanceGroupMember]
for model in instance_models:
model_query(context, model, session=session).\
filter_by(group_id=group_id).\
soft_delete()
def instance_group_get_all(context):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).all()
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).\
filter_by(project_id=project_id).\
all()
def _instance_group_model_get_query(context, model_class, group_id,
session=None, read_deleted='no'):
return model_query(context,
model_class,
read_deleted=read_deleted,
session=session).\
filter_by(group_id=group_id)
def _instance_group_id(context, group_uuid, session=None):
"""Returns the group database ID for the group UUID."""
result = model_query(context,
models.InstanceGroup.id,
base_model=models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not result:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return result.id
def _instance_group_members_add(context, id, members, set_delete=False,
session=None):
if not session:
session = get_session()
all_members = set(members)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupMember,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupMember.instance_id.in_(
all_members)).\
soft_delete(synchronize_session=False)
query = query.filter(
models.InstanceGroupMember.instance_id.in_(all_members))
already_existing = set()
for member_ref in query.all():
already_existing.add(member_ref.instance_id)
for instance_id in members:
if instance_id in already_existing:
continue
member_ref = models.InstanceGroupMember()
member_ref.update({'instance_id': instance_id,
'group_id': id})
session.add(member_ref)
return members
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_members_add(context, id, members,
set_delete=set_delete)
def instance_group_member_delete(context, group_uuid, instance_id):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupMember,
models.InstanceGroupMember.group_id,
id).\
filter_by(instance_id=instance_id).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid,
instance_id=instance_id)
def instance_group_members_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
instances = model_query(context,
models.InstanceGroupMember.instance_id,
base_model=models.InstanceGroupMember).\
filter_by(group_id=id).all()
return [instance[0] for instance in instances]
def _instance_group_policies_add(context, id, policies, set_delete=False,
session=None):
if not session:
session = get_session()
allpols = set(policies)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupPolicy,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
already_existing = set()
for policy_ref in query.all():
already_existing.add(policy_ref.policy)
for policy in policies:
if policy in already_existing:
continue
policy_ref = models.InstanceGroupPolicy()
policy_ref.update({'policy': policy,
'group_id': id})
session.add(policy_ref)
return policies
def instance_group_policies_add(context, group_uuid, policies,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_policies_add(context, id, policies,
set_delete=set_delete)
def instance_group_policy_delete(context, group_uuid, policy):
id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroupPolicy,
models.InstanceGroupPolicy.group_id,
id).\
filter_by(policy=policy).\
soft_delete()
if count == 0:
raise exception.InstanceGroupPolicyNotFound(group_uuid=group_uuid,
policy=policy)
def instance_group_policies_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
policies = model_query(context,
models.InstanceGroupPolicy.policy,
base_model=models.InstanceGroupPolicy).\
filter_by(group_id=id).all()
return [policy[0] for policy in policies]
####################
@require_admin_context
def pci_device_get_by_addr(context, node_id, dev_addr):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=dev_addr).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
return pci_dev_ref
@require_admin_context
def pci_device_get_by_id(context, id):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(id=id).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFoundById(id=id)
return pci_dev_ref
@require_admin_context
def pci_device_get_all_by_node(context, node_id):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
all()
@require_context
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter_by(instance_uuid=instance_uuid).\
all()
def _instance_pcidevs_get_multi(context, instance_uuids, session=None):
return model_query(context, models.PciDevice, session=session).\
filter_by(status='allocated').\
filter(models.PciDevice.instance_uuid.in_(instance_uuids))
@require_admin_context
def pci_device_destroy(context, node_id, address):
result = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
soft_delete()
if not result:
raise exception.PciDeviceNotFound(node_id=node_id, address=address)
@require_admin_context
def pci_device_update(context, node_id, address, values):
session = get_session()
with session.begin():
device = model_query(context, models.PciDevice, session=session,
read_deleted="no").\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
first()
if not device:
device = models.PciDevice()
device.update(values)
session.add(device)
return device
| apache-2.0 | 2,602,919,378,681,646,600 | 35.253272 | 79 | 0.577223 | false |
CospanDesign/nysa-gui | NysaGui/host/sf_camera_controller/view/camera_widget.py | 1 | 1172 | # Copyright (c) 2014 Cospan Design
# This file is part of Nysa (wiki.cospandesign.com/index.php?title=Nysa).
#
# Nysa is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# Nysa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nysa; If not, see <http://www.gnu.org/licenses/>.
""" camera widget
"""
import sys
import os
from PyQt4.QtGui import *
from camera_control_view import SFCameraControlView
from camera_viewer import CameraViewer
class CameraWidget(QWidget):
def __init__(self, status, actions):
super (CameraWidget, self).__init__()
layout = QHBoxLayout()
layout.addWidget(CameraViewer(status, actions))
layout.addWidget(SFCameraControlView(status, actions))
self.setLayout(layout)
| gpl-2.0 | 2,707,070,279,751,529,000 | 27.585366 | 73 | 0.733788 | false |
keon/algorithms | algorithms/tree/is_subtree.py | 1 | 1133 | """
Given two binary trees s and t, check if t is a subtree of s.
A subtree of a tree t is a tree consisting of a node in t and
all of its descendants in t.
Example 1:
Given s:
3
/ \
4 5
/ \
1 2
Given t:
4
/ \
1 2
Return true, because t is a subtree of s.
Example 2:
Given s:
3
/ \
4 5
/ \
1 2
/
0
Given t:
3
/
4
/ \
1 2
Return false, because even though t is part of s,
it does not contain all descendants of t.
Follow up:
What if one tree is significantly larger than the other?
"""
import collections
def is_subtree(big, small):
flag = False
queue = collections.deque()
queue.append(big)
while queue:
        node = queue.popleft()
        if node is None:
            continue
        # only run the full comparison when the root values match; keep
        # searching if this candidate fails instead of stopping early
        if node.val == small.val and comp(node, small):
            flag = True
            break
        queue.append(node.left)
        queue.append(node.right)
return flag
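# A possible answer to the "significantly larger" follow-up above (an
# illustrative sketch, not part of the original solution): serialize both
# trees with explicit null markers and a delimiter before every value, then
# reduce the problem to a single substring search over the big tree's string.
def serialize(node):
    # ',' precedes every value and '#' marks an absent child, so distinct
    # values cannot run together and produce false matches
    if node is None:
        return ",#"
    return ",{}{}{}".format(node.val, serialize(node.left), serialize(node.right))
def is_subtree_by_serialization(big, small):
    return serialize(small) in serialize(big)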
def comp(p, q):
if p is None and q is None:
return True
if p is not None and q is not None:
return p.val == q.val and comp(p.left,q.left) and comp(p.right, q.right)
return False
| mit | -6,491,295,747,256,641,000 | 14.957746 | 80 | 0.572816 | false |
Futubank/django-futupayments | futupayments/config.py | 1 | 1409 | from django.core.exceptions import ImproperlyConfigured
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
__all__ = ['config']
def required(name):
from django.conf import settings
result = getattr(settings, name, None)
if result is None:
raise ImproperlyConfigured('settings.{} required'.format(name))
return result
def optional(name, default):
from django.conf import settings
return getattr(settings, name, default)
class Config(object):
@property
def FUTUPAYMENTS_RECIEPTS(self):
return optional('FUTUPAYMENTS_RECIEPTS', False)
@property
def FUTUPAYMENTS_TEST_MODE(self):
return optional('FUTUPAYMENTS_TEST_MODE', False)
@property
def FUTUPAYMENTS_HOST(self):
return optional('FUTUPAYMENTS_HOST', 'https://secure.futubank.com')
@property
def FUTUPAYMENTS_MERCHANT_ID(self):
return required('FUTUPAYMENTS_MERCHANT_ID')
@property
def FUTUPAYMENTS_SECRET_KEY(self):
return required('FUTUPAYMENTS_SECRET_KEY')
@property
def FUTUPAYMENTS_SUCCESS_URL(self):
from . import views
return optional('FUTUPAYMENTS_SUCCESS_URL', reverse(views.success))
@property
def FUTUPAYMENTS_FAIL_URL(self):
from . import views
return optional('FUTUPAYMENTS_FAIL_URL', reverse(views.fail))
config = Config()
| mit | -1,403,819,247,100,922,000 | 24.618182 | 75 | 0.69127 | false |
timorieber/wagtail | wagtail/contrib/routable_page/tests.py | 2 | 12697 | from unittest import mock
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.urls.exceptions import NoReverseMatch
from wagtail.contrib.routable_page.templatetags.wagtailroutablepage_tags import routablepageurl
from wagtail.core.models import Page, Site
from wagtail.tests.routablepage.models import (
RoutablePageTest, RoutablePageWithOverriddenIndexRouteTest)
class TestRoutablePage(TestCase):
model = RoutablePageTest
def setUp(self):
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=self.model(
title="Routable Page",
live=True,
))
def test_resolve_index_route_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/')
self.assertEqual(view, self.routable_page.index_route)
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
def test_resolve_archive_by_year_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/archive/year/2014/')
self.assertEqual(view, self.routable_page.archive_by_year)
self.assertEqual(args, ('2014', ))
self.assertEqual(kwargs, {})
def test_resolve_archive_by_author_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/archive/author/joe-bloggs/')
self.assertEqual(view, self.routable_page.archive_by_author)
self.assertEqual(args, ())
self.assertEqual(kwargs, {'author_slug': 'joe-bloggs'})
def test_resolve_external_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/external/joe-bloggs/')
self.assertEqual(view, self.routable_page.external_view)
self.assertEqual(args, ('joe-bloggs', ))
self.assertEqual(kwargs, {})
def test_resolve_external_view_other_route(self):
view, args, kwargs = self.routable_page.resolve_subpage('/external-no-arg/')
self.assertEqual(view, self.routable_page.external_view)
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
def test_reverse_index_route_view(self):
url = self.routable_page.reverse_subpage('index_route')
self.assertEqual(url, '')
def test_reverse_archive_by_year_view(self):
url = self.routable_page.reverse_subpage('archive_by_year', args=('2014', ))
self.assertEqual(url, 'archive/year/2014/')
def test_reverse_archive_by_author_view(self):
url = self.routable_page.reverse_subpage('archive_by_author', kwargs={'author_slug': 'joe-bloggs'})
self.assertEqual(url, 'archive/author/joe-bloggs/')
def test_reverse_overridden_name(self):
url = self.routable_page.reverse_subpage('name_overridden')
self.assertEqual(url, 'override-name-test/')
def test_reverse_overridden_name_default_doesnt_work(self):
with self.assertRaises(NoReverseMatch):
self.routable_page.reverse_subpage('override_name_test')
def test_reverse_external_view(self):
url = self.routable_page.reverse_subpage('external_view', args=('joe-bloggs', ))
self.assertEqual(url, 'external/joe-bloggs/')
def test_reverse_external_view_other_route(self):
url = self.routable_page.reverse_subpage('external_view')
self.assertEqual(url, 'external-no-arg/')
def test_get_index_route_view(self):
response = self.client.get(self.routable_page.url)
self.assertContains(response, "DEFAULT PAGE TEMPLATE")
def test_get_routable_page_with_overridden_index_route(self):
page = self.home_page.add_child(
instance=RoutablePageWithOverriddenIndexRouteTest(
title="Routable Page with overridden index",
live=True
)
)
response = self.client.get(page.url)
self.assertContains(response, "OVERRIDDEN INDEX ROUTE")
self.assertNotContains(response, "DEFAULT PAGE TEMPLATE")
def test_get_archive_by_year_view(self):
response = self.client.get(self.routable_page.url + 'archive/year/2014/')
self.assertContains(response, "ARCHIVE BY YEAR: 2014")
def test_earlier_view_takes_precedence(self):
response = self.client.get(self.routable_page.url + 'archive/year/1984/')
self.assertContains(response, "we were always at war with eastasia")
def test_get_archive_by_author_view(self):
response = self.client.get(self.routable_page.url + 'archive/author/joe-bloggs/')
self.assertContains(response, "ARCHIVE BY AUTHOR: joe-bloggs")
def test_get_external_view(self):
response = self.client.get(self.routable_page.url + 'external/joe-bloggs/')
self.assertContains(response, "EXTERNAL VIEW: joe-bloggs")
def test_get_external_view_other_route(self):
response = self.client.get(self.routable_page.url + 'external-no-arg/')
self.assertContains(response, "EXTERNAL VIEW: ARG NOT SET")
def test_routable_page_can_have_instance_bound_descriptors(self):
# This descriptor pretends that it does not exist in the class, hence
# it raises an AttributeError when class bound. This is, for instance,
# the behavior of django's FileFields.
class InstanceDescriptor:
def __get__(self, instance, cls=None):
if instance is None:
raise AttributeError
return 'value'
def __set__(self, instance, value):
raise AttributeError
try:
RoutablePageTest.descriptor = InstanceDescriptor()
RoutablePageTest.get_subpage_urls()
finally:
del RoutablePageTest.descriptor
class TestRoutablePageTemplateTag(TestCase):
def setUp(self):
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.context = {'request': self.request}
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, '/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, '/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, '/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, '/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = '/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
@override_settings(ALLOWED_HOSTS=['testserver', 'localhost', 'development.local'])
class TestRoutablePageTemplateTagForSecondSiteAtSameRoot(TestCase):
"""
When multiple sites exist on the same root page, relative URLs within that subtree should
omit the domain, in line with #4390
"""
def setUp(self):
default_site = Site.objects.get(is_default_site=True)
second_site = Site.objects.create( # add another site with the same root page
hostname='development.local',
port=default_site.port,
root_page_id=default_site.root_page_id,
)
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.context = {'request': self.request}
self.request.META['HTTP_HOST'] = second_site.hostname
self.request.META['SERVER_PORT'] = second_site.port
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, '/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, '/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, '/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, '/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = '/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
@override_settings(ALLOWED_HOSTS=['testserver', 'localhost', 'events.local'])
class TestRoutablePageTemplateTagForSecondSiteAtDifferentRoot(TestCase):
"""
When multiple sites exist, relative URLs between such sites should include the domain portion
"""
def setUp(self):
self.home_page = Page.objects.get(id=2)
events_page = self.home_page.add_child(instance=Page(title='Events', live=True))
second_site = Site.objects.create(
hostname='events.local',
port=80,
root_page=events_page,
)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.context = {'request': self.request}
self.request.META['HTTP_HOST'] = second_site.hostname
self.request.META['SERVER_PORT'] = second_site.port
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, 'http://localhost/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, 'http://localhost/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, 'http://localhost/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, 'http://localhost/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = 'http://localhost/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
| bsd-3-clause | 3,030,710,525,895,143,400 | 39.565495 | 107 | 0.643695 | false |
WalrusRPG/WFont | wrpg/wfont/pack.py | 1 | 1439 | #!/bin/env python
import struct
import zlib
from wrpg.wfont.common import (header_structure, character_entry_strucutre, PFONT_VERSION)
def pack_fontdata(archive):
default_char = struct.pack(character_entry_strucutre(), 0, 0, 0, 0, 0, 0)
char_array = [default_char] * 256
for char in archive['chars']:
char_array[char['index']] = struct.pack(character_entry_strucutre(),
char['x'],
char['y'],
char['width'],
char['height'],
char['x_offset'],
char['y_offset'])
return b''.join(char_array)
def pack_header(archive, archived_font_data):
checksum = zlib.crc32(
b'' + struct.pack('>IIII', PFONT_VERSION,
archive['baseline'], 0, archive['space_width']) + archived_font_data)
return struct.pack(header_structure(),
b'WFONT',
checksum,
PFONT_VERSION,
archive['baseline'],
0x0,
archive['space_width'])
def pack_font(archive):
font_data = pack_fontdata(archive)
font_header = pack_header(archive, font_data)
return font_header + font_data
| mit | 8,490,607,961,962,444,000 | 36.868421 | 95 | 0.466991 | false |
reisalex/test-sfm | examples/terminator/initdb.py | 1 | 4066 | from synbiomts import dbms
from Bio import SeqIO
import xlrd
from openpyxl import load_workbook
get = lambda cell: cell[0].value # for openpyxl
# Initialize DataBase
DB = dbms.DataBase()
'''
-----------------------------------------------------------------------------------
Cambray, Guillaume, Joao C. Guimaraes, Vivek K. Mutalik,Colin Lam,
Quynh-Anh Mai, Tim Thimmaiah, James M. Carothers, Adam P. Arkin, and Drew Endy.
"Measurement and modeling of intrinsic transcription terminators."
Nucleic acids research 41, no. 9 (2013): 5139-5148.'''
# test vector on Addgene: http://www.addgene.org/47846/
pFAB763 = SeqIO.read('datasets/pFAB763-trp-L126.gb','genbank')
for f in pFAB763.features:
label = f.qualifiers['label'][0]
if label == 'PLtetO-1 promoter':
start = f.location.end # TSS for mRNA transcript
elif label == 'GoldenGate-tgat':
i = f.location.end # insertion site for terminator
elif label == 'GoldenGate-ggcg':
j = f.location.start # insertion site for terminator
elif label == 'rrnB T1 terminator':
end = f.location.end # transcript end of bicistronic operon
operon = str(pFAB763.seq[start:end])
i -= start
j -= start
# supplementary information
wb = xlrd.open_workbook('datasets/Cambray_2013.xls')
sheet = wb.sheet_by_index(1)
dataset = {
'NAME': sheet.col_values(colx=1, start_rowx=4, end_rowx=58),
'SOURCE': sheet.col_values(colx=3, start_rowx=4, end_rowx=58),
'COORDINATES': sheet.col_values(colx=4, start_rowx=4, end_rowx=58),
'TERMINATOR.SEQ': sheet.col_values(colx=6, start_rowx=4, end_rowx=58),
'EFFICIENCY': sheet.col_values(colx=8, start_rowx=4, end_rowx=58),
'ORGANISM': 'Escherichia coli BW25113',
'TEMP': 37.0,
'DATASET': 'Cambray_2013'
}
terminators = dataset['TERMINATOR.SEQ']
dataset['OPERON'] = [operon[:i]+t+operon[j:] for t in terminators]
dataset['TERMINATOR.START'] = i
dataset['TERMINATOR.END'] = [i+len(t)-1 for t in terminators]
DB += dataset # add dataset to DataBase
'''
-----------------------------------------------------------------------------------
Ying-Ja Chen, Peng Liu, Alec A K Nielsen, Jennifer A N Brophy,
Kevin Clancy, Todd Peterson & Christopher A Voigt
"Characterization of 582 natural and synthetic terminators andquantification of their design constraints"
Nature Methods, 2013, Vol. 10, No. 7; doi:10.1038/nmeth.2515'''
# relationship defined between terminator strength (TS) and efficiency (TE)
TE = lambda TS: 1-1/TS
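# worked example of the relationship above (illustrative): a terminator with
# strength TS = 4 gives TE = 1 - 1/4 = 0.75, while TS = 1 gives TE = 0;
# strength is read here as the fold-reduction in readthrough.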
# Natural terminators (Supplementary Table S2)
wb = load_workbook(filename='datasets/Chen_S2_2013.xlsx',read_only=True)
ws = wb['Fig2a-natural']
dataset = {
'NAME': map(get,ws['A2':'A318']),
'TERMINATOR.SEQ': map(get,ws['F2':'F318']),
'EFFICIENCY': map(lambda TS: 1-1/TS, map(get,ws['L2':'L318'])),
'SOURCE': "NC_000913",
'ORGANISM': 'Escherichia coli DH5-alpha',
'TEMP': 37.0,
'DATASET': 'Chen_2013'
}
# Synthetic terminators (Supplementary Table S3)
wb = load_workbook(filename='datasets/Chen_S3_2013.xlsx',read_only=True)
ws = wb['Sheet1']
dataset['NAME'] += map(get,ws['A2':'A266'])
dataset['TERMINATOR.SEQ'] += map(get,ws['E2':'E266'])
dataset['EFFICIENCY'] += map(TE, map(get,ws['K2':'K266']))
# test vector on Addgene: http://www.addgene.org/46002/
pGR = SeqIO.read('datasets/pGR.gb','genbank')
for f in pGR.features:
label = f.qualifiers['label'][0]
if label == 'araBAD promoter':
start = f.location.end # TSS for mRNA transcript
elif label == 'EcoRI':
i = f.location.end # insertion site for terminator
elif label == 'SpeI':
j = f.location.start # insertion site for terminator
elif label == 'rrnB T1 terminator':
end = f.location.end # transcript end of bicistronic operon
operon = str(pGR.seq[start:end])
i -= start
j -= start
terminators = dataset['TERMINATOR.SEQ']
dataset['OPERON'] = [operon[:i]+t+operon[j:] for t in terminators]
dataset['TERMINATOR.START'] = i
dataset['TERMINATOR.END'] = [i+len(t)-1 for t in terminators]
DB += dataset
DB.save('terminators',type='pickle') | gpl-3.0 | 3,891,673,166,296,407,000 | 34.365217 | 105 | 0.653222 | false |
timohtey/mediadrop_copy | mediacore_env/Lib/site-packages/flup-1.0/flup/server/fcgi_base.py | 1 | 38949 | # Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id: fcgi_base.py 2348 2007-05-11 16:52:46Z asaddi $
__author__ = 'Allan Saddi <[email protected]>'
__version__ = '$Revision: 2348 $'
import sys
import os
import signal
import struct
import cStringIO as StringIO
import select
import socket
import errno
import traceback
try:
import thread
import threading
thread_available = True
except ImportError:
import dummy_thread as thread
import dummy_threading as threading
thread_available = False
# Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case.
if not hasattr(socket, 'SHUT_WR'):
socket.SHUT_WR = 1
__all__ = ['BaseFCGIServer']
# Constants from the spec.
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
FCGI_KEEP_CONN = 1
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'
FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
if __debug__:
import time
# Set non-zero to write debug output to a file.
DEBUG = 0
DEBUGLOG = '/tmp/fcgi.log'
def _debug(level, msg):
if DEBUG < level:
return
try:
f = open(DEBUGLOG, 'a')
f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
f.close()
except:
pass
class InputStream(object):
"""
File-like object representing FastCGI input streams (FCGI_STDIN and
FCGI_DATA). Supports the minimum methods required by WSGI spec.
"""
def __init__(self, conn):
self._conn = conn
# See Server.
self._shrinkThreshold = conn.server.inputStreamShrinkThreshold
self._buf = ''
self._bufList = []
self._pos = 0 # Current read position.
self._avail = 0 # Number of bytes currently available.
self._eof = False # True when server has sent EOF notification.
def _shrinkBuffer(self):
"""Gets rid of already read data (since we can't rewind)."""
if self._pos >= self._shrinkThreshold:
self._buf = self._buf[self._pos:]
self._avail -= self._pos
self._pos = 0
assert self._avail >= 0
def _waitForData(self):
"""Waits for more data to become available."""
self._conn.process_input()
def read(self, n=-1):
if self._pos == self._avail and self._eof:
return ''
while True:
if n < 0 or (self._avail - self._pos) < n:
# Not enough data available.
if self._eof:
# And there's no more coming.
newPos = self._avail
break
else:
# Wait for more data.
self._waitForData()
continue
else:
newPos = self._pos + n
break
# Merge buffer list, if necessary.
if self._bufList:
self._buf += ''.join(self._bufList)
self._bufList = []
r = self._buf[self._pos:newPos]
self._pos = newPos
self._shrinkBuffer()
return r
def readline(self, length=None):
if self._pos == self._avail and self._eof:
return ''
while True:
# Unfortunately, we need to merge the buffer list early.
if self._bufList:
self._buf += ''.join(self._bufList)
self._bufList = []
# Find newline.
i = self._buf.find('\n', self._pos)
if i < 0:
# Not found?
if self._eof:
# No more data coming.
newPos = self._avail
break
else:
if length is not None and len(self._buf) >= length + self._pos:
newPos = self._pos + length
break
# Wait for more to come.
self._waitForData()
continue
else:
newPos = i + 1
break
r = self._buf[self._pos:newPos]
self._pos = newPos
self._shrinkBuffer()
return r
def readlines(self, sizehint=0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def __iter__(self):
return self
def next(self):
r = self.readline()
if not r:
raise StopIteration
return r
def add_data(self, data):
if not data:
self._eof = True
else:
self._bufList.append(data)
self._avail += len(data)
class MultiplexedInputStream(InputStream):
"""
A version of InputStream meant to be used with MultiplexedConnections.
Assumes the MultiplexedConnection (the producer) and the Request
(the consumer) are running in different threads.
"""
def __init__(self, conn):
super(MultiplexedInputStream, self).__init__(conn)
# Arbitrates access to this InputStream (it's used simultaneously
# by a Request and its owning Connection object).
lock = threading.RLock()
# Notifies Request thread that there is new data available.
self._lock = threading.Condition(lock)
def _waitForData(self):
# Wait for notification from add_data().
self._lock.wait()
def read(self, n=-1):
self._lock.acquire()
try:
return super(MultiplexedInputStream, self).read(n)
finally:
self._lock.release()
def readline(self, length=None):
self._lock.acquire()
try:
return super(MultiplexedInputStream, self).readline(length)
finally:
self._lock.release()
def add_data(self, data):
self._lock.acquire()
try:
super(MultiplexedInputStream, self).add_data(data)
self._lock.notify()
finally:
self._lock.release()
class OutputStream(object):
"""
FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to
write() or writelines() immediately result in Records being sent back
    to the server. Buffering should be done at a higher level!
"""
def __init__(self, conn, req, type, buffered=False):
self._conn = conn
self._req = req
self._type = type
self._buffered = buffered
self._bufList = [] # Used if buffered is True
self.dataWritten = False
self.closed = False
def _write(self, data):
length = len(data)
while length:
toWrite = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN)
rec = Record(self._type, self._req.requestId)
rec.contentLength = toWrite
rec.contentData = data[:toWrite]
self._conn.writeRecord(rec)
data = data[toWrite:]
length -= toWrite
def write(self, data):
assert not self.closed
if not data:
return
self.dataWritten = True
if self._buffered:
self._bufList.append(data)
else:
self._write(data)
def writelines(self, lines):
assert not self.closed
for line in lines:
self.write(line)
def flush(self):
# Only need to flush if this OutputStream is actually buffered.
if self._buffered:
data = ''.join(self._bufList)
self._bufList = []
self._write(data)
# Though available, the following should NOT be called by WSGI apps.
def close(self):
"""Sends end-of-stream notification, if necessary."""
if not self.closed and self.dataWritten:
self.flush()
rec = Record(self._type, self._req.requestId)
self._conn.writeRecord(rec)
self.closed = True
class TeeOutputStream(object):
"""
Simple wrapper around two or more output file-like objects that copies
written data to all streams.
"""
def __init__(self, streamList):
self._streamList = streamList
def write(self, data):
for f in self._streamList:
f.write(data)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
for f in self._streamList:
f.flush()
class StdoutWrapper(object):
"""
Wrapper for sys.stdout so we know if data has actually been written.
"""
def __init__(self, stdout):
self._file = stdout
self.dataWritten = False
def write(self, data):
if data:
self.dataWritten = True
self._file.write(data)
def writelines(self, lines):
for line in lines:
self.write(line)
def __getattr__(self, name):
return getattr(self._file, name)
def decode_pair(s, pos=0):
"""
Decodes a name/value pair.
The number of bytes decoded as well as the name/value pair
are returned.
"""
nameLength = ord(s[pos])
if nameLength & 128:
nameLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
pos += 4
else:
pos += 1
valueLength = ord(s[pos])
if valueLength & 128:
valueLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
pos += 4
else:
pos += 1
name = s[pos:pos+nameLength]
pos += nameLength
value = s[pos:pos+valueLength]
pos += valueLength
return (pos, (name, value))
def encode_pair(name, value):
"""
Encodes a name/value pair.
The encoded string is returned.
"""
nameLength = len(name)
if nameLength < 128:
s = chr(nameLength)
else:
s = struct.pack('!L', nameLength | 0x80000000L)
valueLength = len(value)
if valueLength < 128:
s += chr(valueLength)
else:
s += struct.pack('!L', valueLength | 0x80000000L)
return s + name + value
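# Illustrative sketch: encode_pair() and decode_pair() are inverses over the
# FastCGI name/value wire format ('SCRIPT_NAME'/'/app' is just an arbitrary
# example pair, not something this module requires):
#
#   data = encode_pair('SCRIPT_NAME', '/app')   # '\x0b\x04SCRIPT_NAME/app'
#   pos, (name, value) = decode_pair(data)      # (17, ('SCRIPT_NAME', '/app'))
#
# Names or values of 128 bytes or more switch to a 4-byte big-endian length
# with the high bit set, which is why decode_pair() masks with 0x7fffffff.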
class Record(object):
"""
A FastCGI Record.
Used for encoding/decoding records.
"""
def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
self.version = FCGI_VERSION_1
self.type = type
self.requestId = requestId
self.contentLength = 0
self.paddingLength = 0
self.contentData = ''
def _recvall(sock, length):
"""
Attempts to receive length bytes from a socket, blocking if necessary.
(Socket may be blocking or non-blocking.)
"""
dataList = []
recvLen = 0
while length:
try:
data = sock.recv(length)
except socket.error, e:
if e[0] == errno.EAGAIN:
select.select([sock], [], [])
continue
else:
raise
if not data: # EOF
break
dataList.append(data)
dataLen = len(data)
recvLen += dataLen
length -= dataLen
return ''.join(dataList), recvLen
_recvall = staticmethod(_recvall)
def read(self, sock):
"""Read and decode a Record from a socket."""
try:
header, length = self._recvall(sock, FCGI_HEADER_LEN)
except:
raise EOFError
if length < FCGI_HEADER_LEN:
raise EOFError
self.version, self.type, self.requestId, self.contentLength, \
self.paddingLength = struct.unpack(FCGI_Header, header)
if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
'contentLength = %d' %
(sock.fileno(), self.type, self.requestId,
self.contentLength))
if self.contentLength:
try:
self.contentData, length = self._recvall(sock,
self.contentLength)
except:
raise EOFError
if length < self.contentLength:
raise EOFError
if self.paddingLength:
try:
self._recvall(sock, self.paddingLength)
except:
raise EOFError
def _sendall(sock, data):
"""
Writes data to a socket and does not return until all the data is sent.
"""
length = len(data)
while length:
try:
sent = sock.send(data)
except socket.error, e:
if e[0] == errno.EAGAIN:
select.select([], [sock], [])
continue
else:
raise
data = data[sent:]
length -= sent
_sendall = staticmethod(_sendall)
def write(self, sock):
"""Encode and write a Record to a socket."""
self.paddingLength = -self.contentLength & 7
if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
'contentLength = %d' %
(sock.fileno(), self.type, self.requestId,
self.contentLength))
header = struct.pack(FCGI_Header, self.version, self.type,
self.requestId, self.contentLength,
self.paddingLength)
self._sendall(sock, header)
if self.contentLength:
self._sendall(sock, self.contentData)
if self.paddingLength:
self._sendall(sock, '\x00'*self.paddingLength)
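# Illustrative note on the padding arithmetic above: paddingLength is computed
# as -contentLength & 7, which pads contentLength + paddingLength up to the
# next multiple of 8. For example:
#
#   contentLength = 5    ->   -5 & 7 == 3    (5 + 3 == 8)
#   contentLength = 8    ->   -8 & 7 == 0    (already aligned)
#   contentLength = 13   ->  -13 & 7 == 3    (13 + 3 == 16)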
class Request(object):
"""
Represents a single FastCGI request.
    These objects are passed to your handler and are the main interface
between your handler and the fcgi module. The methods should not
be called by your handler. However, server, params, stdin, stdout,
stderr, and data are free for your handler's use.
"""
def __init__(self, conn, inputStreamClass):
self._conn = conn
self.server = conn.server
self.params = {}
self.stdin = inputStreamClass(conn)
self.stdout = OutputStream(conn, self, FCGI_STDOUT)
self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True)
self.data = inputStreamClass(conn)
def run(self):
"""Runs the handler, flushes the streams, and ends the request."""
try:
protocolStatus, appStatus = self.server.handler(self)
except:
traceback.print_exc(file=self.stderr)
self.stderr.flush()
if not self.stdout.dataWritten:
self.server.error(self)
protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0
if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' %
(protocolStatus, appStatus))
try:
self._flush()
self._end(appStatus, protocolStatus)
except socket.error, e:
if e[0] != errno.EPIPE:
raise
def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
self._conn.end_request(self, appStatus, protocolStatus)
def _flush(self):
self.stdout.close()
self.stderr.close()
class CGIRequest(Request):
"""A normal CGI request disguised as a FastCGI request."""
def __init__(self, server):
# These are normally filled in by Connection.
self.requestId = 1
self.role = FCGI_RESPONDER
self.flags = 0
self.aborted = False
self.server = server
self.params = dict(os.environ)
self.stdin = sys.stdin
self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity!
self.stderr = sys.stderr
self.data = StringIO.StringIO()
def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
sys.exit(appStatus)
def _flush(self):
# Not buffered, do nothing.
pass
class Connection(object):
"""
A Connection with the web server.
Each Connection is associated with a single socket (which is
connected to the web server) and is responsible for handling all
the FastCGI message processing for that socket.
"""
_multiplexed = False
_inputStreamClass = InputStream
def __init__(self, sock, addr, server):
self._sock = sock
self._addr = addr
self.server = server
# Active Requests for this Connection, mapped by request ID.
self._requests = {}
def _cleanupSocket(self):
"""Close the Connection's socket."""
try:
self._sock.shutdown(socket.SHUT_WR)
except:
return
try:
while True:
r, w, e = select.select([self._sock], [], [])
if not r or not self._sock.recv(1024):
break
except:
pass
self._sock.close()
def run(self):
"""Begin processing data from the socket."""
self._keepGoing = True
while self._keepGoing:
try:
self.process_input()
except (EOFError, KeyboardInterrupt):
break
except (select.error, socket.error), e:
if e[0] == errno.EBADF: # Socket was closed by Request.
break
raise
self._cleanupSocket()
def process_input(self):
"""Attempt to read a single Record from the socket and process it."""
        # Currently, any child Request threads notify this Connection
# that it is no longer needed by closing the Connection's socket.
# We need to put a timeout on select, otherwise we might get
# stuck in it indefinitely... (I don't like this solution.)
while self._keepGoing:
try:
r, w, e = select.select([self._sock], [], [], 1.0)
except ValueError:
# Sigh. ValueError gets thrown sometimes when passing select
# a closed socket.
raise EOFError
if r: break
if not self._keepGoing:
return
rec = Record()
rec.read(self._sock)
if rec.type == FCGI_GET_VALUES:
self._do_get_values(rec)
elif rec.type == FCGI_BEGIN_REQUEST:
self._do_begin_request(rec)
elif rec.type == FCGI_ABORT_REQUEST:
self._do_abort_request(rec)
elif rec.type == FCGI_PARAMS:
self._do_params(rec)
elif rec.type == FCGI_STDIN:
self._do_stdin(rec)
elif rec.type == FCGI_DATA:
self._do_data(rec)
elif rec.requestId == FCGI_NULL_REQUEST_ID:
self._do_unknown_type(rec)
else:
# Need to complain about this.
pass
def writeRecord(self, rec):
"""
Write a Record to the socket.
"""
rec.write(self._sock)
def end_request(self, req, appStatus=0L,
protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
"""
End a Request.
Called by Request objects. An FCGI_END_REQUEST Record is
sent to the web server. If the web server no longer requires
the connection, the socket is closed, thereby ending this
Connection (run() returns).
"""
rec = Record(FCGI_END_REQUEST, req.requestId)
rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
protocolStatus)
rec.contentLength = FCGI_EndRequestBody_LEN
self.writeRecord(rec)
if remove:
del self._requests[req.requestId]
if __debug__: _debug(2, 'end_request: flags = %d' % req.flags)
if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
self._cleanupSocket()
self._keepGoing = False
def _do_get_values(self, inrec):
"""Handle an FCGI_GET_VALUES request from the web server."""
outrec = Record(FCGI_GET_VALUES_RESULT)
pos = 0
while pos < inrec.contentLength:
pos, (name, value) = decode_pair(inrec.contentData, pos)
cap = self.server.capability.get(name)
if cap is not None:
outrec.contentData += encode_pair(name, str(cap))
outrec.contentLength = len(outrec.contentData)
self.writeRecord(outrec)
def _do_begin_request(self, inrec):
"""Handle an FCGI_BEGIN_REQUEST from the web server."""
role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)
req = self.server.request_class(self, self._inputStreamClass)
req.requestId, req.role, req.flags = inrec.requestId, role, flags
req.aborted = False
if not self._multiplexed and self._requests:
# Can't multiplex requests.
self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False)
else:
self._requests[inrec.requestId] = req
def _do_abort_request(self, inrec):
"""
Handle an FCGI_ABORT_REQUEST from the web server.
We just mark a flag in the associated Request.
"""
req = self._requests.get(inrec.requestId)
if req is not None:
req.aborted = True
def _start_request(self, req):
"""Run the request."""
# Not multiplexed, so run it inline.
req.run()
def _do_params(self, inrec):
"""
Handle an FCGI_PARAMS Record.
If the last FCGI_PARAMS Record is received, start the request.
"""
req = self._requests.get(inrec.requestId)
if req is not None:
if inrec.contentLength:
pos = 0
while pos < inrec.contentLength:
pos, (name, value) = decode_pair(inrec.contentData, pos)
req.params[name] = value
else:
self._start_request(req)
def _do_stdin(self, inrec):
"""Handle the FCGI_STDIN stream."""
req = self._requests.get(inrec.requestId)
if req is not None:
req.stdin.add_data(inrec.contentData)
def _do_data(self, inrec):
"""Handle the FCGI_DATA stream."""
req = self._requests.get(inrec.requestId)
if req is not None:
req.data.add_data(inrec.contentData)
def _do_unknown_type(self, inrec):
"""Handle an unknown request type. Respond accordingly."""
outrec = Record(FCGI_UNKNOWN_TYPE)
outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
outrec.contentLength = FCGI_UnknownTypeBody_LEN
        self.writeRecord(outrec)
class MultiplexedConnection(Connection):
"""
A version of Connection capable of handling multiple requests
simultaneously.
"""
_multiplexed = True
_inputStreamClass = MultiplexedInputStream
def __init__(self, sock, addr, server):
super(MultiplexedConnection, self).__init__(sock, addr, server)
# Used to arbitrate access to self._requests.
lock = threading.RLock()
        # Notification is posted every time a request completes, allowing us
# to quit cleanly.
self._lock = threading.Condition(lock)
def _cleanupSocket(self):
# Wait for any outstanding requests before closing the socket.
self._lock.acquire()
while self._requests:
self._lock.wait()
self._lock.release()
super(MultiplexedConnection, self)._cleanupSocket()
def writeRecord(self, rec):
# Must use locking to prevent intermingling of Records from different
# threads.
self._lock.acquire()
try:
# Probably faster than calling super. ;)
rec.write(self._sock)
finally:
self._lock.release()
def end_request(self, req, appStatus=0L,
protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
self._lock.acquire()
try:
super(MultiplexedConnection, self).end_request(req, appStatus,
protocolStatus,
remove)
self._lock.notify()
finally:
self._lock.release()
def _do_begin_request(self, inrec):
self._lock.acquire()
try:
super(MultiplexedConnection, self)._do_begin_request(inrec)
finally:
self._lock.release()
def _do_abort_request(self, inrec):
self._lock.acquire()
try:
super(MultiplexedConnection, self)._do_abort_request(inrec)
finally:
self._lock.release()
def _start_request(self, req):
thread.start_new_thread(req.run, ())
def _do_params(self, inrec):
self._lock.acquire()
try:
super(MultiplexedConnection, self)._do_params(inrec)
finally:
self._lock.release()
def _do_stdin(self, inrec):
self._lock.acquire()
try:
super(MultiplexedConnection, self)._do_stdin(inrec)
finally:
self._lock.release()
def _do_data(self, inrec):
self._lock.acquire()
try:
super(MultiplexedConnection, self)._do_data(inrec)
finally:
self._lock.release()
class BaseFCGIServer(object):
request_class = Request
cgirequest_class = CGIRequest
# The maximum number of bytes (per Record) to write to the server.
# I've noticed mod_fastcgi has a relatively small receive buffer (8K or
# so).
maxwrite = 8192
# Limits the size of the InputStream's string buffer to this size + the
# server's maximum Record size. Since the InputStream is not seekable,
# we throw away already-read data once this certain amount has been read.
inputStreamShrinkThreshold = 102400 - 8192
def __init__(self, application, environ=None,
multithreaded=True, multiprocess=False,
bindAddress=None, umask=None, multiplexed=False,
debug=True, roles=(FCGI_RESPONDER,)):
"""
bindAddress, if present, must either be a string or a 2-tuple. If
present, run() will open its own listening socket. You would use
this if you wanted to run your application as an 'external' FastCGI
app. (i.e. the webserver would no longer be responsible for starting
your app) If a string, it will be interpreted as a filename and a UNIX
socket will be opened. If a tuple, the first element, a string,
is the interface name/IP to bind to, and the second element (an int)
is the port number.
If binding to a UNIX socket, umask may be set to specify what
the umask is to be changed to before the socket is created in the
filesystem. After the socket is created, the previous umask is
restored.
Set multiplexed to True if you want to handle multiple requests
per connection. Some FastCGI backends (namely mod_fastcgi) don't
multiplex requests at all, so by default this is off (which saves
on thread creation/locking overhead). If threads aren't available,
this keyword is ignored; it's not possible to multiplex requests
at all.
"""
if environ is None:
environ = {}
self.application = application
self.environ = environ
self.multithreaded = multithreaded
self.multiprocess = multiprocess
self.debug = debug
self.roles = roles
self._bindAddress = bindAddress
self._umask = umask
# Used to force single-threadedness
self._appLock = thread.allocate_lock()
if thread_available:
try:
import resource
# Attempt to glean the maximum number of connections
# from the OS.
maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
except ImportError:
maxConns = 100 # Just some made up number.
maxReqs = maxConns
if multiplexed:
self._connectionClass = MultiplexedConnection
maxReqs *= 5 # Another made up number.
else:
self._connectionClass = Connection
self.capability = {
FCGI_MAX_CONNS: maxConns,
FCGI_MAX_REQS: maxReqs,
FCGI_MPXS_CONNS: multiplexed and 1 or 0
}
else:
self._connectionClass = Connection
self.capability = {
# If threads aren't available, these are pretty much correct.
FCGI_MAX_CONNS: 1,
FCGI_MAX_REQS: 1,
FCGI_MPXS_CONNS: 0
}
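    # Illustrative sketch of the bindAddress forms described in the docstring.
    # SomeFCGIServer and app are hypothetical names standing in for a concrete
    # subclass of BaseFCGIServer and a WSGI application:
    #
    #   SomeFCGIServer(app, bindAddress='/tmp/app.sock').run()      # UNIX socket
    #   SomeFCGIServer(app, bindAddress=('127.0.0.1', 8000)).run()  # TCP socket
    #
    # Omitting bindAddress keeps the default behaviour: inherit the listening
    # socket from the web server, or fall back to one-shot CGI.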
def _setupSocket(self):
if self._bindAddress is None: # Run as a normal FastCGI?
isFCGI = True
sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
socket.SOCK_STREAM)
try:
sock.getpeername()
except socket.error, e:
if e[0] == errno.ENOTSOCK:
# Not a socket, assume CGI context.
isFCGI = False
elif e[0] != errno.ENOTCONN:
raise
# FastCGI/CGI discrimination is broken on Mac OS X.
# Set the environment variable FCGI_FORCE_CGI to "Y" or "y"
# if you want to run your app as a simple CGI. (You can do
# this with Apache's mod_env [not loaded by default in OS X
# client, ha ha] and the SetEnv directive.)
if not isFCGI or \
os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'):
req = self.cgirequest_class(self)
req.run()
sys.exit(0)
else:
# Run as a server
oldUmask = None
if type(self._bindAddress) is str:
# Unix socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.unlink(self._bindAddress)
except OSError:
pass
if self._umask is not None:
oldUmask = os.umask(self._umask)
else:
# INET socket
assert type(self._bindAddress) is tuple
assert len(self._bindAddress) == 2
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(self._bindAddress)
sock.listen(socket.SOMAXCONN)
if oldUmask is not None:
os.umask(oldUmask)
return sock
def _cleanupSocket(self, sock):
"""Closes the main socket."""
sock.close()
def handler(self, req):
"""Special handler for WSGI."""
if req.role not in self.roles:
return FCGI_UNKNOWN_ROLE, 0
# Mostly taken from example CGI gateway.
environ = req.params
environ.update(self.environ)
environ['wsgi.version'] = (1,0)
environ['wsgi.input'] = req.stdin
if self._bindAddress is None:
stderr = req.stderr
else:
stderr = TeeOutputStream((sys.stderr, req.stderr))
environ['wsgi.errors'] = stderr
environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \
thread_available and self.multithreaded
environ['wsgi.multiprocess'] = isinstance(req, CGIRequest) or \
self.multiprocess
environ['wsgi.run_once'] = isinstance(req, CGIRequest)
if environ.get('HTTPS', 'off') in ('on', '1'):
environ['wsgi.url_scheme'] = 'https'
else:
environ['wsgi.url_scheme'] = 'http'
self._sanitizeEnv(environ)
headers_set = []
headers_sent = []
result = None
def write(data):
assert type(data) is str, 'write() argument must be string'
assert headers_set, 'write() before start_response()'
if not headers_sent:
status, responseHeaders = headers_sent[:] = headers_set
found = False
for header,value in responseHeaders:
if header.lower() == 'content-length':
found = True
break
if not found and result is not None:
try:
if len(result) == 1:
responseHeaders.append(('Content-Length',
str(len(data))))
except:
pass
s = 'Status: %s\r\n' % status
for header in responseHeaders:
s += '%s: %s\r\n' % header
s += '\r\n'
req.stdout.write(s)
req.stdout.write(data)
req.stdout.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
# Re-raise if too late
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
else:
assert not headers_set, 'Headers already set!'
assert type(status) is str, 'Status must be a string'
assert len(status) >= 4, 'Status must be at least 4 characters'
assert int(status[:3]), 'Status must begin with 3-digit code'
assert status[3] == ' ', 'Status must have a space after code'
assert type(response_headers) is list, 'Headers must be a list'
if __debug__:
for name,val in response_headers:
assert type(name) is str, 'Header names must be strings'
assert type(val) is str, 'Header values must be strings'
headers_set[:] = [status, response_headers]
return write
if not self.multithreaded:
self._appLock.acquire()
try:
try:
result = self.application(environ, start_response)
try:
for data in result:
if data:
write(data)
if not headers_sent:
write('') # in case body was empty
finally:
if hasattr(result, 'close'):
result.close()
except socket.error, e:
if e[0] != errno.EPIPE:
raise # Don't let EPIPE propagate beyond server
finally:
if not self.multithreaded:
self._appLock.release()
return FCGI_REQUEST_COMPLETE, 0
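    # Illustrative sketch: the application driven by handler() is any standard
    # WSGI callable ('app' is a placeholder name, not part of this module):
    #
    #   def app(environ, start_response):
    #       start_response('200 OK', [('Content-Type', 'text/plain')])
    #       return ['Hello from FastCGI\n']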
def _sanitizeEnv(self, environ):
"""Ensure certain values are present, if required by WSGI."""
if not environ.has_key('SCRIPT_NAME'):
environ['SCRIPT_NAME'] = ''
if not environ.has_key('PATH_INFO'):
environ['PATH_INFO'] = ''
if not environ.has_key('QUERY_STRING'):
environ['QUERY_STRING'] = ''
# If any of these are missing, it probably signifies a broken
# server...
for name,default in [('REQUEST_METHOD', 'GET'),
('SERVER_NAME', 'localhost'),
('SERVER_PORT', '80'),
('SERVER_PROTOCOL', 'HTTP/1.0')]:
if not environ.has_key(name):
environ['wsgi.errors'].write('%s: missing FastCGI param %s '
'required by WSGI!\n' %
(self.__class__.__name__, name))
environ[name] = default
def error(self, req):
"""
Called by Request if an exception occurs within the handler. May and
should be overridden.
"""
if self.debug:
import cgitb
req.stdout.write('Content-Type: text/html\r\n\r\n' +
cgitb.html(sys.exc_info()))
else:
errorpage = """<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>Unhandled Exception</title>
</head><body>
<h1>Unhandled Exception</h1>
<p>An unhandled exception was thrown by the application.</p>
</body></html>
"""
req.stdout.write('Content-Type: text/html\r\n\r\n' +
errorpage)
| gpl-3.0 | 2,389,262,307,269,843,000 | 32.148085 | 83 | 0.551311 | false |
openstack/vitrage | vitrage/api_handler/apis/base.py | 1 | 4616 | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log
from vitrage.common.constants import EdgeProperties as EProps
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE
from vitrage.datasources.nova.zone import NOVA_ZONE_DATASOURCE
from vitrage.datasources import OPENSTACK_CLUSTER
LOG = log.getLogger(__name__)
# Used for Sunburst to show only specific resources
TREE_TOPOLOGY_QUERY = {
'and': [
{'==': {VProps.VITRAGE_CATEGORY: EntityCategory.RESOURCE}},
{'==': {VProps.VITRAGE_IS_DELETED: False}},
{'==': {VProps.VITRAGE_IS_PLACEHOLDER: False}},
{
'or': [
{'==': {VProps.VITRAGE_TYPE: OPENSTACK_CLUSTER}},
{'==': {VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE}},
{'==': {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}},
{'==': {VProps.VITRAGE_TYPE: NOVA_ZONE_DATASOURCE}}
]
}
]
}
TOPOLOGY_AND_ALARMS_QUERY = {
'and': [
{'==': {VProps.VITRAGE_IS_DELETED: False}},
{'==': {VProps.VITRAGE_IS_PLACEHOLDER: False}},
{
'or': [
{'==': {VProps.VITRAGE_CATEGORY: EntityCategory.ALARM}},
{'==': {VProps.VITRAGE_CATEGORY: EntityCategory.RESOURCE}}
]
}
]
}
ALARMS_ALL_QUERY = {
'and': [
{'==': {VProps.VITRAGE_CATEGORY: EntityCategory.ALARM}},
{'==': {VProps.VITRAGE_IS_DELETED: False}}
]
}
EDGE_QUERY = {'==': {EProps.VITRAGE_IS_DELETED: False}}
RESOURCES_ALL_QUERY = {
'and': [
{'==': {VProps.VITRAGE_CATEGORY: EntityCategory.RESOURCE}},
{'==': {VProps.VITRAGE_IS_DELETED: False}},
{'==': {VProps.VITRAGE_IS_PLACEHOLDER: False}}
]
}
class EntityGraphApisBase(object):
def __init__(self, entity_graph, api_lock, db=None):
self.entity_graph = entity_graph
self.db = db
self.api_lock = api_lock
@classmethod
def _get_query_with_project(cls, vitrage_category, project_id, is_admin):
"""Generate query with tenant data
        Creates an entity graph query that takes into consideration the
        vitrage_category, the project_id and whether the tenant is an admin
:type vitrage_category: string
:type project_id: string
:type is_admin: boolean
:rtype: dictionary
"""
query = {
'and': [
{'==': {VProps.VITRAGE_IS_DELETED: False}},
{'==': {VProps.VITRAGE_IS_PLACEHOLDER: False}},
{'==': {VProps.VITRAGE_CATEGORY: vitrage_category}}
]
}
cls._add_project_to_query(query, project_id, is_admin)
return query
@staticmethod
def _add_project_to_query(query, project_id, is_admin):
"""Add project_id filter to the query
Each query should contain the project_id condition
:type query: string representing a json query
:type project_id: string
:type is_admin: boolean
:rtype: string representing a json query
"""
if is_admin:
project_query = \
{'or': [{'==': {VProps.PROJECT_ID: project_id}},
{'==': {VProps.PROJECT_ID: None}}]}
else:
project_query = \
{'==': {VProps.PROJECT_ID: project_id}}
if 'and' in query:
query_with_project_id = query
query_with_project_id['and'].append(project_query)
else:
query_with_project_id = {'and': [project_query, query]}
return query_with_project_id
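    # Illustrative result ('p1' is an arbitrary example project id): for a
    # non-admin tenant, _get_query_with_project(EntityCategory.ALARM, 'p1',
    # False) builds a query equivalent to
    #
    #   {'and': [{'==': {VProps.VITRAGE_IS_DELETED: False}},
    #            {'==': {VProps.VITRAGE_IS_PLACEHOLDER: False}},
    #            {'==': {VProps.VITRAGE_CATEGORY: EntityCategory.ALARM}},
    #            {'==': {VProps.PROJECT_ID: 'p1'}}]}
    #
    # With is_admin=True the last clause becomes an 'or' that also matches
    # entities whose project_id is None.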
def lock_graph(f):
@functools.wraps(f)
def api_backend_func(*args, **kwargs):
try:
args[0].api_lock.acquire()
result = f(*args, **kwargs)
return result
finally:
args[0].api_lock.release()
return api_backend_func
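# Illustrative use of lock_graph (MyGraphApis and count_alarms are hypothetical
# names, and lock_graph is assumed to be importable where the subclass is
# defined): it wraps an API method so that args[0].api_lock -- the instance's
# shared lock -- is held for the duration of the call and released even if the
# wrapped call raises.
#
#   class MyGraphApis(EntityGraphApisBase):
#       @lock_graph
#       def count_alarms(self, ctx):
#           ...  # safe to read self.entity_graph while the lock is held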
| apache-2.0 | 2,543,015,688,747,234,300 | 30.401361 | 77 | 0.594887 | false |
nttks/edx-platform | common/test/acceptance/tests/discussion/test_ga_discussion.py | 1 | 2528 | # -*- coding: utf-8 -*-
"""
End-to-end tests for discussion
"""
import bok_choy.browser
from ..helpers import UniqueCourseTest
from ...fixtures.course import CourseFixture
from ...pages.lms.auto_auth import AutoAuthPage
from ..ga_helpers import GaccoTestMixin, SUPER_USER_INFO
from ...pages.lms.ga_discussion import DiscussionTabHomePage
from ...pages.lms.ga_django_admin import DjangoAdminPage
class DiscussionPageTest(UniqueCourseTest, GaccoTestMixin):
"""
    Tests for the discussion page.
"""
def _login_get_userid(self, user_info):
auto_auth_page = AutoAuthPage(self.browser, username=user_info['username'], email=user_info['email']).visit()
return auto_auth_page.get_user_id()
def setUp(self):
super(DiscussionPageTest, self).setUp()
CourseFixture(**self.course_info).install()
def test_not_present_input_upload_file(self):
self.switch_to_user(SUPER_USER_INFO)
DjangoAdminPage(self.browser).visit().click_add('ga_optional', 'courseoptionalconfiguration').input({
'enabled': False,
'key': 'disccusion-image-upload-settings',
'course_key': self.course_id,
}).save()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.discussion_page = DiscussionTabHomePage(self.browser, self.course_id)
self.discussion_page.visit()
self.discussion_page.click_new_post_button()
self.discussion_page.view_dialog_image_insert()
bok_choy.browser.save_screenshot(self.browser, 'test_not_present_input_upload_file')
self.assertFalse(self.discussion_page.view_dialog_image_insert().exists_input_file_on_dialog())
def test_present_input_upload_file(self):
self.switch_to_user(SUPER_USER_INFO)
DjangoAdminPage(self.browser).visit().click_add('ga_optional', 'courseoptionalconfiguration').input({
'enabled': True,
'key': 'disccusion-image-upload-settings',
'course_key': self.course_id,
}).save()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.discussion_page = DiscussionTabHomePage(self.browser, self.course_id)
self.discussion_page.visit()
self.discussion_page.click_new_post_button()
self.discussion_page.view_dialog_image_insert()
bok_choy.browser.save_screenshot(self.browser, 'test_present_input_upload_file')
self.assertTrue(self.discussion_page.view_dialog_image_insert().exists_input_file_on_dialog())
| agpl-3.0 | -3,283,191,238,097,019,400 | 39.126984 | 117 | 0.684335 | false |
LiZoRN/Charlotte | spiders/spider/spider/settings.py | 1 | 3131 | # -*- coding: utf-8 -*-
# Scrapy settings for spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'spider'
SPIDER_MODULES = ['spider.spiders']
NEWSPIDER_MODULE = 'spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'spider.middlewares.SpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'spider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'spider.pipelines.SpiderPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| gpl-3.0 | -3,630,876,355,947,236,000 | 32.666667 | 109 | 0.764293 | false |
matthiaskramm/corepy | corepy/arch/vmx/isa/__init__.py | 1 | 3324 | # Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# import platform_conf
from vmx_isa2 import *
#import corepy.spre.spe as spe
# Nothing to see here, move along... ;)
__active_code = None
def set_active_code(code):
global __active_code
if __active_code is not None:
__active_code.set_active_callback(None)
__active_code = code
if code is not None:
code.set_active_callback(set_active_code)
return
# Property version
def __get_active_code(self):
global __active_code
return __active_code
# Free function version
def get_active_code():
global __active_code
return __active_code
# Build the instructions
#for inst in vmx_isa.VMX_ISA:
# name = inst[0]
# machine_inst = getattr(machine, name)
# asm_order = inst[1]['asm']
# members = {}
# for key in inst[1].keys():
# members[key] = inst[1][key]
# members['asm_order'] = members['asm']
# members['machine_inst'] = machine_inst
# members['active_code'] = property(__get_active_code)
# globals()[inst[0]] = type(name, (spe.Instruction,), members)
for l in locals().values():
if isinstance(l, type) and issubclass(l, (VMXInstruction)):
l.active_code = property(__get_active_code)
| bsd-3-clause | 6,901,892,014,790,161,000 | 42.736842 | 80 | 0.595066 | false |
imjonsnooow/synapse | synapse/compat.py | 1 | 1088 | from __future__ import absolute_import,unicode_literals
'''
A module to isolate python version compatibility filth.
'''
import sys
import time
import base64
import collections
major = sys.version_info.major
minor = sys.version_info.minor
micro = sys.version_info.micro
majmin = (major,minor)
version = (major,minor,micro)
if version < (3,0,0):
import select
import Queue as queue
from cStringIO import StringIO as BytesIO
numtypes = (int,long)
strtypes = (str,unicode)
def enbase64(s):
return s.encode('base64')
def debase64(s):
return s.decode('base64')
def isstr(s):
return type(s) in (str,unicode)
def iterbytes(byts):
for c in byts:
yield(ord(c))
else:
import queue
from io import BytesIO
numtypes = (int,)
strtypes = (str,)
def enbase64(b):
return base64.b64encode(b).decode('utf8')
def debase64(b):
return base64.b64decode( b.encode('utf8') )
def isstr(s):
return isinstance(s,str)
def iterbytes(byts):
return iter(byts)
| apache-2.0 | 8,203,169,023,927,822,000 | 17.758621 | 55 | 0.636949 | false |