repo_name (stringlengths 6–61) | path (stringlengths 4–230) | copies (stringlengths 1–3) | size (stringlengths 4–6) | text (stringlengths 1.01k–850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6–96.6) | line_max (int64, 32–939) | alpha_frac (float64, 0.26–0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62–6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
kennethreitz/pipenv | pipenv/vendor/requirementslib/utils.py | 1 | 25567 |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function
import logging
import os
import sys
import pip_shims.shims
import six
import six.moves
import tomlkit
import vistir
from six.moves.urllib.parse import urlparse, urlsplit, urlunparse
from vistir.compat import Path, fs_decode
from vistir.path import ensure_mkdir_p, is_valid_url
from .environment import MYPY_RUNNING
# fmt: off
six.add_move( # type: ignore
six.MovedAttribute("Mapping", "collections", "collections.abc") # type: ignore
) # noqa # isort:skip
six.add_move( # type: ignore
six.MovedAttribute("Sequence", "collections", "collections.abc") # type: ignore
) # noqa # isort:skip
six.add_move( # type: ignore
six.MovedAttribute("Set", "collections", "collections.abc") # type: ignore
) # noqa # isort:skip
six.add_move( # type: ignore
six.MovedAttribute("ItemsView", "collections", "collections.abc") # type: ignore
) # noqa
from six.moves import ItemsView, Mapping, Sequence, Set # type: ignore # noqa # isort:skip
# fmt: on
if MYPY_RUNNING:
from typing import Dict, Any, Optional, Union, Tuple, List, Iterable, Text, TypeVar
STRING_TYPE = Union[bytes, str, Text]
S = TypeVar("S", bytes, str, Text)
PipfileEntryType = Union[STRING_TYPE, bool, Tuple[STRING_TYPE], List[STRING_TYPE]]
PipfileType = Union[STRING_TYPE, Dict[STRING_TYPE, PipfileEntryType]]
VCS_LIST = ("git", "svn", "hg", "bzr")
def setup_logger():
logger = logging.getLogger("requirementslib")
loglevel = logging.DEBUG
handler = logging.StreamHandler(stream=sys.stderr)
handler.setLevel(loglevel)
logger.addHandler(handler)
logger.setLevel(loglevel)
return logger
log = setup_logger()
SCHEME_LIST = ("http://", "https://", "ftp://", "ftps://", "file://")
VCS_SCHEMES = [
"git",
"git+http",
"git+https",
"git+ssh",
"git+git",
"git+file",
"hg",
"hg+http",
"hg+https",
"hg+ssh",
"hg+static-http",
"svn",
"svn+ssh",
"svn+http",
"svn+https",
"svn+svn",
"bzr",
"bzr+http",
"bzr+https",
"bzr+ssh",
"bzr+sftp",
"bzr+ftp",
"bzr+lp",
]
def is_installable_dir(path):
# type: (STRING_TYPE) -> bool
if pip_shims.shims.is_installable_dir(path):
return True
pyproject_path = os.path.join(path, "pyproject.toml")
if os.path.exists(pyproject_path):
pyproject = Path(pyproject_path)
pyproject_toml = tomlkit.loads(pyproject.read_text())
build_system = pyproject_toml.get("build-system", {}).get("build-backend", "")
if build_system:
return True
return False
def strip_ssh_from_git_uri(uri):
# type: (S) -> S
"""Return git+ssh:// formatted URI to git+git@ format"""
if isinstance(uri, six.string_types):
if "git+ssh://" in uri:
parsed = urlparse(uri)
# split the path on the first separating / so we can put the first segment
# into the 'netloc' section with a : separator
path_part, _, path = parsed.path.lstrip("/").partition("/")
path = "/{0}".format(path)
parsed = parsed._replace(
netloc="{0}:{1}".format(parsed.netloc, path_part), path=path
)
uri = urlunparse(parsed).replace("git+ssh://", "git+", 1)
return uri
def add_ssh_scheme_to_git_uri(uri):
# type: (S) -> S
"""Cleans VCS uris from pipenv.patched.notpip format"""
if isinstance(uri, six.string_types):
# Add scheme for parsing purposes, this is also what pip does
if uri.startswith("git+") and "://" not in uri:
uri = uri.replace("git+", "git+ssh://", 1)
parsed = urlparse(uri)
if ":" in parsed.netloc:
netloc, _, path_start = parsed.netloc.rpartition(":")
path = "/{0}{1}".format(path_start, parsed.path)
uri = urlunparse(parsed._replace(netloc=netloc, path=path))
return uri
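# Illustrative sketch (not part of the original module): the two helpers above
# are intended to be inverses of each other for GitHub-style URIs; the
# repository URI below is made up for the example.
def _demo_ssh_uri_round_trip():
    scp_style = strip_ssh_from_git_uri("git+ssh://git@github.com/owner/repo.git")
    # scp_style == "git+git@github.com:owner/repo.git"
    restored = add_ssh_scheme_to_git_uri(scp_style)
    # restored == "git+ssh://git@github.com/owner/repo.git"
    return scp_style, restored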
def is_vcs(pipfile_entry):
# type: (PipfileType) -> bool
"""Determine if dictionary entry from Pipfile is for a vcs dependency."""
if isinstance(pipfile_entry, Mapping):
return any(key for key in pipfile_entry.keys() if key in VCS_LIST)
elif isinstance(pipfile_entry, six.string_types):
if not is_valid_url(pipfile_entry) and pipfile_entry.startswith("git+"):
pipfile_entry = add_ssh_scheme_to_git_uri(pipfile_entry)
parsed_entry = urlsplit(pipfile_entry)
return parsed_entry.scheme in VCS_SCHEMES
return False
def is_editable(pipfile_entry):
# type: (PipfileType) -> bool
if isinstance(pipfile_entry, Mapping):
return pipfile_entry.get("editable", False) is True
if isinstance(pipfile_entry, six.string_types):
return pipfile_entry.startswith("-e ")
return False
def is_star(val):
# type: (PipfileType) -> bool
return (isinstance(val, six.string_types) and val == "*") or (
isinstance(val, Mapping) and val.get("version", "") == "*"
)
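# Illustrative sketch (not part of the original module): both Pipfile
# spellings of a wildcard requirement count as "star" entries.
def _demo_is_star():
    assert is_star("*")
    assert is_star({"version": "*"})
    assert not is_star({"version": ">=1.0"})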
def convert_entry_to_path(path):
# type: (Dict[S, Union[S, bool, Tuple[S], List[S]]]) -> S
"""Convert a pipfile entry to a string"""
if not isinstance(path, Mapping):
raise TypeError("expecting a mapping, received {0!r}".format(path))
if not any(key in path for key in ["file", "path"]):
raise ValueError("missing path-like entry in supplied mapping {0!r}".format(path))
if "file" in path:
path = vistir.path.url_to_path(path["file"])
elif "path" in path:
path = path["path"]
if not os.name == "nt":
return fs_decode(path)
return Path(fs_decode(path)).as_posix()
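# Illustrative sketch (not part of the original module): a path-style Pipfile
# entry resolves to its filesystem path; the entry below is made up, and the
# result shown assumes a POSIX system.
def _demo_convert_entry_to_path():
    # Returns "/tmp/example-package" on a POSIX system.
    return convert_entry_to_path({"path": "/tmp/example-package"})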
def is_installable_file(path):
# type: (PipfileType) -> bool
"""Determine if a path can potentially be installed"""
from packaging import specifiers
if isinstance(path, Mapping):
path = convert_entry_to_path(path)
# If the string starts with a valid specifier operator, test if it is a valid
# specifier set before making a path object (to avoid breaking windows)
if any(path.startswith(spec) for spec in "!=<>~"):
try:
specifiers.SpecifierSet(path)
# If this is not a valid specifier, just move on and try it as a path
except specifiers.InvalidSpecifier:
pass
else:
return False
parsed = urlparse(path)
is_local = (
not parsed.scheme
or parsed.scheme == "file"
or (len(parsed.scheme) == 1 and os.name == "nt")
)
if parsed.scheme and parsed.scheme == "file":
path = vistir.compat.fs_decode(vistir.path.url_to_path(path))
normalized_path = vistir.path.normalize_path(path)
if is_local and not os.path.exists(normalized_path):
return False
is_archive = pip_shims.shims.is_archive_file(normalized_path)
is_local_project = os.path.isdir(normalized_path) and is_installable_dir(
normalized_path
)
if is_local and is_local_project or is_archive:
return True
if not is_local and pip_shims.shims.is_archive_file(parsed.path):
return True
return False
def get_dist_metadata(dist):
import pkg_resources
from email.parser import FeedParser
if isinstance(dist, pkg_resources.DistInfoDistribution) and dist.has_metadata(
"METADATA"
):
metadata = dist.get_metadata("METADATA")
elif dist.has_metadata("PKG-INFO"):
metadata = dist.get_metadata("PKG-INFO")
else:
metadata = ""
feed_parser = FeedParser()
feed_parser.feed(metadata)
return feed_parser.close()
def get_setup_paths(base_path, subdirectory=None):
# type: (S, Optional[S]) -> Dict[S, Optional[S]]
if base_path is None:
raise TypeError("must provide a path to derive setup paths from")
setup_py = os.path.join(base_path, "setup.py")
setup_cfg = os.path.join(base_path, "setup.cfg")
pyproject_toml = os.path.join(base_path, "pyproject.toml")
if subdirectory is not None:
base_path = os.path.join(base_path, subdirectory)
subdir_setup_py = os.path.join(subdirectory, "setup.py")
subdir_setup_cfg = os.path.join(subdirectory, "setup.cfg")
subdir_pyproject_toml = os.path.join(subdirectory, "pyproject.toml")
if subdirectory and os.path.exists(subdir_setup_py):
setup_py = subdir_setup_py
if subdirectory and os.path.exists(subdir_setup_cfg):
setup_cfg = subdir_setup_cfg
if subdirectory and os.path.exists(subdir_pyproject_toml):
pyproject_toml = subdir_pyproject_toml
return {
"setup_py": setup_py if os.path.exists(setup_py) else None,
"setup_cfg": setup_cfg if os.path.exists(setup_cfg) else None,
"pyproject_toml": pyproject_toml if os.path.exists(pyproject_toml) else None,
}
def prepare_pip_source_args(sources, pip_args=None):
# type: (List[Dict[S, Union[S, bool]]], Optional[List[S]]) -> List[S]
if pip_args is None:
pip_args = []
if sources:
# Add the source to pip9.
pip_args.extend(["-i", sources[0]["url"]]) # type: ignore
# Trust the host if it's not verified.
if not sources[0].get("verify_ssl", True):
pip_args.extend(
["--trusted-host", urlparse(sources[0]["url"]).hostname]
) # type: ignore
# Add additional sources as extra indexes.
if len(sources) > 1:
for source in sources[1:]:
pip_args.extend(["--extra-index-url", source["url"]]) # type: ignore
# Trust the host if it's not verified.
if not source.get("verify_ssl", True):
pip_args.extend(
["--trusted-host", urlparse(source["url"]).hostname]
) # type: ignore
return pip_args
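# Illustrative sketch (not part of the original module): the source list below
# is made up; it shows how verified and unverified indexes translate into pip
# command-line arguments.
def _demo_prepare_pip_source_args():
    sources = [
        {"url": "https://pypi.org/simple", "verify_ssl": True},
        {"url": "http://pypi.internal:8080/simple", "verify_ssl": False},
    ]
    # Returns ["-i", "https://pypi.org/simple",
    #          "--extra-index-url", "http://pypi.internal:8080/simple",
    #          "--trusted-host", "pypi.internal"]
    return prepare_pip_source_args(sources)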
@ensure_mkdir_p(mode=0o777)
def _ensure_dir(path):
return path
_UNSET = object()
_REMAP_EXIT = object()
# The following functionality is either borrowed or modified from the itertools module
# in the boltons library by Mahmoud Hashemi and distributed under the BSD license
# the text of which is included below:
# (original text from https://github.com/mahmoud/boltons/blob/master/LICENSE)
# Copyright (c) 2013, Mahmoud Hashemi
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * The names of the contributors may not be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class PathAccessError(KeyError, IndexError, TypeError):
"""An amalgamation of KeyError, IndexError, and TypeError,
representing what can occur when looking up a path in a nested
object.
"""
def __init__(self, exc, seg, path):
self.exc = exc
self.seg = seg
self.path = path
def __repr__(self):
cn = self.__class__.__name__
return "%s(%r, %r, %r)" % (cn, self.exc, self.seg, self.path)
def __str__(self):
return "could not access %r from path %r, got error: %r" % (
self.seg,
self.path,
self.exc,
)
def get_path(root, path, default=_UNSET):
"""Retrieve a value from a nested object via a tuple representing the
lookup path.
>>> root = {'a': {'b': {'c': [[1], [2], [3]]}}}
>>> get_path(root, ('a', 'b', 'c', 2, 0))
3
The path format is intentionally consistent with that of
:func:`remap`.
One of get_path's chief aims is improved error messaging. EAFP is
great, but the error messages are not.
For instance, ``root['a']['b']['c'][2][1]`` gives back
``IndexError: list index out of range``
What went out of range where? get_path currently raises
``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2,
1), got error: IndexError('list index out of range',)``, a
subclass of IndexError and KeyError.
You can also pass a default that covers the entire operation,
should the lookup fail at any level.
Args:
root: The target nesting of dictionaries, lists, or other
objects supporting ``__getitem__``.
path (tuple): A list of strings and integers to be successively
looked up within *root*.
default: The value to be returned should any
``PathAccessError`` exceptions be raised.
"""
if isinstance(path, six.string_types):
path = path.split(".")
cur = root
try:
for seg in path:
try:
cur = cur[seg]
except (KeyError, IndexError) as exc:
raise PathAccessError(exc, seg, path)
except TypeError as exc:
# either string index in a list, or a parent that
# doesn't support indexing
try:
seg = int(seg)
cur = cur[seg]
except (ValueError, KeyError, IndexError, TypeError):
if not getattr(cur, "__iter__", None):
exc = TypeError("%r object is not indexable" % type(cur).__name__)
raise PathAccessError(exc, seg, path)
except PathAccessError:
if default is _UNSET:
raise
return default
return cur
def default_visit(path, key, value):
return key, value
_orig_default_visit = default_visit
# Modified from https://github.com/mahmoud/boltons/blob/master/boltons/iterutils.py
def dict_path_enter(path, key, value):
if isinstance(value, six.string_types):
return value, False
elif isinstance(value, (Mapping, dict)):
return value.__class__(), ItemsView(value)
elif isinstance(value, tomlkit.items.Array):
return value.__class__([], value.trivia), enumerate(value)
elif isinstance(value, (Sequence, list)):
return value.__class__(), enumerate(value)
elif isinstance(value, (Set, set)):
return value.__class__(), enumerate(value)
else:
return value, False
def dict_path_exit(path, key, old_parent, new_parent, new_items):
ret = new_parent
if isinstance(new_parent, (Mapping, dict)):
vals = dict(new_items)
try:
new_parent.update(new_items)
except AttributeError:
# Handle toml containers specifically
try:
new_parent.update(vals)
# Now use default fallback if needed
except AttributeError:
ret = new_parent.__class__(vals)
elif isinstance(new_parent, tomlkit.items.Array):
vals = tomlkit.items.item([v for i, v in new_items])
try:
new_parent._value.extend(vals._value)
except AttributeError:
ret = tomlkit.items.item(vals)
elif isinstance(new_parent, (Sequence, list)):
vals = [v for i, v in new_items]
try:
new_parent.extend(vals)
except AttributeError:
ret = new_parent.__class__(vals) # tuples
elif isinstance(new_parent, (Set, set)):
vals = [v for i, v in new_items]
try:
new_parent.update(vals)
except AttributeError:
ret = new_parent.__class__(vals) # frozensets
else:
raise RuntimeError("unexpected iterable type: %r" % type(new_parent))
return ret
def remap(
root, visit=default_visit, enter=dict_path_enter, exit=dict_path_exit, **kwargs
):
"""The remap ("recursive map") function is used to traverse and
transform nested structures. Lists, tuples, sets, and dictionaries
are just a few of the data structures nested into heterogeneous
tree-like structures that are so common in programming.
Unfortunately, Python's built-in ways to manipulate collections
are almost all flat. List comprehensions may be fast and succinct,
but they do not recurse, making it tedious to apply quick changes
or complex transforms to real-world data.
remap goes where list comprehensions cannot.
Here's an example of removing all Nones from some data:
>>> from pprint import pprint
>>> reviews = {'Star Trek': {'TNG': 10, 'DS9': 8.5, 'ENT': None},
... 'Babylon 5': 6, 'Dr. Who': None}
>>> pprint(remap(reviews, lambda p, k, v: v is not None))
{'Babylon 5': 6, 'Star Trek': {'DS9': 8.5, 'TNG': 10}}
Notice how both Nones have been removed despite the nesting in the
dictionary. Not bad for a one-liner, and that's just the beginning.
See `this remap cookbook`_ for more delicious recipes.
.. _this remap cookbook: http://sedimental.org/remap.html
remap takes four main arguments: the object to traverse and three
optional callables which determine how the remapped object will be
created.
Args:
root: The target object to traverse. By default, remap
supports iterables like :class:`list`, :class:`tuple`,
:class:`dict`, and :class:`set`, but any object traversable by
*enter* will work.
visit (callable): This function is called on every item in
*root*. It must accept three positional arguments, *path*,
*key*, and *value*. *path* is simply a tuple of parents'
keys. *visit* should return the new key-value pair. It may
also return ``True`` as shorthand to keep the old item
unmodified, or ``False`` to drop the item from the new
structure. *visit* is called after *enter*, on the new parent.
The *visit* function is called for every item in root,
including duplicate items. For traversable values, it is
called on the new parent object, after all its children
have been visited. The default visit behavior simply
returns the key-value pair unmodified.
enter (callable): This function controls which items in *root*
are traversed. It accepts the same arguments as *visit*: the
path, the key, and the value of the current item. It returns a
pair of the blank new parent, and an iterator over the items
which should be visited. If ``False`` is returned instead of
an iterator, the value will not be traversed.
The *enter* function is only called once per unique value. The
default enter behavior supports mappings, sequences, and
sets. Strings and all other iterables will not be traversed.
exit (callable): This function determines how to handle items
once they have been visited. It gets the same three
arguments as the other functions -- *path*, *key*, *value*
-- plus two more: the blank new parent object returned
from *enter*, and a list of the new items, as remapped by
*visit*.
Like *enter*, the *exit* function is only called once per
unique value. The default exit behavior is to simply add
all new items to the new parent, e.g., using
:meth:`list.extend` and :meth:`dict.update` to add to the
new parent. Immutable objects, such as a :class:`tuple` or
:class:`namedtuple`, must be recreated from scratch, but
use the same type as the new parent passed back from the
*enter* function.
reraise_visit (bool): A pragmatic convenience for the *visit*
callable. When set to ``False``, remap ignores any errors
raised by the *visit* callback. Items causing exceptions
are kept. See examples for more details.
remap is designed to cover the majority of cases with just the
*visit* callable. While passing in multiple callables is very
empowering, remap is designed so very few cases should require
passing more than one function.
When passing *enter* and *exit*, it's common and easiest to build
on the default behavior. Simply add ``from boltons.iterutils import
default_enter`` (or ``default_exit``), and have your enter/exit
function call the default behavior before or after your custom
logic. See `this example`_.
Duplicate and self-referential objects (aka reference loops) are
automatically handled internally, `as shown here`_.
.. _this example: http://sedimental.org/remap.html#sort_all_lists
.. _as shown here: http://sedimental.org/remap.html#corner_cases
"""
# TODO: improve argument formatting in sphinx doc
# TODO: enter() return (False, items) to continue traverse but cancel copy?
if not callable(visit):
raise TypeError("visit expected callable, not: %r" % visit)
if not callable(enter):
raise TypeError("enter expected callable, not: %r" % enter)
if not callable(exit):
raise TypeError("exit expected callable, not: %r" % exit)
reraise_visit = kwargs.pop("reraise_visit", True)
if kwargs:
raise TypeError("unexpected keyword arguments: %r" % kwargs.keys())
path, registry, stack = (), {}, [(None, root)]
new_items_stack = []
while stack:
key, value = stack.pop()
id_value = id(value)
if key is _REMAP_EXIT:
key, new_parent, old_parent = value
id_value = id(old_parent)
path, new_items = new_items_stack.pop()
value = exit(path, key, old_parent, new_parent, new_items)
registry[id_value] = value
if not new_items_stack:
continue
elif id_value in registry:
value = registry[id_value]
else:
res = enter(path, key, value)
try:
new_parent, new_items = res
except TypeError:
# TODO: handle False?
raise TypeError(
"enter should return a tuple of (new_parent,"
" items_iterator), not: %r" % res
)
if new_items is not False:
# traverse unless False is explicitly passed
registry[id_value] = new_parent
new_items_stack.append((path, []))
if value is not root:
path += (key,)
stack.append((_REMAP_EXIT, (key, new_parent, value)))
if new_items:
stack.extend(reversed(list(new_items)))
continue
if visit is _orig_default_visit:
# avoid function call overhead by inlining identity operation
visited_item = (key, value)
else:
try:
visited_item = visit(path, key, value)
except Exception:
if reraise_visit:
raise
visited_item = True
if visited_item is False:
continue # drop
elif visited_item is True:
visited_item = (key, value)
# TODO: typecheck?
# raise TypeError('expected (key, value) from visit(),'
# ' not: %r' % visited_item)
try:
new_items_stack[-1][1].append(visited_item)
except IndexError:
raise TypeError("expected remappable root, not: %r" % root)
return value
def merge_items(target_list, sourced=False):
if not sourced:
target_list = [(id(t), t) for t in target_list]
ret = None
source_map = {}
def remerge_enter(path, key, value):
new_parent, new_items = dict_path_enter(path, key, value)
if ret and not path and key is None:
new_parent = ret
try:
cur_val = get_path(ret, path + (key,))
except KeyError as ke:
pass
else:
new_parent = cur_val
return new_parent, new_items
def remerge_exit(path, key, old_parent, new_parent, new_items):
return dict_path_exit(path, key, old_parent, new_parent, new_items)
for t_name, target in target_list:
if sourced:
def remerge_visit(path, key, value):
source_map[path + (key,)] = t_name
return True
else:
remerge_visit = default_visit
ret = remap(target, enter=remerge_enter, visit=remerge_visit, exit=remerge_exit)
if not sourced:
return ret
return ret, source_map
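# Illustrative sketch (not part of the original module): merge_items performs
# a deep merge, layering later mappings on top of earlier ones; the
# dictionaries below are made up for the example.
def _demo_merge_items():
    # Returns {"a": {"x": 1, "y": 2}, "b": 3}
    return merge_items([{"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}])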
| mit | 6,469,714,402,878,080,000 | 37.102832 | 93 | 0.616146 | false | 3.900381 | false | false | false |
ivanalejandro0/spotimute | spotimute/test_spotimute.py | 1 | 1083 |
#!/usr/bin/env python
# encoding: utf-8
from audiomanager import AudioManager
from spotify import Spotify
def status(audio, spotify):
print '-' * 50
print "Sink name:", audio._sink_name
print "Sink id:", audio._sink_id
print "is muted:", audio.is_muted()
print "volume:", audio.get_volume()
print '-' * 5
print "Spotify title:", spotify.get_title().encode('utf-8')
print "Spotify title type:", type(spotify.get_title())
# print "Spotify title unicode:", spotify.get_title().decode('')
print "Spotify is blacklisted:", spotify.is_blacklisted()
print '-' * 50
def loop(audio, spotify):
import time
a = audio
while True:
if spotify.is_blacklisted():
if not a.is_muted():
a.mute()
status(audio, spotify)
else:
if a.is_muted():
a.unmute()
status(audio, spotify)
time.sleep(0.1)
if __name__ == '__main__':
audio = AudioManager()
spotify = Spotify()
status(audio, spotify)
# loop(audio, spotify)
| gpl-2.0 | -7,367,753,314,312,036,000 | 23.613636 | 68 | 0.574331 | false | 3.482315 | false | false | false |
deepmind/dm_control | dm_control/locomotion/walkers/base.py | 1 | 6682 |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base class for Walkers."""
import abc
import collections
from dm_control import composer
from dm_control.composer.observation import observable
from dm_env import specs
import numpy as np
def _make_readonly_float64_copy(value):
if np.isscalar(value):
return np.float64(value)
else:
out = np.array(value, dtype=np.float64)
out.flags.writeable = False
return out
class WalkerPose(collections.namedtuple(
'WalkerPose', ('qpos', 'xpos', 'xquat'))):
"""A named tuple representing a walker's joint and Cartesian pose."""
__slots__ = ()
def __new__(cls, qpos=None, xpos=(0, 0, 0), xquat=(1, 0, 0, 0)):
"""Creates a new WalkerPose.
Args:
qpos: The joint position for the pose, or `None` if the `qpos0` values in
the `mjModel` should be used.
xpos: A Cartesian displacement, for example if the walker should be lifted
or lowered by a specific amount for this pose.
xquat: A quaternion displacement for the root body.
Returns:
A new instance of `WalkerPose`.
"""
return super(WalkerPose, cls).__new__(
cls,
qpos=_make_readonly_float64_copy(qpos) if qpos is not None else None,
xpos=_make_readonly_float64_copy(xpos),
xquat=_make_readonly_float64_copy(xquat))
def __eq__(self, other):
return (np.all(self.qpos == other.qpos) and
np.all(self.xpos == other.xpos) and
np.all(self.xquat == other.xquat))
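# Illustrative sketch (not part of the original file): WalkerPose stores
# read-only float64 copies, so poses built from equal values compare equal
# and cannot be mutated in place.
def _demo_walker_pose():
  pose_a = WalkerPose(xpos=(0, 0, 0.5))
  pose_b = WalkerPose(xpos=[0, 0, 0.5])
  assert pose_a == pose_b
  assert not pose_a.xpos.flags.writeable
  return pose_a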
class Walker(composer.Robot, metaclass=abc.ABCMeta):
"""Abstract base class for Walker robots."""
def create_root_joints(self, attachment_frame):
attachment_frame.add('freejoint')
def _build_observables(self):
return WalkerObservables(self)
def transform_vec_to_egocentric_frame(self, physics, vec_in_world_frame):
"""Linearly transforms a world-frame vector into walker's egocentric frame.
Note that this function does not perform an affine transformation of the
vector. In other words, the input vector is assumed to be specified with
respect to the same origin as this walker's egocentric frame. This function
can also be applied to matrices whose innermost dimensions are either 2 or
3. In this case, a matrix with the same leading dimensions is returned
where the innermost vectors are replaced by their values computed in the
egocentric frame.
Args:
physics: An `mjcf.Physics` instance.
vec_in_world_frame: A NumPy array with last dimension of shape (2,) or
(3,) that represents a vector quantity in the world frame.
Returns:
The same quantity as `vec_in_world_frame` but reexpressed in this
entity's egocentric frame. The returned np.array has the same shape as
np.asarray(vec_in_world_frame).
Raises:
ValueError: if `vec_in_world_frame` does not have shape ending with (2,)
or (3,).
"""
return super().global_vector_to_local_frame(physics, vec_in_world_frame)
def transform_xmat_to_egocentric_frame(self, physics, xmat):
"""Transforms another entity's `xmat` into this walker's egocentric frame.
This function takes another entity's (E) xmat, which is an SO(3) matrix
from E's frame to the world frame, and turns it to a matrix that transforms
from E's frame into this walker's egocentric frame.
Args:
physics: An `mjcf.Physics` instance.
xmat: A NumPy array of shape (3, 3) or (9,) that represents another
entity's xmat.
Returns:
The `xmat` reexpressed in this entity's egocentric frame. The returned
np.array has the same shape as np.asarray(xmat).
Raises:
ValueError: if `xmat` does not have shape (3, 3) or (9,).
"""
return super().global_xmat_to_local_frame(physics, xmat)
@abc.abstractproperty
def root_body(self):
raise NotImplementedError
@abc.abstractproperty
def observable_joints(self):
raise NotImplementedError
@property
def action_spec(self):
if not self.actuators:
minimum, maximum = (), ()
else:
minimum, maximum = zip(*[
a.ctrlrange if a.ctrlrange is not None else (-1., 1.)
for a in self.actuators
])
return specs.BoundedArray(
shape=(len(self.actuators),),
dtype=np.float,
minimum=minimum,
maximum=maximum,
name='\t'.join([actuator.name for actuator in self.actuators]))
def apply_action(self, physics, action, random_state):
"""Apply action to walker's actuators."""
del random_state
physics.bind(self.actuators).ctrl = action
class WalkerObservables(composer.Observables):
"""Base class for Walker obserables."""
@composer.observable
def joints_pos(self):
return observable.MJCFFeature('qpos', self._entity.observable_joints)
@composer.observable
def sensors_gyro(self):
return observable.MJCFFeature('sensordata',
self._entity.mjcf_model.sensor.gyro)
@composer.observable
def sensors_accelerometer(self):
return observable.MJCFFeature('sensordata',
self._entity.mjcf_model.sensor.accelerometer)
@composer.observable
def sensors_framequat(self):
return observable.MJCFFeature('sensordata',
self._entity.mjcf_model.sensor.framequat)
# Semantic groupings of Walker observables.
def _collect_from_attachments(self, attribute_name):
out = []
for entity in self._entity.iter_entities(exclude_self=True):
out.extend(getattr(entity.observables, attribute_name, []))
return out
@property
def proprioception(self):
return ([self.joints_pos] +
self._collect_from_attachments('proprioception'))
@property
def kinematic_sensors(self):
return ([self.sensors_gyro,
self.sensors_accelerometer,
self.sensors_framequat] +
self._collect_from_attachments('kinematic_sensors'))
@property
def dynamic_sensors(self):
return self._collect_from_attachments('dynamic_sensors')
| apache-2.0 | 2,419,647,580,086,868,000 | 32.747475 | 80 | 0.673152 | false | 3.681543 | false | false | false |
pyroscope/pyrobase | src/pyrobase/bencode.py | 1 | 7221 |
# -*- coding: utf-8 -*-
# pylint: disable=too-few-public-methods,invalid-name
""" Bencode support.
Copyright (c) 2009-2017 The PyroScope Project <[email protected]>
See http://en.wikipedia.org/wiki/Bencode
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from six import string_types, text_type, binary_type, integer_types
class BencodeError(ValueError):
""" Error during decoding or encoding.
"""
class Decoder(object):
""" Decode a string or stream to an object.
"""
def __init__(self, data, char_encoding='utf-8'):
""" Initialize encoder.
"""
if isinstance(data, text_type):
self.data = data.encode(char_encoding)
else:
self.data = data
self.offset = 0
self.char_encoding = char_encoding
def decode(self, check_trailer=False): # pylint: disable=I0011,R0912
""" Decode data in C{self.data} and return deserialized object.
@param check_trailer: Raise error if trailing junk is found in data?
@raise BencodeError: Invalid data.
"""
if self.offset >= len(self.data):
raise BencodeError("Unexpected end of data at offset %d/%d" % (
self.offset, len(self.data),
))
kind = self.data[self.offset:self.offset+1] # get bytes of length 1, not an int
if b'0' <= kind <= b'9':
# String
try:
end = self.data.find(b':', self.offset)
length = int(self.data[self.offset:end], 10)
except (ValueError, TypeError):
raise BencodeError("Bad string length at offset %d (%r...)" % (
self.offset, self.data[self.offset:self.offset+32]
))
self.offset = end+length+1
obj = self.data[end+1:self.offset]
if self.char_encoding:
try:
obj = obj.decode(self.char_encoding)
except (UnicodeError, AttributeError):
# deliver non-decodable string (byte arrays) as-is
pass
elif kind == b'i':
# Integer
try:
end = self.data.find(b'e', self.offset+1)
obj = int(self.data[self.offset+1:end], 10)
except (ValueError, TypeError):
raise BencodeError("Bad integer at offset %d (%r...)" % (
self.offset, self.data[self.offset:self.offset+32]
))
self.offset = end+1
elif kind == b'l':
# List
self.offset += 1
obj = []
while self.data[self.offset:self.offset+1] != b'e':
obj.append(self.decode())
self.offset += 1
elif kind == b'd':
# Dict
self.offset += 1
obj = {}
while self.data[self.offset:self.offset+1] != b'e':
key = self.decode()
obj[key] = self.decode()
self.offset += 1
else:
raise BencodeError("Format error at offset %d (%r...)" % (
self.offset, self.data[self.offset:self.offset+32]
))
if check_trailer and self.offset != len(self.data):
raise BencodeError("Trailing data at offset %d (%r...)" % (
self.offset, self.data[self.offset:self.offset+32]
))
return obj
class Encoder(object):
""" Encode a given object to an array of bytestrings.
"""
def __init__(self, char_encoding='utf-8'):
""" Initialize encoder.
"""
self.result = []
self.char_encoding = char_encoding
def encode(self, obj):
""" Add the given object to the result.
"""
if isinstance(obj, bool):
self.result.append(b"i1e" if obj else b"i0e")
elif isinstance(obj, integer_types):
self.result.extend([b"i", text_type(obj).encode(self.char_encoding), b"e"])
elif isinstance(obj, string_types):
if isinstance(obj, text_type):
obj = obj.encode(self.char_encoding)
self.result.extend([str(len(obj)).encode(self.char_encoding), b':', obj])
elif isinstance(obj, binary_type):
# Previous check catches py2's str
self.result.extend([str(len(obj)).encode(self.char_encoding), b':', obj])
elif hasattr(obj, "__bencode__"):
self.encode(obj.__bencode__())
elif hasattr(obj, "items"):
# Dictionary
self.result.append(b'd')
for key, val in sorted(obj.items()):
if isinstance(key, integer_types):
key = text_type(key).encode(self.char_encoding)
if not isinstance(key, string_types + (binary_type,)):
raise BencodeError("Dict key must be bytestring, found '%s'" % key)
if isinstance(key, text_type):
key = key.encode(self.char_encoding)
self.result.extend([str(len(key)).encode(self.char_encoding), b':', key])
self.encode(val)
self.result.append(b'e')
else:
# Treat as iterable
try:
items = iter(obj)
except TypeError as exc:
raise BencodeError("Unsupported non-iterable object %r of type %s (%s)" % (
obj, type(obj), exc
))
else:
self.result.append(b'l')
for item in items:
self.encode(item)
self.result.append(b'e')
return self.result
def bdecode(data, char_encoding='utf-8'):
""" Decode a string or stream to an object.
"""
return Decoder(data, char_encoding).decode(check_trailer=True)
def bencode(obj, char_encoding='utf-8'):
""" Encode a given object to data.
"""
return b''.join(Encoder(char_encoding).encode(obj))
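# Illustrative usage sketch (not part of the original module): a tiny round
# trip showing the wire format produced and parsed above.
def _demo_bencode_round_trip():
    encoded = bencode({"announce": "http://tracker/", "info": {"length": 3}})
    # encoded == b'd8:announce15:http://tracker/4:infod6:lengthi3eee'
    decoded = bdecode(b'li1ei2e4:spame')
    # decoded == [1, 2, 'spam']
    return encoded, decoded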
def bread(stream):
""" Decode a file or stream to an object.
"""
if hasattr(stream, "read"):
return bdecode(stream.read())
else:
handle = open(stream, "rb")
try:
return bdecode(handle.read())
finally:
handle.close()
def bwrite(stream, obj):
""" Encode a given object to a file or stream.
"""
handle = None
if not hasattr(stream, "write"):
stream = handle = open(stream, "wb")
try:
stream.write(bencode(obj))
finally:
if handle:
handle.close()
| gpl-2.0 | 1,536,549,421,344,492,800 | 33.884058 | 91 | 0.549924 | false | 4.038591 | false | false | false |
Jumpscale/jumpscale6_core | lib/JumpScale/grid/messagehandling/test/test_performance.py | 1 | 2017 |
import time
from JumpScale import j
if not q._init_called:
from JumpScale.core.InitBaseCore import q
def logtest(total, interval, message, format=False):
j.core.messagehandler3.connect2localLogserver()
start = time.time()
result = []
for n in xrange(1, total + 1):
if n % interval == 0:
t = time.time()
delta = t - start
print "Did %d of %d logs in %ss" % (n, total, delta)
result.append({
"done": n,
"time": delta
})
if format:
data = {
"n": n,
"total": total
}
j.logger.log(message % data)
else:
j.logger.log(message)
totalTime = time.time() - start
average = total / float(totalTime)
print "Logged %d messages at %f messages per second on average" % (total,
average)
return result
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Test the logging system")
parser.add_argument("--message", help="The message to log, can include "
"%(n)s and %(total)s if you enable formatting", default="Testing 1 2 3")
parser.add_argument("--format", action="store_true",
help="Message contains formatting")
parser.add_argument("--total", type=int, default=10000,
help="The total amount of log calls that should happen")
parser.add_argument("--interval", type=int, default=1000,
help="The interval to print the passed time")
parser.add_argument("--zeromq", action="store_true",
help="Enable the 0MQ log handler")
parser.add_argument("--dump-json", dest="dumpjson",
type=argparse.FileType('w'))
args = parser.parse_args()
result = logtest(args.total, args.interval, args.message, args.format)
| bsd-2-clause | -4,300,191,139,590,101,000 | 33.186441 | 96 | 0.540407 | false | 4.282378 | false | false | false |
AASHE/iss | run_tests.py | 1 | 1459 |
#!/usr/bin/env python
import logging
import os
import sys
BASE_PATH = os.path.dirname(__file__)
logging.basicConfig()
def main():
"""
Standalone django model test with a 'memory-only-django-installation'.
You can play with a django model without a complete django app
installation.
http://www.djangosnippets.org/snippets/1044/
"""
sys.exc_clear()
import django.test.utils
os.environ["DJANGO_SETTINGS_MODULE"] = "django.conf.global_settings"
from django.conf import global_settings
# ISS Settings:
global_settings.MS_ACCESS_KEY = os.environ["MS_ACCESS_KEY"]
global_settings.MS_SECRET_KEY = os.environ["MS_SECRET_KEY"]
global_settings.MS_ASSOCIATION_ID = os.environ["MS_ASSOCIATION_ID"]
global_settings.INSTALLED_APPS = ('iss',)
global_settings.DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_PATH, 'iss.sqlite'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
global_settings.SECRET_KEY = "blahblah"
if django.VERSION > (1, 7):
django.setup()
test_runner = django.test.utils.get_runner(global_settings)
if django.VERSION > (1, 2):
failures = test_runner().run_tests(['iss'])
else:
failures = test_runner(['iss'], verbosity=2)
sys.exit(failures)
if __name__ == '__main__':
main()
| mit | -1,009,402,497,536,099,600 | 22.918033 | 74 | 0.604524 | false | 3.584767 | true | false | false |
muffinresearch/amo-validator | validator/testcases/scripting.py | 7 | 1914 |
from validator.constants import PACKAGE_THEME
from validator.contextgenerator import ContextGenerator
from validator.testcases.javascript import traverser
from validator.testcases.javascript.jsshell import get_tree
def test_js_file(err, filename, data, line=0, context=None, pollutable=False):
'Test a JS file by parsing and analyzing its tokens.'
if err.detected_type == PACKAGE_THEME:
err.warning(
err_id=('testcases_scripting',
'test_js_file',
'theme_js'),
warning='JS run from full theme',
description='Themes should not contain executable code.',
filename=filename,
line=line)
before_tier = None
# Set the tier to 4 (Security Tests)
if err is not None:
before_tier = err.tier
err.set_tier(3)
tree = get_tree(data, filename=filename, err=err)
if not tree:
if before_tier:
err.set_tier(before_tier)
return
# Generate a context if one is not available.
if context is None:
context = ContextGenerator(data)
t = traverser.Traverser(err, filename, line, context=context,
is_jsm=(filename.endswith('.jsm') or
'EXPORTED_SYMBOLS' in data))
t.pollutable = pollutable
t.run(tree)
# Reset the tier so we don't break the world
if err is not None:
err.set_tier(before_tier)
def test_js_snippet(err, data, filename, line=0, context=None):
'Process a JS snippet by passing it through to the file tester.'
if not data:
return
# Wrap snippets in a function to prevent the parser from freaking out
# when return statements exist without a corresponding function.
data = '(function(){%s\n})()' % data
test_js_file(err, filename, data, line, context, pollutable=False)
| bsd-3-clause | 2,682,991,133,804,893,700 | 32.578947 | 78 | 0.621735 | false | 4.055085 | true | false | false |
bgris/ODL_bgris | lib/python3.5/idlelib/idle_test/test_config_help.py | 2 | 3566 |
"""Unittests for idlelib.configHelpSourceEdit"""
import unittest
from idlelib.idle_test.mock_tk import Var, Mbox, Entry
from idlelib import configHelpSourceEdit as help_dialog_module
help_dialog = help_dialog_module.GetHelpSourceDialog
class Dummy_help_dialog:
# Mock for testing the following methods of help_dialog
menu_ok = help_dialog.menu_ok
path_ok = help_dialog.path_ok
ok = help_dialog.ok
cancel = help_dialog.cancel
# Attributes, constant or variable, needed for tests
menu = Var()
entryMenu = Entry()
path = Var()
entryPath = Entry()
result = None
destroyed = False
def destroy(self):
self.destroyed = True
# menu_ok and path_ok call Mbox.showerror if menu and path are not ok.
orig_mbox = help_dialog_module.tkMessageBox
showerror = Mbox.showerror
class ConfigHelpTest(unittest.TestCase):
dialog = Dummy_help_dialog()
@classmethod
def setUpClass(cls):
help_dialog_module.tkMessageBox = Mbox
@classmethod
def tearDownClass(cls):
help_dialog_module.tkMessageBox = orig_mbox
def test_blank_menu(self):
self.dialog.menu.set('')
self.assertFalse(self.dialog.menu_ok())
self.assertEqual(showerror.title, 'Menu Item Error')
self.assertIn('No', showerror.message)
def test_long_menu(self):
self.dialog.menu.set('hello' * 10)
self.assertFalse(self.dialog.menu_ok())
self.assertEqual(showerror.title, 'Menu Item Error')
self.assertIn('long', showerror.message)
def test_good_menu(self):
self.dialog.menu.set('help')
showerror.title = 'No Error' # should not be called
self.assertTrue(self.dialog.menu_ok())
self.assertEqual(showerror.title, 'No Error')
def test_blank_path(self):
self.dialog.path.set('')
self.assertFalse(self.dialog.path_ok())
self.assertEqual(showerror.title, 'File Path Error')
self.assertIn('No', showerror.message)
def test_invalid_file_path(self):
self.dialog.path.set('foobar' * 100)
self.assertFalse(self.dialog.path_ok())
self.assertEqual(showerror.title, 'File Path Error')
self.assertIn('not exist', showerror.message)
def test_invalid_url_path(self):
self.dialog.path.set('ww.foobar.com')
self.assertFalse(self.dialog.path_ok())
self.assertEqual(showerror.title, 'File Path Error')
self.assertIn('not exist', showerror.message)
self.dialog.path.set('htt.foobar.com')
self.assertFalse(self.dialog.path_ok())
self.assertEqual(showerror.title, 'File Path Error')
self.assertIn('not exist', showerror.message)
def test_good_path(self):
self.dialog.path.set('https://docs.python.org')
showerror.title = 'No Error' # should not be called
self.assertTrue(self.dialog.path_ok())
self.assertEqual(showerror.title, 'No Error')
def test_ok(self):
self.dialog.destroyed = False
self.dialog.menu.set('help')
self.dialog.path.set('https://docs.python.org')
self.dialog.ok()
self.assertEqual(self.dialog.result, ('help',
'https://docs.python.org'))
self.assertTrue(self.dialog.destroyed)
def test_cancel(self):
self.dialog.destroyed = False
self.dialog.cancel()
self.assertEqual(self.dialog.result, None)
self.assertTrue(self.dialog.destroyed)
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
| gpl-3.0 | -2,845,700,318,091,803,600 | 32.641509 | 73 | 0.650589 | false | 3.722338 | true | false | false |
coderjames/pascal | quex-0.63.1/quex-exe.py | 1 | 2473 |
#! /usr/bin/env python
#
# Quex is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this library; if not, write to the Free Software Foundation, Inc., 59
# Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# (C) 2005-2012 Frank-Rene Schaefer
#
################################################################################
import sys
import os
import quex.exception_checker as exception_checker
if sys.version_info[0] >= 3:
print("error: This version of quex was not implemented for Python >= 3.0")
print("error: Please, use Python versions 2.x.")
sys.exit(-1)
if os.environ.has_key("QUEX_PATH") == False:
print("Environment variable QUEX_PATH has not been defined.")
else:
sys.path.insert(0, os.environ["QUEX_PATH"])
try:
exception_checker.do_on_import(sys.argv)
import quex.DEFINITIONS
import quex.input.command_line.core as command_line
import quex.input.command_line.query as query
import quex.core as core
except BaseException as instance:
exception_checker.handle(instance)
try:
pass
# import psyco
# psyco.full()
except:
pass
if __name__ == "__main__":
try:
quex.DEFINITIONS.check()
# (*) Test Exceptions __________________________________________________
if exception_checker.do(sys.argv):
# Done: Tests about exceptions have been performed
pass
# (*) Query ____________________________________________________________
elif query.do(sys.argv):
# Done: Queries about unicode sets and regular expressions
pass
# (*) The Real Job _____________________________________________________
elif command_line.do(sys.argv):
# To do: Interpret input files and generate code or drawings.
core.do()
except BaseException as instance:
exception_checker.handle(instance)
| bsd-2-clause | -2,426,523,884,556,605,400 | 32.876712 | 80 | 0.597655 | false | 4.067434 | false | false | false |
jaustinpage/frc_rekt | data/vex/download_curves.py | 1 | 2867 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import logging
import os
import re
import requests
import zipfile
from pathlib import Path
from urllib.parse import urlparse
# yapf: disable
files = {'cim': ['https://content.vexrobotics.com/motors/217-2000-cim/cim-motor-curve-data-20151104.csv',
'https://content.vexrobotics.com/motors/217-2000-cim/cim-peak-power-data-20151104.csv',
'https://content.vexrobotics.com/motors/217-2000-cim/cim-locked-rotor-data-20151104.zip'],
'mini-cim': ['https://content.vexrobotics.com/motors/217-3371-mini-cim/mini-cim-motor-curve-data-20151207.csv',
'https://content.vexrobotics.com/motors/217-3371-mini-cim/mini-cim-peak-power-data-20151207.csv',
'https://content.vexrobotics.com/motors/217-3371-mini-cim/mini-cim-locked-rotor-data-20151209-2.zip'],
'775pro': ['https://content.vexrobotics.com/motors/217-4347-775pro/775pro-motor-curve-data-20151208.csv',
'https://content.vexrobotics.com/motors/217-4347-775pro/775pro-peak-power-data-20151210.csv',
'https://content.vexrobotics.com/motors/217-4347-775pro/775pro-locked-rotor-data-20151209.zip'],
'bag': ['https://content.vexrobotics.com/motors/217-3351-bag/bag-motor-curve-data-20151207.csv',
'https://content.vexrobotics.com/motors/217-3351-bag/bag-peak-power-data-20151207.csv',
'https://content.vexrobotics.com/motors/217-3351-bag/bag-locked-rotor-data-20151207.zip']}
# yapf: enable
def file_exists(file_path):
try:
if os.stat("file").st_size != 0:
return True
except FileNotFoundError:
pass
return False
def unzip_file(path):
path = Path(path)
if path.suffix == '.zip':
logging.info('Unzipping %s to %s', path, path.parent)
with zipfile.ZipFile(str(path), 'r') as zip_ref:
zip_ref.extractall(str(path.parent))
def download_file(motor, url):
# Gets just the '*.csv' part
fname = Path(urlparse(url).path).name
fpath = '{0}/{1}'.format(motor, fname)
logging.info('Downloading %s to %s', url, fpath)
    if not file_exists(fpath):
r = requests.get(url)
with open(fpath, 'wb') as dfile:
dfile.write(r.content)
return fpath
def download_files():
directory = 'data/vex/'
for motor in files.keys():
mpath = directory + motor
try:
logging.info('Creating direcotry %s', str(mpath))
os.makedirs(mpath)
except FileExistsError:
logging.info('Directory %s already exists', str(mpath))
for url in files[motor]:
fpath = download_file(mpath, url)
unzip_file(fpath)
def main():
download_files()
if __name__ == '__main__':
main()
| mit | -599,580,394,822,621,800 | 35.291139 | 124 | 0.622951 | false | 3.147091 | false | false | false |
pyfarm/pyfarm-agent | pyfarm/agent/logger/python.py | 2 | 3495 |
# No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Python Logger
-------------
This module provides the facilities to capture and send
log records from Python's logger into Twisted. It also
provides a :class:`Logger` class and :func:`getLogger`
function to replace the built-in Python implementations.
"""
from time import time
from logging import (
NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL, Handler)
from twisted.python.log import msg
class Logger(object):
"""
A stand-in for an instance of :class:`logging.Logger`
Unlike the standard logger this just forwards all messages
to Twisted's logging system.
"""
def __init__(self, name):
self.name = name
self.disabled = False
def debug(self, message, *args):
if not self.disabled:
msg(message, args=args, system=self.name,
time=time(), logLevel=DEBUG)
def info(self, message, *args):
if not self.disabled:
msg(message, args=args, system=self.name,
time=time(), logLevel=INFO)
def warning(self, message, *args):
if not self.disabled:
msg(message, args=args, system=self.name,
time=time(), logLevel=WARNING)
def error(self, message, *args):
if not self.disabled:
msg(message, args=args, system=self.name,
time=time(), logLevel=ERROR)
def critical(self, message, *args):
if not self.disabled:
msg(message, args=args, system=self.name,
time=time(), logLevel=CRITICAL)
def fatal(self, message, *args):
if not self.disabled:
msg(message, args=args, system=self.name,
time=time(), logLevel=FATAL)
class LogRecordToTwisted(Handler):
"""
Captures logging events for a standard Python logger
and sends them to Twisted. Twisted has a built in
class to help work with Python's logging library
however it won't translate everything directly.
"""
def __init__(self):
# We don't use these attributes because the observer
# handles these. But we still have to provide them
# because the Python logger system will try to access
# them.
self.level = NOTSET
self.filters = []
def emit(self, record):
"""
Emits an instance of :class:`logging.LogRecord` into Twisted's
logging system.
"""
msg(record.msg, args=record.args, python_record=True,
time=record.created, system=record.name, logLevel=record.levelno)
def acquire(self):
pass
def release(self):
pass
def createLock(self):
pass
def close(self):
pass
def getLogger(name):
"""
Analog to Python's :func:`logging.getLogger` except it
returns instances of :class:`Logger` instead.
"""
return Logger("pf.%s" % name) | apache-2.0 | -4,971,633,859,256,521,000 | 28.880342 | 77 | 0.643491 | false | 4.106933 | false | false | false |
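# Illustrative wiring sketch (not part of the original module): stdlib loggers
# are bridged by installing the handler, while new code can log straight to
# Twisted through the replacement Logger. The logger names below are made up.
def _demo_wiring():
    import logging
    # Forward records from existing stdlib loggers into Twisted.
    logging.getLogger().addHandler(LogRecordToTwisted())
    logging.getLogger("third.party").warning("stdlib record, via the handler")
    # New code can use the Twisted-backed Logger directly.
    getLogger("agent.example").info("connected to %s", "master")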
glorpen/webassets | src/webassets/filter/jinja2.py | 21 | 1267 |
from __future__ import absolute_import
from webassets.filter import Filter
__all__ = ('Jinja2',)
class Jinja2(Filter):
"""Process a file through the Jinja2 templating engine.
Requires the ``jinja2`` package (https://github.com/mitsuhiko/jinja2).
The Jinja2 context can be specified with the `JINJA2_CONTEXT` configuration
option or directly with `context={...}`. Example:
.. code-block:: python
Bundle('input.css', filters=Jinja2(context={'foo': 'bar'}))
    Additionally, to enable template loading from your project, you can provide
    `JINJA2_ENV` or the `jinja2_env` argument to make use of an already created environment.
"""
name = 'jinja2'
max_debug_level = None
options = {
'context': 'JINJA2_CONTEXT',
'jinja2_env': 'JINJA2_ENV'
}
def setup(self):
try:
import jinja2
except ImportError:
raise EnvironmentError('The "jinja2" package is not installed.')
else:
self.jinja2 = jinja2
super(Jinja2, self).setup()
def input(self, _in, out, **kw):
tpl_factory = self.jinja2_env.from_string if self.jinja2_env else self.jinja2.Template
out.write(tpl_factory(_in.read()).render(self.context or {}))
| bsd-2-clause | -1,223,482,834,556,889,900 | 29.166667 | 94 | 0.636148 | false | 3.759644 | false | false | false |
churchlab/millstone | genome_designer/genome_finish/millstone_de_novo_fns.py | 1 | 11684 |
import subprocess
import os
from django.conf import settings
import numpy as np
import pysam
from genome_finish import __path__ as gf_path_list
from genome_finish.insertion_placement_read_trkg import extract_left_and_right_clipped_read_dicts
from main.models import Dataset
from main.models import Variant
from main.models import VariantSet
from utils.bam_utils import clipping_stats
from variants.variant_sets import update_variant_in_set_memberships
GENOME_FINISH_PATH = gf_path_list[0]
VELVETH_BINARY = settings.TOOLS_DIR + '/velvet/velveth'
VELVETG_BINARY = settings.TOOLS_DIR + '/velvet/velvetg'
def get_altalign_reads(input_bam_path, output_bam_path, xs_threshold=None):
input_af = pysam.AlignmentFile(input_bam_path, 'rb')
output_af = pysam.AlignmentFile(output_bam_path, 'wb',
template=input_af)
for read in input_af:
if read.has_tag('XS') and read.has_tag('AS'):
if read.get_tag('AS') <= read.get_tag('XS'):
output_af.write(read)
output_af.close()
input_af.close()
def get_piled_reads(input_bam_path, output_bam_path,
clipping_threshold=None):
"""Creates bam of reads that have more than clipping_threshold bases
of clipping and are stacked 3 standard deviations higher than the average
pileup of clipped reads. If no clipping_threshold specified, clipping
stats for the alignment are calculated and the clipping_threshold is set
to the mean + one stddev of the per read clipping of a sample of
10000 reads.
"""
if clipping_threshold is None:
stats = clipping_stats(input_bam_path, sample_size=10000)
clipping_threshold = int(stats['mean'] + stats['std'])
input_af = pysam.AlignmentFile(input_bam_path, 'rb')
output_af = pysam.AlignmentFile(output_bam_path, 'wb',
template=input_af)
lr_clipped = extract_left_and_right_clipped_read_dicts(
input_af,
clipping_threshold=clipping_threshold)
input_af.close()
for clipped_dict in [
lr_clipped['left_clipped'], lr_clipped['right_clipped']]:
stack_counts = map(len, clipped_dict.values())
mean_stacking = np.mean(stack_counts)
std_stacking = np.std(stack_counts)
stacking_cutoff = mean_stacking + 3 * std_stacking
for read_list in clipped_dict.values():
if len(read_list) > stacking_cutoff:
for read in read_list:
output_af.write(read)
output_af.close()
def get_clipped_reads_smart(input_bam_path, output_bam_path,
clipping_threshold=8, phred_encoding=None):
"""Gets reads not overlapping their adaptor with a terminal
segment of clipping with average phred scores above the cutoff
"""
phred_encoding_to_shift = {
'Illumina 1.5': 31,
'Sanger / Illumina 1.9': 0
}
CLIPPED_AVG_PHRED_CUTOFF = 20
if (phred_encoding is not None and
phred_encoding in phred_encoding_to_shift):
CLIPPED_AVG_PHRED_CUTOFF += phred_encoding_to_shift[phred_encoding]
SOFT_CLIP = 4
HARD_CLIP = 5
CLIP = [SOFT_CLIP, HARD_CLIP]
input_af = pysam.AlignmentFile(input_bam_path, 'rb')
output_af = pysam.AlignmentFile(output_bam_path, 'wb',
template=input_af)
for read in input_af:
# If no cigartuples, i.e. unmapped, continue
if read.cigartuples is None:
continue
if read.is_secondary or read.is_supplementary:
continue
# TODO: Account for template length
# adapter_overlap = max(read.template_length - query_alignment_length, 0)
# Determine left and right clipped counts
left_clipping = (read.cigartuples[0][1]
if read.cigartuples[0][0] in CLIP else 0)
right_clipping = (read.cigartuples[-1][1]
if read.cigartuples[-1][0] in CLIP else 0)
# Write reads to file if clipped bases have average phred score
# above cutoff
if left_clipping > clipping_threshold:
clipped_phred_scores = read.query_qualities[:left_clipping]
if np.mean(clipped_phred_scores) > CLIPPED_AVG_PHRED_CUTOFF:
output_af.write(read)
continue
if right_clipping > clipping_threshold:
clipped_phred_scores = read.query_qualities[-right_clipping:]
if np.mean(clipped_phred_scores) > CLIPPED_AVG_PHRED_CUTOFF:
output_af.write(read)
continue
output_af.close()
input_af.close()
def get_unmapped_reads(bam_filename, output_filename, avg_phred_cutoff=None):
if avg_phred_cutoff is not None:
intermediate_filename = '_unfiltered'.join(
os.path.splitext(output_filename))
else:
intermediate_filename = output_filename
cmd = '{samtools} view -h -b -f 0x4 {bam_filename}'.format(
samtools=settings.SAMTOOLS_BINARY,
bam_filename=bam_filename)
with open(intermediate_filename, 'w') as output_fh:
subprocess.call(
cmd, stdout=output_fh, shell=True,
executable=settings.BASH_PATH)
if avg_phred_cutoff is not None:
filter_low_qual_read_pairs(intermediate_filename, output_filename,
avg_phred_cutoff)
def add_paired_mates(input_bam_path, source_bam_filename, output_bam_path):
    """Writes all primary alignments from source_bam_filename whose read
    name appears in input_bam_path, so both mates of every pair are kept.
    """
    bam_file = pysam.AlignmentFile(input_bam_path)
    input_qnames_to_read = {}
for read in bam_file:
input_qnames_to_read[read.qname] = True
bam_file.close()
original_alignmentfile = pysam.AlignmentFile(source_bam_filename, "rb")
output_alignmentfile = pysam.AlignmentFile(
output_bam_path, "wh", template=original_alignmentfile)
for read in original_alignmentfile:
if input_qnames_to_read.get(read.qname, False):
if not read.is_secondary and not read.is_supplementary:
output_alignmentfile.write(read)
output_alignmentfile.close()
original_alignmentfile.close()
def filter_out_unpaired_reads(input_bam_path, output_bam_path):
input_af = pysam.AlignmentFile(input_bam_path, 'rb')
# Build qname -> flag list dictionary
read_flags = {}
for read in input_af:
if read.qname not in read_flags:
read_flags[read.qname] = [read.flag]
else:
read_flags[read.qname].append(read.flag)
# Build qname -> is_paired dictionary
reads_with_pairs = {}
not_primary_alignment_flag = 256
supplementary_alignment_flag = 2048
for qname, flags in read_flags.items():
primary_count = 0
for f in flags:
if (not (f & not_primary_alignment_flag) and
not (f & supplementary_alignment_flag)):
primary_count += 1
if primary_count == 2:
reads_with_pairs[qname] = True
# Write reads in input to output if not in bad_quality_names
output_af = pysam.AlignmentFile(output_bam_path, "wb",
template=input_af)
input_af.reset()
for read in input_af:
if read.qname in reads_with_pairs:
output_af.write(read)
output_af.close()
input_af.close()
def filter_low_qual_read_pairs(input_bam_path, output_bam_path,
avg_phred_cutoff=20):
"""
Filters out reads with average phred scores below cutoff
TODO: use `bwa sort -n file` to stdout to remove the need to
use a dictionary with readnames.
"""
# Put qnames with average phred scores below the cutoff into dictionary
bad_quality_qnames = {}
input_af = pysam.AlignmentFile(input_bam_path, "rb")
for read in input_af:
avg_phred = np.mean(read.query_qualities)
if avg_phred < avg_phred_cutoff:
bad_quality_qnames[read.qname] = True
read_count = 0
input_af.close()
input_af = pysam.AlignmentFile(input_bam_path, "rb")
# Write reads in input to output if not in bad_quality_names
output_af = pysam.AlignmentFile(output_bam_path, "wb",
template=input_af)
for read in input_af:
if not bad_quality_qnames.get(read.qname, False):
output_af.write(read)
read_count += 1
output_af.close()
input_af.close()
def create_de_novo_variants_set(alignment_group, variant_set_label,
callers_to_include=[
'DE_NOVO_ASSEMBLY', 'GRAPH_WALK', 'ME_GRAPH_WALK']):
"""Put all the variants generated by VCFs which have INFO__METHOD
values in callers_to_include into a new VariantSet
Args:
alignment_group: An AlignmentGroup instance
variant_set_label: A label for the new VariantSet
callers_to_include: INFO__METHOD values to select
variants with
Returns:
variant_set: The VariantSet instance created
"""
ref_genome = alignment_group.reference_genome
# Get de novo variants
de_novo_variants = []
for variant in Variant.objects.filter(
reference_genome=ref_genome):
for vccd in variant.variantcallercommondata_set.all():
if vccd.data.get('INFO_METHOD', None) in callers_to_include:
                de_novo_variants.append(variant)
                # Add each variant at most once, even if several callsets
                # match one of the requested methods.
                break
    variant_set = VariantSet.objects.create(
            reference_genome=ref_genome,
            label=variant_set_label)
update_variant_in_set_memberships(
ref_genome,
[variant.uid for variant in de_novo_variants],
'add',
variant_set.uid)
return variant_set
def get_coverage_stats(sample_alignment):
"""Returns a dictionary with chromosome seqrecord_ids as keys and
subdictionaries as values.
Each subdictionary has three keys: length, mean, and std which hold the
particular chromosome's length, mean read coverage, and standard
deviation of read coverage
"""
maybe_chrom_cov_dict = sample_alignment.data.get('chrom_cov_dict', None)
if maybe_chrom_cov_dict is not None:
return maybe_chrom_cov_dict
    bam_path = sample_alignment.dataset_set.get(
            type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()
alignment_af = pysam.AlignmentFile(bam_path)
chrom_list = alignment_af.references
chrom_lens = alignment_af.lengths
c_starts = [0]*len(chrom_list)
c_ends = chrom_lens
chrom_cov_lists = []
for chrom, c_start, c_end in zip(chrom_list, c_starts, c_ends):
chrom_cov_lists.append([])
cov_list = chrom_cov_lists[-1]
for pileup_col in alignment_af.pileup(chrom,
start=c_start, end=c_end, truncate=True):
depth = pileup_col.nsegments
cov_list.append(depth)
alignment_af.close()
sub_dict_tup_list = zip(
chrom_lens,
map(np.mean, chrom_cov_lists),
map(np.std, chrom_cov_lists))
sub_dict_list = map(
lambda tup: dict(zip(['length', 'mean', 'std'], tup)),
sub_dict_tup_list)
chrom_cov_dict = dict(zip(chrom_list, sub_dict_list))
sample_alignment.data['chrom_cov_dict'] = chrom_cov_dict
sample_alignment.save()
return chrom_cov_dict
def get_avg_genome_coverage(sample_alignment):
"""Returns a float which is the average genome coverage, calculated as
the average length-weighted read coverage over all chromosomes
"""
coverage_stats = get_coverage_stats(sample_alignment)
len_weighted_coverage = 0
total_len = 0
for sub_dict in coverage_stats.values():
length = sub_dict['length']
avg_coverage = sub_dict['mean']
len_weighted_coverage += length * avg_coverage
total_len += length
return float(len_weighted_coverage) / total_len
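# Worked illustration of the length-weighted averaging performed by
# get_avg_genome_coverage(). The chromosome names and numbers are made up;
# this sketch does not touch any sample_alignment objects.
def _example_length_weighted_coverage():
    coverage_stats = {
        'chrom_1': {'length': 1000000, 'mean': 40.0, 'std': 6.0},
        'chrom_2': {'length': 500000, 'mean': 10.0, 'std': 3.0},
    }
    len_weighted_coverage = 0
    total_len = 0
    for sub_dict in coverage_stats.values():
        len_weighted_coverage += sub_dict['length'] * sub_dict['mean']
        total_len += sub_dict['length']
    # (1000000 * 40 + 500000 * 10) / 1500000 = 30.0
    return float(len_weighted_coverage) / total_len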
| mit | 647,521,577,785,154,200 | 32.768786 | 100 | 0.642759 | false | 3.547055 | false | false | false |
jmichelsen/horrorbox | music.py | 1 | 1187 | import logging
import random
import time
import vlc
import constants
log = logging.getLogger(__file__)
class HorrorsEar(object):
def __init__(self):
self.background_instance = vlc.Instance()
self.background_tracks = self.background_instance.media_list_new()
self.background_player = self.background_instance.media_list_player_new()
        # Keep a reference to the MediaPlayer constructor; a fresh player
        # is created for every scare sound in scare().
        self.scare_player = vlc.MediaPlayer
def begin(self):
[self.background_tracks.add_media(self.background_instance.media_new(track))
for track in constants.BACKGROUND_TRACKS]
self.background_player.set_media_list(self.background_tracks)
self.background_player.set_playback_mode(vlc.PlaybackMode.loop)
self.background_player.play()
def scare(self):
track = random.choice(constants.SCARE_TRACKS)
log.info('playing scare: {}'.format(track))
player = self.scare_player(track)
player.play()
time.sleep(1)
while player.is_playing():
continue
player.release()
def end(self):
self.background_player.stop()
self.background_instance.release()
self.background_player.release()
| gpl-3.0 | 5,873,880,076,083,507,000 | 29.435897 | 84 | 0.662174 | false | 3.804487 | false | false | false |
zachgates/jugg | jugg/core.py | 1 | 6323 | import asyncio
import base64
import json
import pyarchy
import socket
import struct
import time
from . import constants, security
class Datagram(object):
@classmethod
def from_string(cls, str_: str):
dg = cls(**json.loads(str_))
# Verify timestamp
if dg.timestamp >= time.time():
return cls()
else:
return dg
def __init__(self,
command: int = None,
sender: str = None, recipient: str = None,
data: str = None, hmac: str = None,
timestamp: float = None):
object.__init__(self)
self.__command = int(command) if command else command
self.__sender = str(sender) if sender else sender
self.__recipient = str(recipient) if recipient else recipient
self.__data = data
self.__hmac = str(hmac) if hmac else hmac
self.__ts = float(timestamp) if timestamp is not None else time.time()
def __str__(self):
return json.dumps({
'command': self.command,
'sender': self.sender,
'recipient': self.recipient,
'data': self.data,
'hmac': self.hmac,
'timestamp': self.timestamp,
})
@property
def command(self) -> int:
return self.__command
@command.setter
def command(self, command):
self.__command = int(command)
@property
def sender(self) -> str:
return self.__sender
@property
def recipient(self) -> str:
return self.__recipient
@recipient.setter
def recipient(self, recipient: str):
self.__recipient = str(recipient)
@property
def route(self) -> tuple:
return (self.sender, self.recipient)
@property
def data(self):
return self.__data
@data.setter
def data(self, data):
if isinstance(data, bytes):
self.__data = data.decode()
else:
self.__data = data
@property
def hmac(self) -> str:
return self.__hmac
@property
def timestamp(self) -> float:
return self.__ts
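# Self-contained sketch of the Datagram round trip. The command number and
# ids are made up; it only shows that str() and from_string() are inverses
# for datagrams whose timestamp lies in the past.
def _datagram_roundtrip_example():
    sent = Datagram(command=1, sender='node-a', recipient='node-b',
                    data='hello')
    # Serialization yields JSON; parsing recovers an equivalent datagram as
    # long as the timestamp is not in the future.
    received = Datagram.from_string(str(sent))
    return received.command == sent.command and received.data == sent.data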
class Node(security.KeyHandler, pyarchy.common.ClassicObject):
def __init__(self, stream_reader, stream_writer):
security.KeyHandler.__init__(self)
pyarchy.common.ClassicObject.__init__(self, '', False)
self._stream_reader = stream_reader
self._stream_writer = stream_writer
self._commands = {}
async def send(self, dg: Datagram):
data = str(dg).encode()
data = base64.b85encode(data)
data = self.encrypt(data)
n_bytes = len(data)
        # 4-byte, network-byte-order length prefix framing the payload
        pointer = struct.pack('I', socket.htonl(n_bytes))
try:
self._stream_writer.write(pointer + data)
await self._stream_writer.drain()
except ConnectionResetError:
# Client crashed
pass
async def recv(self, n_bytes: int = None):
try:
if n_bytes is None:
pointer = await self._stream_reader.readexactly(4)
n_bytes = socket.ntohl(struct.unpack('I', pointer)[0])
data = await self._stream_reader.read(n_bytes)
data = self.decrypt(data)
data = base64.b85decode(data).decode()
return Datagram.from_string(data)
except ConnectionResetError:
# Client crashed
pass
except asyncio.streams.IncompleteReadError:
# Failed to receive pointer
pass
except struct.error:
# Received invalid pointer
pass
except json.decoder.JSONDecodeError:
# Bad Datagram
pass
return None
async def start(self):
await self.send_handshake()
# Maintain the connection
while True:
dg = await self.recv()
if not dg:
break
if await self.handle_datagram(dg):
break
async def stop(self):
self._stream_writer.close()
async def handle_datagram(self, dg: Datagram):
func = self._commands.get(dg.command)
if not func:
func = getattr(
self,
'handle_' + constants.CMD_2_NAME.get(dg.command),
None)
if func:
await func(dg)
else:
await self.send_error(constants.ERR_DISCONNECT)
async def send_handshake(self):
await self.send(
Datagram(
command = constants.CMD_SHAKE,
sender = self.id,
recipient = self.id,
data = self.key))
async def handle_handshake(self, dg: Datagram):
self.counter_key = int(dg.data)
async def send_error(self, errno: int):
await self.send(
Datagram(
command = constants.CMD_ERR,
sender = self.id,
recipient = self.id,
data = errno))
# Add functionality in subclass
async def handle_error(self, dg: Datagram):
return NotImplemented
async def send_response(self, data):
await self.send(
Datagram(
command = constants.CMD_RESP,
sender = self.id,
recipient = self.id,
data = data))
class ClientBase(Node):
def __init__(self,
stream_reader, stream_writer,
hmac_key, challenge_key):
Node.__init__(self, stream_reader, stream_writer)
self._hmac_key = hmac_key or b''
self._challenge_key = challenge_key or b''
self._name = None
def __lt__(self, obj):
if isinstance(obj, pyarchy.core.NamedObject):
return self.name < obj.name
else:
return NotImplemented
    def __gt__(self, obj):
if isinstance(obj, pyarchy.core.NamedObject):
return self.name > obj.name
else:
return NotImplemented
@property
def name(self) -> str:
return str(self._name)
@name.setter
def name(self, name: str):
if self._name is None:
self._name = str(name)
else:
raise AttributeError('name can only be set once')
__all__ = [
    'Datagram',
    'Node',
    'ClientBase',
]
| mit | -8,457,216,418,216,259,000 | 24.913934 | 78 | 0.541357 | false | 4.283875 | false | false | false |
8BitJosh/JukeBot | JukeBot/mainNamespace.py | 1 | 9499 | import socketio
class mainNamespace(socketio.AsyncNamespace):
def __init__(self, _playlist, _player, _playlistlist, _config, _loop, _users, _namespace):
self.playlist = _playlist
self.player = _player
self.playlistlist = _playlistlist
self.config = _config
self.loop = _loop
self.users = _users
self.shuffles = {}
self.skips = []
socketio.AsyncNamespace.__init__(self, namespace=_namespace)
def newSong(self):
self.shuffles = {}
self.skips = []
async def on_connected(self, sid, msg):
self.users.userConnect(sid, msg['session'], msg['ip'])
if self.users.isSidAdmin(sid):
self.enter_room(sid, 'adminUsr', namespace='/main')
else:
self.enter_room(sid, 'stdUsr', namespace='/main')
print('Client Connected - {} - {}'.format(sid, self.users.getSidName(sid)))
await self.resendAll()
async def on_sendAll(self, sid):
await self.resendAll()
async def resendAll(self):
await self.emit('featureDisable', { 'skip': self.config.skippingEnable,
'delete': self.config.songDeletionEnable,
'shuffle': self.config.shuffleEnable,
'newplaylists': self.config.newPlaylists,
'playlistdeletion': self.config.enablePlaylistDeletion,
'playlistediting': self.config.enablePlaylistEditing },
room='stdUsr')
await self.emit('featureDisable', { 'skip': True, 'delete': True, 'shuffle': True, 'newplaylists': True, 'playlistdeletion': True,
'playlistediting': True},
room='adminUsr')
await self.playlist.sendPlaylist()
await self.player.sendDuration()
await self.emit('volume_set', {'vol': self.player.getVolume()})
await self.emit('playlistList', self.playlistlist.getPlaylists())
async def on_sent_song(self, sid, msg):
title = msg['data']
requester = self.users.getSidName(sid) # todo convert ip to device name
if title != '':
str = 'Queued Song - ' + title
if '&' in title:
str = str + '\nIf you wanted to add a playlist use the full playlist page that has "playlist" in the url'
start_pos = title.find('&')
msg = title[:start_pos]
else:
msg = title
print('{} - Submitted - {}'.format(requester, title))
p = self.loop.create_task(self.playlist.process(_title=msg, _requester=requester))
else:
str = 'Enter a Song Name'
await self.emit('response', {'data': str}, room=sid)
async def on_button(self, sid, msg):
command = msg['data']
if command == 'skip' and (self.config.skippingEnable or self.users.isSidAdmin(sid)):
            if (self.config.voteSkipNum == 0) or self.users.isSidAdmin(sid):
await self.emit('response', {'data': 'Song Skipped'}, room=sid)
print('{} - Skipped song'.format(self.users.getSidName(sid)))
await self.player.stop()
else:
if self.users.getSidName(sid) not in self.skips:
self.skips.append(self.users.getSidName(sid))
print("{} - Voted to skip the song".format(self.users.getSidName(sid)))
if len(self.skips) >= self.config.voteSkipNum:
await self.emit('response', {'data': 'Song Skipped'}, room=sid)
print('Song was vote Skipped by {} people'.format(len(self.skips)))
await self.player.stop()
elif command == 'shuffle' and (self.config.shuffleEnable or self.users.isSidAdmin(sid)):
            if (self.config.shuffleLimit == 0) or self.users.isSidAdmin(sid):
await self.emit('response', {'data': 'Songs Shuffled'}, namespace='/main')
print('{} - Shuffled playlist'.format(self.users.getSidName(sid)))
await self.playlist.shuff()
else:
if self.users.getSidName(sid) in self.shuffles:
self.shuffles[self.users.getSidName(sid)] = self.shuffles[self.users.getSidName(sid)] + 1
else:
self.shuffles[self.users.getSidName(sid)] = 1
if self.shuffles[self.users.getSidName(sid)] <= self.config.shuffleLimit:
await self.emit('response', {'data': 'Songs Shuffled'}, namespace='/main')
print('{} - Shuffled playlist'.format(self.users.getSidName(sid)))
await self.playlist.shuff()
elif command == 'clear' and (self.config.songDeletionEnable or self.users.isSidAdmin(sid)):
await self.playlist.clearall()
print('{} - Cleared all of playlist'.format(self.users.getSidName(sid)))
await self.emit('response', {'data': 'Playlist Cleared'}, namespace='/main')
elif command == 'pause':
if self.player.isPaused():
print('{} - Resumed the song'.format(self.users.getSidName(sid)))
await self.emit('response', {'data': 'Song Resumed'}, namespace='/main')
await self.emit('pause_button', {'data': 'Pause'})
await self.player.pause()
elif self.player.running():
print('{} - Paused the song'.format(self.users.getSidName(sid)))
await self.emit('response', {'data': 'Song Paused'}, namespace='/main')
await self.emit('pause_button', {'data': 'Resume'})
await self.player.pause()
async def on_volume(self, sid, msg):
vol = int(msg['vol'])
self.player.setVolume(vol)
await self.emit('volume_set', {'vol': vol})
async def on_delete(self, sid, msg):
if self.config.songDeletionEnable or self.users.isSidAdmin(sid):
title = msg['title']
index = msg['data']
print('{} - Removed index {} title = {}'.format(self.users.getSidName(sid), index, title))
await self.playlist.remove(index, title)
s = 'Removed song from playlist - ' + title
await self.emit('response', {'data': s}, room=sid)
async def on_addPlaylist(self, sid, msg):
songs = self.playlistlist.getsongs(msg['title'])
if songs == {}:
return
await self.emit('response', {'data': 'added playlist - ' + msg['title']}, room=sid)
await self.playlist.addPlaylist(songs, self.users.getSidName(sid))
async def on_savequeue(self, sid, msg):
if self.config.newPlaylists or self.users.isSidAdmin(sid):
await self.emit('response', {'data': 'Saving Current queue as playlist named - ' + str(msg['name'])}, room=sid)
print('{} - Saved queue as - {}'.format(self.users.getSidName(sid), msg['name']))
songs = await self.playlist.getQueue()
songs['data']['name'] = str(msg['name'])
await self.playlistlist.addqueue(songs)
async def on_newempty(self, sid, msg):
if self.config.newPlaylists or self.users.isSidAdmin(sid):
await self.emit('response', {'data': 'Creating a new empty playlist named - ' + str(msg['name'])}, room=sid)
print('{} - Created a new playlist named - {}'.format(self.users.getSidName(sid), msg['name']))
await self.playlistlist.newPlaylist(msg['name'])
async def on_getplaylist(self, sid, msg):
name = msg['data']
print('user modifing - {}'.format(name))
songs = self.playlistlist.getsongs(name)
await self.emit('selectedplaylist', songs, room=sid)
async def on_add_song(self, sid, msg):
if self.config.enablePlaylistEditing or self.users.isSidAdmin(sid):
await self.playlistlist.addSong(msg['playlistname'], msg['data'])
print('{} - Added - {} - to - {}'.format(self.users.getSidName(sid), msg['data'], msg['playlistname']))
songs = self.playlistlist.getsongs(msg['playlistname'])
await self.emit('selectedplaylist', songs, room=sid)
async def on_removePlaySong(self, sid, msg):
if self.config.enablePlaylistEditing or self.users.isSidAdmin(sid):
await self.playlistlist.removeSong(msg['playlistname'], msg['index'], msg['title'])
print('{} - Removed {} from playlist - {}'.format(self.users.getSidName(sid), msg['title'], msg['playlistname']))
songs = self.playlistlist.getsongs(msg['playlistname'])
await self.emit('selectedplaylist', songs, room=sid)
async def on_removePlaylist(self, sid, msg):
if self.config.enablePlaylistDeletion or self.users.isSidAdmin(sid):
if msg['title'].lower() == msg['userinput'].lower():
await self.playlistlist.removePlaylist(msg['title'])
print('{} - Removed playlist from server - {}'.format(self.users.getSidName(sid), msg['title']))
await self.emit('selectedplaylist', {'data': {'name': 'Playlist:', 'dur':0}}, room=sid)
else:
await self.emit('response', {'data': 'Incorrect name, Unable to remove playlist'}, room=sid)
| mit | -4,696,739,627,191,824,000 | 45.563725 | 139 | 0.570165 | false | 3.98114 | true | false | false |
tderensis/digital_control | control_eval.py | 1 | 13704 | """
Functions used to evaluate the quality of control systems. Includes measures such as
stability margins and settling time.
Requires numpy
"""
import numpy as np
from numpy import linalg as LA
import math
import cmath
def upper_gain_margin(A, B, C, discrete=True, tol=1e-3, max_gain_dB=60, output_dB=True):
""" Calculate the upper gain margin for each input of a loop transfer function
described by the state space matrices A, B, and C. Note that stability margins
for MIMO systems may not represent the true robustness of the system because
the gain or phase can change in all channels at once by a different amount.
Args:
A: The A matrix of the loop transfer function
B: The B matrix of the loop transfer function
C: The C matrix of the loop transfer function
discrete (optional): True if the loop transfer function is discrete, False
if it is continuous. Defaults to True.
tol (optional): The tolerance to calculate the result to. Defaults to 1e-3.
max_gain_dB (optional): The maximum dB to search to. Defaults to 60 dB
ouput_dB (optional): True if the output should be in dB, False if the result
should be returned as gain. Defaults to True.
Returns:
list: The list of upper gain margins at each input. Units dependent on the value of output_dB.
"""
(n, p) = B.shape
max_gain = max(1, math.pow(10, max_gain_dB/20))
gain_list = [None] * p
# Create a measure of stability for the poles based on if the system is discrete
if discrete is True:
# Stable poles for discrete systems are inside the unit circle
def is_unstable(poles):
return max(abs(poles)) > 1
else:
# Stable poles for continuous systems are negative
def is_unstable(poles):
return max([ pole.real for pole in poles ]) > 0
for i in range(0, p):
# Use the bisect method for calculating the gain margin
t1 = 1
t2 = max_gain
gain_mat = np.matrix(np.eye(p))
gain = t1
while 20 * math.log(t2/t1, 10) > tol:
gain = (t1 + t2)/2;
# Multiply the current input by the gain
gain_mat[i, i] = gain
eig_vals, v = LA.eig(A - B*gain_mat*C)
if is_unstable(eig_vals):
t2 = gain # decrease the gain
else:
t1 = gain # increase the gain
if output_dB is True:
gain_list[i] = 20 * math.log(gain, 10)
else:
gain_list[i] = gain
return gain_list
def lower_gain_margin(A, B, C, discrete=True, tol=1e-3, min_gain_dB=-60, output_dB=True):
""" Calculate the lower gain margin for each input of a loop transfer function
described by the state space matrices A, B, and C. Note that stability margins
for MIMO systems may not represent the true robustness of the system because
the gain or phase can change in all channels at once by a different amount.
Not all systems have lower gain margin. These systems will report the minimum value.
Args:
A: The A matrix of the loop transfer function
B: The B matrix of the loop transfer function
C: The C matrix of the loop transfer function
discrete (optional): True if the loop transfer function is discrete, False
if it is continuous. Defaults to True.
tol (optional): The tolerance to calculate the result to. Defaults to 1e-3.
min_gain_dB (optional): The minimum dB to search to. Defaults to -60 dB
ouput_dB (optional): True if the output should be in dB, False if the result
should be returned as gain. Defaults to True.
Returns:
list: The list of lower gain margins for each input. Units dependent on the value of output_dB.
"""
(n, p) = B.shape
min_gain = min(1, math.pow(10, min_gain_dB/20))
gain_list = [None] * p
# Create a measure of stability for the poles based on if the system is discrete
if discrete is True:
# Stable poles for discrete systems are inside the unit circle
def is_unstable(poles):
return max(abs(poles)) > 1
else:
# Stable poles for continuous systems are negative
def is_unstable(poles):
return max([ pole.real for pole in poles ]) > 0
for i in range(0, p):
# Use the bisect method for calculating the gain margin
t1 = min_gain
t2 = 1
gain_mat = np.matrix(np.eye(p))
gain = t1
while 20 * math.log(t2/t1, 10) > tol:
gain = (t1 + t2)/2;
# Multiply the current input by the gain
gain_mat[i, i] = gain
eig_vals, v = LA.eig(A - B*gain_mat*C)
if is_unstable(eig_vals):
t1 = gain # increase the gain
else:
t2 = gain # decrease the gain
if output_dB is True:
gain_list[i] = 20 * math.log(gain, 10)
else:
gain_list[i] = gain
return gain_list
def phase_margin(A, B, C, discrete=True, tol=1e-3, max_angle_deg=120):
""" Calculate the phase margin for each input of a loop transfer function
described by the state space matrices A, B, and C. Note that stability margins
for MIMO systems may not represent the true robustness of the system because
the gain or phase can change in all channels at once by a different amount.
Args:
A: The A matrix of the loop transfer function
B: The B matrix of the loop transfer function
C: The C matrix of the loop transfer function
discrete (optional): True if the loop transfer function is discrete, False
if it is continuous. Defaults to True.
tol (optional): The tolerance to calculate the result to. Defaults to 1e-3.
max_angle_deg (optional): The maximum angle to search to. Defaults to 120 degrees
Returns:
list: The list of phase margins for each input. Units are degrees.
"""
(n, p) = B.shape
max_angle = max(1, max_angle_deg)
angle_list = [None] * p
# Create a measure of stability for the poles based on if the system is discrete
if discrete is True:
# Stable poles for discrete systems are inside the unit circle
def is_stable(poles):
return max(abs(poles)) <= 1
else:
# Stable poles for continuous systems are negative
def is_stable(poles):
return max([ pole.real for pole in poles ]) <= 0
for i in range(0, p):
# Use the bisect method for calculating the phase margin
t1 = 1
t2 = max_angle
gain_mat = np.matrix(np.eye(p, dtype=complex))
angle = t1
while t2 - t1 > tol:
angle = (t1 + t2)/2;
# Multiply the current input by the phase offset
gain_mat[i, i] = cmath.exp(-1j * angle * math.pi/180)
eig_vals, v = LA.eig(A - B*gain_mat*C)
if is_stable(eig_vals):
t1 = angle # increase the angle
else:
t2 = angle # decrease the angle
angle_list[i] = angle
return angle_list
def print_stability_margins(A, B, C, discrete=True, tol=1e-3):
""" Print the stability margins (gain and phase) for each input of a loop
transfer function described by the state space matrices A, B, and C.
Note that stability margins for MIMO systems may not represent the true
robustness of the system because the gain or phase can change in all channels
at once by a different amount.
Args:
A: The A matrix of the loop transfer function
B: The B matrix of the loop transfer function
C: The C matrix of the loop transfer function
discrete (optional): True if the loop transfer function is discrete, False
if it is continuous. Defaults to True.
tol (optional): The tolerance to calculate the result to. Defaults to 1e-3.
Returns:
Nothing
"""
ugm = upper_gain_margin(A, B, C, discrete=discrete, tol=tol)
lgm = lower_gain_margin(A, B, C, discrete=discrete, tol=tol)
phm = phase_margin(A, B, C, discrete=discrete, tol=tol)
for i in range(1, len(ugm)+1):
print("Input " + str(i) + " upper gain margin = " + str(round(ugm[i-1], 2)) + " dB")
print("Input " + str(i) + " lower gain margin = " + str(round(lgm[i-1], 2)) + " dB")
print("Input " + str(i) + " phase margin = " + str(round(phm[i-1],2)) + " deg")
def settling_time(t, y, percent=0.02, start=None, end=None):
""" Calculate the time it takes for each output to reach its
final value to within a given percentage.
Args:
t (array): The time points (1 x n)
y (ndarray): A list of the output vectors (n, m), where m is the number of states.
        percent (optional): The fractional band (e.g. 0.02 for 2%) within
            which the output needs to settle.
        start (optional): The starting value to use for calculations. If none is given, then
            the max of the first values of y is used. Default is None.
        end (optional): The end value to use for calculations. If none is given, then
            the min of the last values of y is used.
Returns:
The settling time in seconds.
"""
settling_times = []
(num_samples, states) = y.shape
if start is None:
start = max([abs(n) for n in y[0,:].tolist()[0]])
if end is None:
end = round(min([abs(n) for n in y[-1,:].tolist()[0]]), 3)
yout = np.transpose(y).tolist()
limit = percent * abs(start - end)
limit_high = end + limit
limit_low = end - limit
for state in range(0, states):
i = num_samples
        # Walk backwards until the response last leaves the settling band.
        for y_val in reversed(yout[state]):
            i -= 1
            if y_val > limit_high or y_val < limit_low:
                settling_times.append(t[i])
                break
return max(settling_times)
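# Quick illustration of settling_time() on a synthetic first-order decay.
# The 2 percent settling point of exp(-t) sits near four time constants, so
# the returned value should be roughly 3.9 seconds.
def _example_settling_time():
    t = np.linspace(0.0, 10.0, 1001)
    y = np.matrix(np.exp(-t)).T  # one output, shaped (n_samples, 1)
    return settling_time(t, y, percent=0.02)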
def ltf_regsf(sys_ol, L):
""" Construct the the loop transfer function of the full state feedback
regulator system. Used for calculating stability.
Args:
sys_ol (StateSpace): The state-space model of the plant
L (matrix): The gain matrix
Returns:
tuple: (A, B, C) Where A, B, and C are the matrices that describe the
loop transfer function
"""
A = sys_ol.A
B = sys_ol.B
return (A, B, L)
def ltf_regob(sys_ol, L, K):
""" Construct the the loop transfer function of the full order
observer system. Used for calculating stability.
Args:
sys_ol (StateSpace): The state-space model of the plant
L (matrix): The gain matrix
K (matrix): The observer gain matrix
Returns:
tuple: (A, B, C) Where A, B, and C are the matrices that describe the
loop transfer function
"""
A = sys_ol.A
B = sys_ol.B
C = sys_ol.C
(n, p) = B.shape
A_ltf_top_row = np.concatenate((A, np.zeros((n, n))), axis=1)
A_ltf_bot_row = np.concatenate((K * C, A - (K * C) - (B * L)), axis=1)
A_ltf = np.concatenate((A_ltf_top_row, A_ltf_bot_row), axis=0)
B_ltf = np.concatenate((B, np.zeros((n, p))), axis=0)
C_ltf = np.concatenate((np.zeros((p, n)), L), axis=1)
return (A_ltf, B_ltf, C_ltf)
def ltf_tsob(sys_ol, Aa, Ba, L1, L2, K):
""" Construct the the loop transfer function of the full order
observer tracking system. Used for calculating stability.
Args:
sys_ol (StateSpace): The state-space model of the plant
Aa (matrix): The additional dynamics state matrix
Ba (matrix): The additional dynamics input matrix
L1 (matrix): The plant gain matrix
L2 (matrix): The additional dynamics gain matrix
K (matrix): The observer gain matrix
Returns:
tuple: (A, B, C) Where A, B, and C are the matrices that describe the
loop transfer function
"""
A = sys_ol.A
B = sys_ol.B
C = sys_ol.C
(n, p) = B.shape
(na, pa) = Ba.shape
A_ltf_top_row = np.concatenate((A, np.zeros((n, n+na))), axis=1)
A_ltf_mid_row = np.concatenate((K * C, A - K * C - B * L1, -B * L2), axis=1)
A_ltf_bot_row = np.concatenate((Ba * C, np.zeros((na, n)), Aa), axis=1)
A_ltf = np.concatenate((A_ltf_top_row, A_ltf_mid_row, A_ltf_bot_row), axis=0)
B_ltf = np.concatenate((B, np.zeros((n + na, pa))), axis=0)
C_ltf = np.concatenate((np.zeros((p, n)), L1, L2), axis=1)
return (A_ltf, B_ltf, C_ltf)
def ltf_tssf(sys_ol, Aa, Ba, L1, L2):
""" Construct the the loop transfer function of the full state
feedback tracking system. Used for calculating stability.
Args:
sys_ol (StateSpace): The state-space model of the plant
Aa (matrix): The additional dynamics state matrix
Ba (matrix): The additional dynamics input matrix
L1 (matrix): The plant gain matrix
L2 (matrix): The additional dynamics gain matrix
Returns:
tuple: (A, B, C) Where A, B, and C are the matrices that describe the
loop transfer function
"""
A = sys_ol.A
B = sys_ol.B
C = sys_ol.C
(n, p) = B.shape
(na, pa) = Ba.shape
A_ltf_top_row = np.concatenate((A, np.zeros((n, na))), axis=1)
A_ltf_bot_row = np.concatenate((Ba * C, Aa), axis=1)
A_ltf = np.concatenate((A_ltf_top_row, A_ltf_bot_row), axis=0)
B_ltf = np.concatenate((B, np.zeros((na, pa))), axis=0)
C_ltf = np.concatenate((L1, L2), axis=1)
return (A_ltf, B_ltf, C_ltf)
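# Sketch of feeding a loop transfer function into the margin calculations.
# The plant numbers and state-feedback gain are arbitrary stand-ins (any
# object exposing .A and .B works here); this is not a tuned controller.
def _example_regsf_margins():
    from collections import namedtuple
    FakePlant = namedtuple('FakePlant', ['A', 'B'])
    sys_ol = FakePlant(A=np.matrix([[0.9]]), B=np.matrix([[1.0]]))
    L = np.matrix([[0.5]])  # full state feedback gain
    A_ltf, B_ltf, C_ltf = ltf_regsf(sys_ol, L)
    print_stability_margins(A_ltf, B_ltf, C_ltf, discrete=True)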
| mit | 5,929,957,209,731,719,000 | 36.037838 | 103 | 0.601211 | false | 3.610116 | false | false | false |
uw-it-aca/myuw | myuw/views/api/textbook.py | 1 | 3640 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from restclients_core.exceptions import DataFailureException
from myuw.dao.registration import get_schedule_by_term
from myuw.dao.instructor_schedule import get_instructor_schedule_by_term
from myuw.dao.term import get_specific_term, get_current_quarter
from myuw.dao.textbook import (
get_textbook_by_schedule, get_order_url_by_schedule)
from myuw.logger.timer import Timer
from myuw.logger.logresp import (
log_api_call, log_msg, log_data_not_found_response)
from myuw.views import prefetch_resources
from myuw.views.api import ProtectedAPI
from myuw.views.error import handle_exception, data_not_found, data_error
logger = logging.getLogger(__name__)
class Textbook(ProtectedAPI):
"""
Performs actions on resource at /api/v1/books/[year][quarter][summer_term].
"""
def get(self, request, *args, **kwargs):
"""
GET returns 200 with textbooks for the given quarter
"""
timer = Timer()
year = kwargs.get("year")
quarter = kwargs.get("quarter")
summer_term = kwargs.get("summer_term", "full-term")
return self.respond(
timer, request, get_specific_term(year, quarter), summer_term)
def respond(self, timer, request, term, summer_term):
try:
prefetch_resources(request)
by_sln = {}
# enrolled sections
try:
schedule = get_schedule_by_term(
request, term=term, summer_term=summer_term)
by_sln.update(self._get_schedule_textbooks(schedule))
order_url = get_order_url_by_schedule(schedule)
if order_url:
by_sln["order_url"] = order_url
except DataFailureException as ex:
if ex.status != 400 and ex.status != 404:
raise
# instructed sections (not split summer terms)
try:
schedule = get_instructor_schedule_by_term(
request, term=term, summer_term="full-term")
by_sln.update(self._get_schedule_textbooks(schedule))
except DataFailureException as ex:
if ex.status != 404:
raise
if len(by_sln) == 0:
log_data_not_found_response(logger, timer)
return data_not_found()
log_api_call(timer, request, "Get Textbook for {}.{}".format(
term.year, term.quarter))
return self.json_response(by_sln)
except Exception:
return handle_exception(logger, timer, traceback)
def _get_schedule_textbooks(self, schedule):
by_sln = {}
if schedule and len(schedule.sections):
book_data = get_textbook_by_schedule(schedule)
by_sln.update(index_by_sln(book_data))
return by_sln
def index_by_sln(book_data):
json_data = {}
for sln in book_data:
json_data[sln] = []
for book in book_data[sln]:
json_data[sln].append(book.json_data())
return json_data
class TextbookCur(Textbook):
"""
Performs actions on resource at /api/v1/book/current/.
"""
def get(self, request, *args, **kwargs):
"""
GET returns 200 with the current quarter Textbook
"""
timer = Timer()
try:
return self.respond(
timer, request, get_current_quarter(request), None)
except Exception:
return handle_exception(logger, timer, traceback)
| apache-2.0 | -6,605,194,879,796,673,000 | 34 | 79 | 0.603571 | false | 3.935135 | false | false | false |
jmanday/Master | TFM/library/opencv-3.2.0/modules/python/test/test_feature_homography.py | 2 | 5542 | #!/usr/bin/env python
'''
Feature homography
==================
Example of using the features2d framework for interactive video homography
matching. AKAZE features and a FLANN (LSH) matcher are used. The actual
tracking is implemented by the PlaneTracker class defined below.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
# local modules
from tst_scene_render import TestSceneRender
def intersectionRate(s1, s2):
x1, y1, x2, y2 = s1
s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])
area, intersection = cv2.intersectConvexConvex(s1, np.array(s2))
return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(np.array(s2)))
from tests_common import NewOpenCVTests
class feature_homography_test(NewOpenCVTests):
render = None
tracker = None
framesCounter = 0
frame = None
def test_feature_homography(self):
self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'),
self.get_sample('samples/data/box.png'), noise = 0.5, speed = 0.5)
self.frame = self.render.getNextFrame()
self.tracker = PlaneTracker()
self.tracker.clear()
self.tracker.add_target(self.frame, self.render.getCurrentRect())
while self.framesCounter < 100:
self.framesCounter += 1
tracked = self.tracker.track(self.frame)
if len(tracked) > 0:
tracked = tracked[0]
self.assertGreater(intersectionRate(self.render.getCurrentRect(), np.int32(tracked.quad)), 0.6)
else:
                self.fail('Tracking error')
self.frame = self.render.getNextFrame()
# built-in modules
from collections import namedtuple
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6
flann_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
MIN_MATCH_COUNT = 10
'''
image - image to track
rect - tracked rectangle (x1, y1, x2, y2)
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
'''
PlanarTarget = namedtuple('PlaneTarget', 'image, rect, keypoints, descrs, data')
'''
target - reference to PlanarTarget
p0 - matched points coords in target image
p1 - matched points coords in input frame
H - homography matrix from p0 to p1
quad - target bounary quad in input frame
'''
TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')
class PlaneTracker:
def __init__(self):
self.detector = cv2.AKAZE_create(threshold = 0.003)
self.matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.targets = []
self.frame_points = []
def add_target(self, image, rect, data=None):
'''Add a new tracking target.'''
x0, y0, x1, y1 = rect
raw_points, raw_descrs = self.detect_features(image)
points, descs = [], []
for kp, desc in zip(raw_points, raw_descrs):
x, y = kp.pt
if x0 <= x <= x1 and y0 <= y <= y1:
points.append(kp)
descs.append(desc)
descs = np.uint8(descs)
self.matcher.add([descs])
target = PlanarTarget(image = image, rect=rect, keypoints = points, descrs=descs, data=data)
self.targets.append(target)
def clear(self):
'''Remove all targets'''
self.targets = []
self.matcher.clear()
def track(self, frame):
'''Returns a list of detected TrackedTarget objects'''
self.frame_points, frame_descrs = self.detect_features(frame)
if len(self.frame_points) < MIN_MATCH_COUNT:
return []
matches = self.matcher.knnMatch(frame_descrs, k = 2)
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
if len(matches) < MIN_MATCH_COUNT:
return []
matches_by_id = [[] for _ in xrange(len(self.targets))]
for m in matches:
matches_by_id[m.imgIdx].append(m)
tracked = []
for imgIdx, matches in enumerate(matches_by_id):
if len(matches) < MIN_MATCH_COUNT:
continue
target = self.targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [self.frame_points[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < MIN_MATCH_COUNT:
continue
p0, p1 = p0[status], p1[status]
x0, y0, x1, y1 = target.rect
quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
tracked.append(track)
tracked.sort(key = lambda t: len(t.p0), reverse=True)
return tracked
def detect_features(self, frame):
'''detect_features(self, frame) -> keypoints, descrs'''
keypoints, descrs = self.detector.detectAndCompute(frame, None)
if descrs is None: # detectAndCompute returns descs=None if no keypoints found
descrs = []
        return keypoints, descrs
| apache-2.0 | -3,554,900,885,808,585,000 | 33.64375 | 111 | 0.601047 | false | 3.4 | true | false | false
google-research/falken | service/api/model_selector.py | 1 | 21573 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Selects model based on evaluation score."""
import collections
import copy
import time
import typing
from absl import logging
from api import data_cache
from api import model_selection_record
from api.sampling import online_eval_sampling
# pylint: disable=g-bad-import-order
import common.generate_protos # pylint: disable=unused-import
import data_store_pb2
import session_pb2
from data_store import resource_store
EPISODE_SCORE_SUCCESS = 1
EPISODE_SCORE_FAILURE = -1
# Somewhat arbitrary constants used as a stopping heuristic.
_NUM_ONLINE_EVALS_PER_MODEL = 6
_NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT = 1
_MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL = 8
class ModelSelector:
"""Selects model based on evaluation score."""
def __init__(self, data_store, session_resource_id):
self._data_store = data_store
self._session_resource_id = session_resource_id
self._summary_map = None
self._session = self._data_store.read(self._session_resource_id)
self._progress = None
def get_training_state(self) -> session_pb2.SessionInfo.TrainingState:
"""Get training state of the session for this model selector.
Returns:
session_pb2.SessionInfo.TrainingState enum.
Raises:
ValueError if the session type is not supported.
"""
session_type = self._get_session_type()
if session_type == session_pb2.INTERACTIVE_TRAINING:
if self._is_session_training():
return session_pb2.SessionInfo.TRAINING
else:
return session_pb2.SessionInfo.COMPLETED
elif session_type == session_pb2.INFERENCE:
return session_pb2.SessionInfo.COMPLETED
elif session_type == session_pb2.EVALUATION:
if self._is_eval_complete():
return session_pb2.SessionInfo.COMPLETED
else:
return session_pb2.SessionInfo.TRAINING
else:
raise ValueError(f'Unsupported session type: {session_type} in session '
f'{self._session_resource_id}.')
def select_next_model(self):
"""Selects next model to try.
Returns:
resource_id.ResourceId of the model to select next.
Raises:
ValueError if model was not found or if requested for an unsupported
session type.
"""
session_type = self._get_session_type()
if session_type == session_pb2.INTERACTIVE_TRAINING:
model_resource_id = self._best_offline_or_starting_snapshot_model()
logging.info('Selected model %s for training session %s.',
model_resource_id,
self._session_resource_id)
return model_resource_id
elif session_type == session_pb2.INFERENCE:
model_resource_id = self.select_final_model()
logging.info('Selected model %s for inference session %s.',
model_resource_id,
self._session_resource_id)
return model_resource_id
elif session_type == session_pb2.EVALUATION:
# Fetch the next online eval model.
model_resource_id = self._next_online_eval_model()
if not model_resource_id:
raise ValueError(
'Empty model returned by online eval sampling for session '
f'{self._session_resource_id.session}')
logging.info('Selected model %s for evaluation session %s.',
model_resource_id,
self._session_resource_id)
return model_resource_id
else:
raise ValueError(
f'Unsupported session type: {session_type} found for session '
f'{self._session_resource_id.session}.')
def _best_offline_or_starting_snapshot_model(self):
"""Return best model resource ID by offline score or starting snapshot."""
try:
offline_model_id = self._best_offline_model()
res_id = self._data_store.resource_id_from_proto_ids(
project_id=self._session.project_id,
brain_id=self._session.brain_id,
session_id=self._session.session_id,
model_id=offline_model_id)
logging.info('Selected best offline model: %s', res_id)
return res_id
except (FileNotFoundError, ValueError):
# If offline model is not found, try getting the snapshot model.
try:
snapshot = data_cache.get_starting_snapshot(self._data_store,
self._session.project_id,
self._session.brain_id,
self._session.session_id)
res_id = self._data_store.resource_id_from_proto_ids(
project_id=snapshot.project_id,
brain_id=snapshot.brain_id,
session_id=snapshot.session,
model_id=snapshot.model)
logging.info('Selected model from snapshot: %s.', res_id)
return res_id
except (FileNotFoundError, resource_store.InternalError, ValueError):
logging.info(
'Failed to get offline model and model from starting snapshot for '
'session %s. Returning empty model.', self._session.session_id)
return None
def _best_offline_model(self):
"""Goes through offline evaluations and returns model ID with best score."""
offline_eval_summary = self._get_offline_eval_summary(
self._session_resource_id)
if not offline_eval_summary:
raise FileNotFoundError('No offline eval found for session '
f'{self._session_resource_id.session}.')
return offline_eval_summary.scores_by_offline_evaluation_id()[0][1].model_id
def _next_online_eval_model(self):
"""Selects the next model resource ID based on the online eval results."""
if not self._get_summary_map():
raise FileNotFoundError('No models found for evaluation session '
f'{self._session_resource_id.session}.')
_, model_ids, model_records = self._create_model_records()
sampling = online_eval_sampling.UCBSampling()
selected_model_index = sampling.select_next(model_records)
if selected_model_index >= len(model_ids):
raise ValueError(
f'Selected model index {selected_model_index} is larger than the '
f'number of models ({len(model_ids)}) we have available.')
if selected_model_index < 0:
raise ValueError(
f'Selected model index is less than 0: {selected_model_index}')
model_id = model_ids[selected_model_index]
snapshot = data_cache.get_starting_snapshot(self._data_store,
self._session.project_id,
self._session.brain_id,
self._session.session_id)
return self._data_store.resource_id_from_proto_ids(
project_id=snapshot.project_id,
brain_id=snapshot.brain_id,
session_id=snapshot.session,
model_id=model_id)
def _lookup_model_resource_id(self, project_id, brain_id, model_id):
"""Get model resource ID based on model ID from arbitrary session."""
res_ids, _ = self._data_store.list_by_proto_ids(
project_id=project_id,
brain_id=brain_id,
session_id='*',
model_id=model_id,
page_size=2)
if len(res_ids) != 1:
raise RuntimeError(
f'Expected one requested model with ID {model_id} in ' +
f'projects/{project_id}/brains/{brain_id}, but found {len(res_ids)}')
return res_ids[0]
def select_final_model(self):
"""Select the final model ID for each session type."""
session_type = self._get_session_type()
if session_type == session_pb2.INTERACTIVE_TRAINING:
model_id = self._best_offline_model()
return self._data_store.resource_id_from_proto_ids(
project_id=self._session.project_id,
brain_id=self._session.brain_id,
session_id=self._session.session_id,
model_id=model_id)
snapshot = data_cache.get_starting_snapshot(self._data_store,
self._session.project_id,
self._session.brain_id,
self._session.session_id)
if session_type == session_pb2.INFERENCE:
return self._lookup_model_resource_id(
snapshot.project_id,
snapshot.brain_id,
snapshot.model)
elif session_type == session_pb2.EVALUATION:
model_id = self._best_online_model()
return self._lookup_model_resource_id(
snapshot.project_id,
snapshot.brain_id,
model_id)
else:
raise ValueError(f'Unsupported session type: {session_type} found for '
'session {self._session.session_id}.')
def _best_online_model(self):
"""Select the model ID with the best online evaluation score."""
if not self._get_summary_map():
raise ValueError(
'No models found for session '
f'{self._session_resource_id.session}. Cannot compute best.')
_, model_ids, model_records = self._create_model_records()
sampling = online_eval_sampling.HighestAverageSelection()
selected_model_index = sampling.select_best(model_records)
if selected_model_index >= len(model_ids):
raise ValueError(
f'Selected model index {selected_model_index} is larger than the '
f'number of models ({len(model_ids)}) we have available.')
if selected_model_index < 0:
raise ValueError(
f'Selected model index is less than 0: {selected_model_index}')
return model_ids[selected_model_index]
def _get_session_type(self) -> session_pb2.SessionType:
return data_cache.get_session_type(
self._data_store, project_id=self._session_resource_id.project,
brain_id=self._session_resource_id.brain,
session_id=self._session_resource_id.session)
@property
def session_progress(self):
"""Get this session's progress float, lazily initialized."""
if not self._progress:
progress_per_assignment = self._data_store.get_assignment_progress(
self._session_resource_id)
if not progress_per_assignment:
self._progress = 0.0
else:
self._progress = (
sum(progress_per_assignment.values())/len(progress_per_assignment))
return self._progress
def _is_session_training(self):
return self.session_progress < 1.0
def _is_eval_complete(self):
"""Check that the total evals count is larger than the required number.
The required number is defined by _NUM_ONLINE_EVALS_PER_MODEL.
Returns:
bool: True if the required number of online evaluations have been
completed, False otherwise.
"""
total_online_evals, _, _ = self._create_model_records()
summary_map = self._get_summary_map()
return total_online_evals >= _NUM_ONLINE_EVALS_PER_MODEL * len(summary_map)
def _get_summary_map(self) -> typing.DefaultDict[
str, typing.List[model_selection_record.EvaluationSummary]]:
"""Lazily initializes map of assignment IDs to list of EvaluationSummary."""
if not self._summary_map:
before = time.perf_counter()
starting_snapshot = data_cache.get_starting_snapshot(
self._data_store, self._session.project_id, self._session.brain_id,
self._session.session_id)
offline_eval_summary = self._get_offline_eval_summary(
# Use starting snapshot to create a session resource ID.
self._data_store.resource_id_from_proto_ids(
project_id=starting_snapshot.project_id,
brain_id=starting_snapshot.brain_id,
session_id=starting_snapshot.session))
online_eval_summary = self._get_online_eval_summary()
self._summary_map = self._generate_summary_map(offline_eval_summary,
online_eval_summary)
logging.info(
'Generated summary map in %d seconds.', time.perf_counter() - before)
return self._summary_map
def _get_offline_eval_summary(
self, session_resource_id: str
) -> (model_selection_record.OfflineEvaluationByAssignmentAndEvalId):
"""Populates an OfflineEvaluationByAssignmentAndEvalId from offline evals.
Args:
session_resource_id: Resource ID for the session to read offline eval
from.
Returns:
An instance of OfflineEvaluationByAssignmentAndEvalId, which maps
(assignment_id, offline_evaluation_id) to ModelScores.
Raises:
ValueError for when the provided session does not have valid
corresponding assignments.
"""
# Get offline evals from the session of the starting snapshot in order of
# descending create time.
offline_eval_resource_ids, _ = self._data_store.list_by_proto_ids(
project_id=session_resource_id.project,
brain_id=session_resource_id.brain,
session_id=session_resource_id.session,
model_id='*',
offline_evaluation_id='*',
time_descending=True)
assignment_resource_ids, _ = self._data_store.list_by_proto_ids(
project_id=session_resource_id.project,
brain_id=session_resource_id.brain,
session_id=session_resource_id.session,
assignment_id='*')
assignment_ids = set(
[data_cache.get_assignment_id(self._data_store, res_id)
for res_id in assignment_resource_ids])
offline_eval_summary = (
model_selection_record.OfflineEvaluationByAssignmentAndEvalId())
for offline_eval_resource_id in offline_eval_resource_ids:
number_of_models_per_assignment = [
len(offline_eval_summary.model_ids_for_assignment_id(a))
for a in assignment_ids]
if (min(number_of_models_per_assignment) >=
_NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT and
sum(number_of_models_per_assignment) >=
_MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL):
# Hit all the assignments with at least
# _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT model and make
# sure we have at least _MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL
# across all assignments.
break
offline_eval = self._data_store.read(offline_eval_resource_id)
if offline_eval.assignment not in assignment_ids:
raise ValueError(
f'Assignment ID {offline_eval.assignment} not found in '
f'assignments for session {session_resource_id.session}.')
models_for_assignment = offline_eval_summary.model_ids_for_assignment_id(
offline_eval.assignment)
if len(models_for_assignment) >= _MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL:
# No need to look at this offline evaluation score since we have enough
# models for the assignment.
continue
scores_by_assignment_and_eval_id = offline_eval_summary[
model_selection_record.AssignmentEvalId(
assignment_id=offline_eval.assignment,
offline_evaluation_id=offline_eval.offline_evaluation_id)]
scores_by_assignment_and_eval_id.add_score(
offline_eval.model_id, offline_eval.score)
return offline_eval_summary
def _get_online_eval_summary(self) -> (
typing.DefaultDict[str, typing.List[float]]):
"""Gets list of online scores per model.
Returns:
typing.DefaultDict[str, List[float]] mapping model_id to scores.
"""
# This session is the evaluating session ID, so we look for online evals for
# self._session_resource_id.
online_eval_resource_ids, _ = self._data_store.list_by_proto_ids(
attribute_type=data_store_pb2.OnlineEvaluation,
project_id=self._session_resource_id.project,
brain_id=self._session_resource_id.brain,
session_id=self._session_resource_id.session, episode_id='*')
online_summaries = collections.defaultdict(list)
for online_eval_resource_id in online_eval_resource_ids:
online_eval = self._data_store.read(online_eval_resource_id)
online_summaries[online_eval.model].append(online_eval.score)
return online_summaries
def _generate_summary_map(
self, offline_eval_summary, online_eval_summary
) -> typing.DefaultDict[str, typing.List[
model_selection_record.EvaluationSummary]]:
"""Joins the summaries by corresponding assignment IDs and model IDs.
Args:
offline_eval_summary:
model_selection_record.OfflineEvaluationByAssignmentAndEvalId instance.
online_eval_summary: typing.DefaultDict[string, List[float]] mapping
model_id to scores.
Returns:
typing.DefaultDict[string, List[EvaluationSummary]] mapping assignment IDs
      to list of EvaluationSummary.
"""
summary_map = model_selection_record.SummaryMap()
# We're allowed the max of _MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL and
# number of assignments * _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT.
models_budget = max(
_MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL,
len(offline_eval_summary) * _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT)
models_by_assignment_map = copy.deepcopy(offline_eval_summary)
# First, populate with scores from top
# _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT models for each assignment.
for assignment_id in list(models_by_assignment_map.assignment_ids):
top_model_scores_for_assignment_id = (
models_by_assignment_map.scores_by_offline_evaluation_id(
assignment_id,
models_limit=_NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT))
for eval_id, model_score in top_model_scores_for_assignment_id:
self._add_summary(assignment_id, eval_id, model_score,
online_eval_summary.get(model_score.model_id, []),
summary_map)
models_by_assignment_map.remove_model(model_score.model_id)
# If we can still add more models, populate by getting one from each
# assignment.
while (summary_map.models_count < models_budget and
models_by_assignment_map):
for assignment_id in list(models_by_assignment_map.assignment_ids):
top_scores_for_assignment_id = (
models_by_assignment_map.scores_by_offline_evaluation_id(
assignment_id, models_limit=1)) # Pick off one model at a time.
for eval_id, model_score in top_scores_for_assignment_id:
self._add_summary(assignment_id, eval_id, model_score,
online_eval_summary.get(model_score.model_id, []),
summary_map)
models_by_assignment_map.remove_model(model_score.model_id)
return summary_map
def _add_summary(self, assignment_id, eval_id, model_score, online_scores,
summary_map):
"""Add or update an existing EvaluationSummary in the SummaryMap.
Args:
assignment_id: Assignment ID of the score to update the SummaryMap with.
eval_id: Offline evaluation ID of the score to update the SummaryMap with.
model_score: ModelScore instance containing information about the score to
update the SummaryMap with.
online_scores: List of online scores to update the SummaryMap with.
summary_map: SummaryMap instance to update.
"""
existing_eval_summary = (
summary_map.eval_summary_for_assignment_and_model(
assignment_id, model_score.model_id))
if existing_eval_summary:
existing_eval_summary.offline_scores[eval_id] = model_score.score
else:
summary_map[assignment_id].append(
model_selection_record.EvaluationSummary(
model_id=model_score.model_id,
offline_scores={eval_id: model_score.score},
online_scores=online_scores))
def _create_model_records(self):
"""Creates ModelRecords for sampling and return number of total eval runs.
Returns:
total_runs: Number of online evaluation runs recorded.
model_ids: List of model IDs that recorded online evaluations.
model_records: List of online_eval_sampling.ModelRecords instances.
Raises:
ValueError when size of model IDs and model records don't match.
"""
total_runs = 0
model_ids = []
model_records = []
for _, eval_summaries in self._get_summary_map().items():
for eval_summary in eval_summaries:
successes = 0
failures = 0
model_ids.append(eval_summary.model_id)
for score in eval_summary.online_scores:
if score == EPISODE_SCORE_SUCCESS:
successes += 1
elif score == EPISODE_SCORE_FAILURE:
failures += 1
else:
logging.error('Unknown online score %d.', score)
total_runs += successes + failures
model_records.append(online_eval_sampling.ModelRecord(
successes=successes, failures=failures))
if len(model_records) != len(model_ids):
raise ValueError(
'Size of model records don\'t match the size of model IDs.')
return total_runs, model_ids, model_records
| apache-2.0 | -6,392,412,219,479,050,000 | 41.718812 | 80 | 0.65225 | false | 3.931657 | false | false | false |
JoaquimPatriarca/senpy-for-gis | gasp/oss/utils.py | 1 | 1759 | """
Manage data in folders
"""
def identify_groups(folder, splitStr, groupPos, outFolder):
"""
    Identify the group a file belongs to and move it into a new folder
    together with the other files that belong to that group.
    How is the group identified?
    * The file name is split in two around splitStr;
    * groupPos identifies which part (first or second) corresponds to
    the group.
"""
import os
from gasp.oss.info import list_files
from gasp.oss.ops import create_folder
from gasp.oss.ops import copy_file
files = list_files(folder)
# List groups and relate files with groups:
groups = {}
for _file in files:
# Split filename
filename = os.path.splitext(os.path.basename(_file))[0]
fileForm = os.path.splitext(os.path.basename(_file))[1]
group = filename.split(splitStr)[groupPos]
namePos = 1 if not groupPos else 0
if group not in groups:
groups[group] = [[filename.split(splitStr)[namePos], fileForm]]
else:
groups[group].append([filename.split(splitStr)[namePos], fileForm])
    # Create one folder for each group and put the files related to that
    # group in it.
for group in groups:
group_folder = create_folder(os.path.join(outFolder, group))
for filename in groups[group]:
copy_file(
os.path.join(folder, '{a}{b}{c}{d}'.format(
a=filename[0], b=splitStr, c=group,
d=filename[1]
)),
os.path.join(group_folder, '{a}{b}'.format(
a=filename[0], b=filename[1]
))
)
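# Usage sketch added for illustration (not part of the original module); the
# folder paths below are assumptions.
def _example_identify_groups():
    # A file named 'ndvi_2019.tif', split on '_' with groupPos=1, belongs to
    # group '2019' (namePos=0 keeps 'ndvi'), so it is copied to
    # <outFolder>/2019/ndvi_2019.tif.
    identify_groups(
        '/data/rasters',          # folder with the mixed files (assumed)
        '_',                      # splitStr
        1,                        # groupPos: part of the name that is the group
        '/data/rasters_by_group'  # outFolder (assumed)
    )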
| gpl-3.0 | 5,643,578,438,477,932,000 | 32.188679 | 79 | 0.582717 | false | 3.718816 | false | false | false |
cactaur/astropy-utils | astropy_util.py | 1 | 26719 | """A set of helper functions to work with the astropy module."""
import functools
import random
import string
import tempfile
import subprocess
import collections
from itertools import cycle, islice, chain, combinations, zip_longest
import scipy.special
import scipy.stats
import numpy as np
from astropy.table import Table, join
from astropy.coordinates import SkyCoord
from astropy import units as u
#from astroquery.vizier import Vizier
###############################################################################
# Astropy Utilities #
###############################################################################
def change_column_dtype(table, colname, newdtype):
'''Changes the dtype of a column in a table.
Use this function to change the dtype of a particular column in a table.
'''
tempcol = table[colname]
colindex = table.colnames.index(colname)
del(table[colname])
table.add_column(np.asanyarray(tempcol, dtype=newdtype), index=colindex)
def astropy_table_index(table, column, value):
'''Returns the row index of the table which has the value in column.
There are often times when you want to know the index of the row
where a certain column has a value. This function will return a
list of row indices that match the value in the column.'''
return astropy_table_indices(table, column, [value])
def astropy_table_indices(table, column, values):
'''Returns the row indices of the table which have the values in column.
If you need to get the indices of values located in the column of a table,
this function will determine that for you.
'''
indices = mark_selections_in_columns(table[column], values)
return np.where(indices)
def mark_selections_in_columns(col, values):
'''Return index indicating values are in col.
Returns an index array which is the size of col that indicates True when
col holds an entry equal to value, and False otherwise.'''
if len(col) > len(values)**2:
return multi_logical_or(*[col == v for v in values])
else:
try:
valset = set(values)
except TypeError:
unmasked_values = values[values.mask == False]
valset = set(unmasked_values)
index = []
for v in col:
try:
incol = v in valset
except TypeError:
incol = False
index.append(incol)
return np.array(index, dtype=np.bool)
def multi_logical_or(*arrs):
'''Performs a logical or for an arbitrary number of boolean arrays.'''
return functools.reduce(np.logical_or, arrs, False)
def multi_logical_and(*arrs):
'''Performs a logical or for an arbitrary number of boolean arrays.'''
return functools.reduce(np.logical_and, arrs, True)
def astropy_table_row(table, column, value):
'''Returns the row of the table which has the value in column.
If you want to know the row in an astropy table where a value in a
column corresponds to a given value, this function will return that
row. If there are multiple rows which match the value in the
column, you will get all of them. If no rows match the value, this
function will throw a ValueError.'''
return table[astropy_table_index(table, column, value)]
def extract_subtable_from_column(table, column, selections):
'''Returns a table which only contains values in selections.
This function will create a Table whose values in column are only
those found in selections.
'''
return table[astropy_table_indices(table, column, selections)]
def filter_column_from_subtable(table, column, selections):
'''Returns a table where none of the values in column are selections.
This function will create a Table whose values are those in column which
are not found in selections.
'''
subindices = astropy_table_indices(table, column, selections)
compindices = get_complement_indices(subindices, len(table))
return table[compindices]
def join_by_id(table1, table2, columnid1, columnid2, join_type="inner",
conflict_suffixes=("_A", "_B"), idproc=None,
additional_keys=[]):
'''Joins two tables based on columns with different names.
Table1 and table2 are the tables to be joined together. The column names
that should be joined are the two columnids. Columnid1 will be the column
name for the returned table. In case of conflicts, the
conflict suffixes will be appended to the keys with conflicts. To merge
conflicts instead of keeping them separate, add the column name to
additional_keys.
If the entries in the columns to be merged should be processed a certain
    way, the function that does the processing should be given in idproc. For
no processing, "None" should be passed instead.
'''
# Process the columns if need be.
if idproc is not None:
# I want to duplicate the data so it won't be lost. And by keeping it
# in the table, it will be preserved when it is joined.
origcol1 = table1[columnid1]
origcol2 = table2[columnid2]
randomcol1 = generate_random_string(10)
randomcol2 = generate_random_string(10)
table1.rename_column(columnid1, randomcol1)
table2.rename_column(columnid2, randomcol2)
table1[columnid1] = idproc(origcol1)
table2[columnid2] = idproc(origcol2)
# If columnid1 = columnid2, then we can go straight to a join. If not, then
# columnid2 needs to be renamed to columnid1. If table2[columnid1] exists,
# then we have a problem and an exception should be thrown.
if columnid1 != columnid2:
if columnid1 not in table2.colnames:
table2[columnid1] = table2[columnid2]
else:
raise ValueError(
"Column {0} already exists in second table.".format(columnid1))
try:
newtable = join(
table1, table2, keys=[columnid1]+additional_keys,
join_type=join_type, table_names=list(conflict_suffixes),
uniq_col_name="{col_name}{table_name}")
finally:
# Clean up the new table.
if columnid1 != columnid2:
del(table2[columnid1])
if idproc is not None:
del(table1[columnid1])
del(table2[columnid2])
del(newtable[randomcol1])
del(newtable[randomcol2])
table1.rename_column(randomcol1, columnid1)
table2.rename_column(randomcol2, columnid2)
return newtable
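# Usage sketch for join_by_id (illustrative only; the table and column names
# below are assumptions, not a real catalogue).
def _example_join_by_id():
    tbl_a = Table({'KIC': [1, 2, 3], 'teff': [5700, 6100, 5300]})
    tbl_b = Table({'kic_id': [2, 3, 4], 'logg': [4.3, 4.5, 4.1]})
    # Rows are matched where tbl_a['KIC'] equals tbl_b['kic_id']; the joined
    # column keeps the first table's name ('KIC').
    return join_by_id(tbl_a, tbl_b, 'KIC', 'kic_id')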
def join_by_ra_dec(
table1, table2, ra1="RA", dec1="DEC", ra2="RA", dec2="DEC",
ra1_unit=u.degree, dec1_unit=u.degree, ra2_unit=u.degree, dec2_unit=u.degree,
match_threshold=5*u.arcsec, join_type="inner",
conflict_suffixes=("_A", "_B")):
'''Join two tables by RA and DEC.
This function will essentially perform a join between tables using
coordinates. The column names for the coordinates should be given in ra1,
ra2, dec1, dec2.
In case of conflicts, the conflict_suffices will be used for columns in
table1 and table2, respectively.
'''
# Instead of directly using RA/Dec, we'll set up a column that maps rows in
# table 2 to rows in table2.
match_column = generate_random_string(10)
ra1_coords = table1[ra1]
try:
ra1_coords = ra1_coords.to(ra1_unit)
except u.UnitConversionError:
ra1_coords = ra1_coords * ra1_unit
dec1_coords = table1[dec1]
try:
dec1_coords = dec1_coords.to(dec1_unit)
except u.UnitConversionError:
dec1_coords = dec1_coords * dec1_unit
ra2_coords = table2[ra2]
try:
ra2_coords = ra2_coords.to(ra2_unit)
except u.UnitConversionError:
ra2_coords = ra2_coords * ra2_unit
dec2_coords = table2[dec2]
try:
dec2_coords = dec2_coords.to(dec2_unit)
except u.UnitConversionError:
dec2_coords = dec2_coords * dec2_unit
# This will cross-match the two catalogs to find the nearest matches.
coords1 = SkyCoord(ra=ra1_coords, dec=dec1_coords)
coords2 = SkyCoord(ra=ra2_coords, dec=dec2_coords)
idx, d2d, d3d = coords1.match_to_catalog_sky(coords2)
# We only count matches which are within the match threshold.
matches = d2d < match_threshold
matched_tbl1 = table1[matches]
try:
table2[match_column] = np.arange(len(table2))
matched_tbl1[match_column] = table2[idx[matches]][match_column]
newtable = join(
matched_tbl1, table2, keys=match_column,
join_type=join_type, table_names=list(conflict_suffixes),
uniq_col_name="{col_name}{table_name}")
finally:
del(table2[match_column])
del(newtable[match_column])
# Want to inherit table1 column naming.
# This will require deleting the table2 coordinates from the new table.
try:
del(newtable[ra2])
except KeyError:
# This occurs when ra1=ra2.
assert ra1==ra2
newtable.rename_column(ra1+conflict_suffixes[0], ra1)
del(newtable[ra2+conflict_suffixes[1]])
try:
del(newtable[dec2])
except KeyError:
assert dec1==dec2
newtable.rename_column(dec1+conflict_suffixes[0], dec1)
del(newtable[dec2+conflict_suffixes[1]])
return newtable
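# Usage sketch for join_by_ra_dec (illustrative only; the coordinates and
# column values are made up).
def _example_join_by_ra_dec():
    cat1 = Table({'RA': [10.6840, 150.1], 'DEC': [41.2690, 2.2],
                  'mag': [12.3, 14.1]})
    cat2 = Table({'RA': [10.6841, 200.0], 'DEC': [41.2691, -5.0],
                  'period': [3.2, 7.7]})
    # Only the first row of cat1 has a counterpart in cat2 within 5 arcsec,
    # so the inner join returns a single matched row with columns from both.
    return join_by_ra_dec(cat1, cat2, match_threshold=5 * u.arcsec)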
def generate_random_string(length):
'''Generate a random string with the given length.'''
return "".join([random.choice(string.ascii_letters) for _ in
range(length)])
def get_complement_indices(initindices, tablelength):
'''Returns the indices corresponding to rows not in partialtable.
This function essenially creates indices which correspond to the rows in
totaltable rows not in partialtable.
'''
compmask = np.ones(tablelength, np.bool)
compmask[initindices] = 0
return np.where(compmask)
def get_complement_table(partialtable, totaltable, compcolumn):
'''Returns a subtable of total table without rows in partialtable.
This is kinda like an operation to create a table which when stacked with
partialtable and sorted by compcolumn, will create totaltable.
'''
partialindices = astropy_table_indices(totaltable, compcolumn,
partialtable[compcolumn])
compmask = get_complement_indices(partialindices, len(totaltable))
comp_sample = totaltable[compmask]
return comp_sample
def split_table_by_value(table, column, splitvalue):
'''Bifurcates a table in two.
This function splits a table based on the values in column and returns two
tables in a 2-tuple. Values less than splitvalue are in the first tuple.
Values greater than splitvalue are in the second.
'''
lowentries = table[np.where(table[column] < splitvalue)]
highentries = table[np.where(table[column] >= splitvalue)]
return lowentries, highentries
def first_row_in_group(tablegroup):
'''Iterates through groups and selects the first row from each group.
This is good for tables where there are multiple entries for each grouping,
but the first row in the table is the preferable one. Such a thing occurs
with the Catalog of Active Binary Systems (III).
'''
rowholder = []
for group in tablegroup.groups:
rowholder.append(group[0])
filteredtable = Table(rows=rowholder, names=tablegroup.colnames)
return filteredtable
def byte_to_unicode_cast(bytearr):
'''Cast a numpy byte array to unicode.
A change in Astropy 3.0 led to some columns from FITS files being stored
as numpy byte arrays instead of string. This is an explicit cast of this
column to a string array.
https://github.com/astropy/astropy/pull/6821
The text in the bug report seems to indicate that conversion from bytes
objects to unicode should be done transparently, but this doesn't seem to
be the case.'''
strcol = np.asarray(bytearr, np.unicode_)
return strcol
def set_numeric_fill_values(table, fill_value):
'''Fill all of the columns in table specified in colnames.
This is a convenience function to be able to conveniently get a filled
table without having to manually fill a ton of columns.'''
for col in table.colnames:
if np.issubdtype(table[col].dtype, np.number):
table[col].fill_value = fill_value
def mask_numeric_fill_values(table, fill_value):
'''Fill all of the columns in table specified in colnames.
This convenience function to mask numeric columns in a table.'''
for col in table.colnames:
if np.issubdtype(table[col].dtype, np.number):
table[col] = np.ma.masked_values(table[col], fill_value)
###############################################################################
# Astroquery Catalog #
###############################################################################
def Vizier_cached_table(tblpath, tablecode):
'''Read a table from disk, querying Vizier if needed.
For large tables which can be automatically queried from Vizier, but take a
long time to download, this function will download the queried table into
tblpath, and then read from it for all following times.
The tablecode is the code (e.g. "J/A+A/512/A54/table8") uniquely
identifying the desired table.'''
try:
tbl = Table.read(str(tblpath), format="ascii.ipac")
except FileNotFoundError:
        # astroquery is only needed when the table is not cached yet; the
        # module-level import above is commented out, so import it here.
        from astroquery.vizier import Vizier
        Vizier.ROW_LIMIT = -1
tbl = Vizier.get_catalogs(tablecode)[0]
tbl.write(str(tblpath), format="ascii.ipac")
return tbl
###############################################################################
# Spreadsheet help #
###############################################################################
def inspect_table_as_spreadsheet(table):
'''Opens the table in Libreoffice.
For cases where it would be much easier to look at data by analyzing it in
a spreadsheet, this function will essentially take the table and load it
into Libreoffice so that operations can be done on it.
'''
with tempfile.NamedTemporaryFile() as fp:
table.write(fp.name, format="ascii.csv")
libreargs = ["oocalc", fp.name]
try:
subprocess.run(libreargs)
except FileNotFoundError:
libreargs[0] = "localc"
subprocess.run(libreargs)
def inspect_table_in_topcat(table):
'''Opens the table in TOPCAT
TOPCAT is a useful tool for inspecting tables that are suited to be written
as FITS files. TOPCAT is actually much more extensible than we are using it
for, but it's helpful for this purpose.
'''
with tempfile.NamedTemporaryFile() as fp:
table.write(fp.name, format="fits", overwrite=True)
topcatargs = ["/home/regulus/simonian/topcat/topcat", fp.name]
subprocess.run(topcatargs)
###############################################################################
# Caching large data files #
###############################################################################
class memoized(object):
    '''Decorator. Caches a function's return value each time it is called. If
called later with the same arguments, the cached value is returned (not
reevaluated).
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up
print("Uncacheable")
return self.func(*args)
if args in self.cache:
print("Cached")
return self.cache[args]
else:
print("Putting into cache")
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
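# Usage sketch for the memoized decorator (illustrative only; the body of the
# decorated function is a placeholder).
@memoized
def _example_expensive_lookup(object_id):
    # Stands in for a slow computation or query.
    return {'id': object_id, 'value': object_id * 2}
# The first call with a given object_id evaluates the body and caches the
# result; later calls with the same argument return the cached value.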
def shortcut_file(filename, format="fits", fill_value=-9999):
''' Return a decorator that both caches the result and saves it to a file.
This decorator should be used for commonly used snippets and combinations
of tables that are small enough to be read in quickly, and processed enough
that generating them from scratch is time-intensive.
'''
class Memorize(object):
'''
A function decorated with @memorize caches its return value every time
it is called. If the function is called later with the same arguments,
the cached value is returned (the function is not reevaluated). The
cache is stored in the filename provided in shortcut_file for reuse in
future executions. If the function corresponding to this decorated has
been updated, make sure to change the object at the given filename.
'''
def __init__(self, func):
self.func = func
self.filename = filename
self.table = None
def __call__(self, *args):
if self.table is None:
try:
self.read_cache()
except FileNotFoundError:
value = self.func(*args)
self.table = value
self.save_cache()
return self.table
def read_cache(self):
'''
Read the table in from the given location. This will take the
format given in the shortcut_file command.
'''
self.table = Table.read(self.filename, format=format,
character_as_bytes=False)
mask_numeric_fill_values(self.table, fill_value)
# If the dtype is fits, then the Astropy FITS program doesn't
# convert correctly between bytes and strings.
# See https://github.com/astropy/astropy/issues/5280
def save_cache(self):
'''
Save the table into the given filename using the given format.
'''
set_numeric_fill_values(self.table, fill_value)
try:
self.table.write(self.filename, format=format)
except FileNotFoundError:
self.filename.parent.mkdir(parents=True)
self.table.write(self.filename, format=format)
def __repr__(self):
''' Return the function's docstring. '''
return self.func.__doc__
def __get__(self, obj, objtype):
''' Support instance methods. '''
return functools.partial(self.__call__, obj)
return Memorize
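# Usage sketch for shortcut_file (illustrative only; the cache path and table
# contents are assumptions). Note that save_cache() expects a pathlib.Path
# when the parent directory may need to be created.
import pathlib
@shortcut_file(pathlib.Path("cache/bright_targets.fits"))
def _example_bright_targets():
    # Placeholder for an expensive table construction; evaluated once, then
    # read back from the FITS cache on later runs.
    return Table({'id': [1, 2, 3], 'kmag': [7.1, 8.4, 9.0]})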
###############################################################################
# Itertools help #
###############################################################################
def roundrobin(*iterables):
'''roundrobin('ABC', 'D', 'EF') --> ADEBFC'''
    # Recipe credited to George Sakkis
pending = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
def take(n, iterable):
'''Return first n items of the iterable as a list.'''
return list(islice(iterable, n))
def flatten(listOfLists):
"Flatten one level of nesting"
return chain.from_iterable(listOfLists)
def random_permutation(iterable, r=None):
"""Random selection from itertools.product(*args, **kwds)"""
pool = tuple(iterable)
r = len(pool) if r is None else r
return tuple(random.sample(pool, r))
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def consume(iterator, n):
"Advance the iterator n-steps ahead. If n is none, consume entirely."
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
collections.deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None)
def nth(iterable, n, default=None):
"Returns the nth item or a default value"
return next(islice(iterable, n, None), default)
def zip_equal(*iterables):
'''Unzips, throwing an error if iterables have different lengths.'''
sentinel = object()
for combo in zip_longest(*iterables, fillvalue=sentinel):
if sentinel in combo:
raise ValueError("Iterables have different lengths")
yield combo
###############################################################################
# Binary confidence intervals #
###############################################################################
def poisson_upper(n, sigma):
'''Return the Poisson upper limit of the confidence interval.
This is the upper limit for a given number of successes n, and the width of
the confidence interval is given in sigmas.'''
up = (n+1)*(1 - 1/9/(n+1) + sigma/3/np.sqrt(n+1))**3
return up
def scaled_poisson_upper(n, sigma, scale):
'''Return the upper limit of a scaled Poisson variable.
This is the upper limit for a given number of successes if the random
variable was scaled by a scale factor.'''
confidence_level = scipy.stats.norm.cdf(sigma)
upperlim = scipy.stats.chi2.ppf(1-(1-confidence_level)/scale, 2*n+2)/2
return upperlim
def scaled_poisson_lower(n, sigma, scale):
'''Return the lower limit of a scaled Poisson variable.
This is the lower limit for a given number of successes if the random
variable was scaled by a scale factor.'''
confidence_level = scipy.stats.norm.cdf(sigma)
lowerlim = scipy.stats.chi2.ppf(1-confidence_level/scale, 2*n)/2
return lowerlim
def poisson_upper_exact(n, sigma):
'''Return the Poisson upper limit of the confidence interval.
This is the upper limit for a given number of successes n, and the width of
the confidence interval is given in sigmas. This expression uses a
root-finding algorithm as opposed to an approximation.'''
confidence_level = scipy.stats.norm.cdf(sigma)
upperlim = scipy.stats.chi2.ppf(confidence_level, 2*n+2)/2
return upperlim
def poisson_lower_exact(n, sigma):
'''Return the Poisson lower limit of the confidence interval.
This is the lower limit for a given number of successes n, and the width of
the confidence interval is given in sigmas. This expression uses a
root-finding algorithm as opposed to an approximation.'''
confidence_level = scipy.stats.norm.cdf(sigma)
lowerlim = scipy.stats.chi2.ppf(1-confidence_level, 2*n)/2
return lowerlim
def poisson_lower(n, sigma):
'''Return the Poisson lower limit of the confidence interval.
This is the lower limit for a given number of successes n, and the width of
the confidence interval is given in sigmas. This formula is from Gehrels
(1986) and contains tuned parameters.'''
betas = {1.0: 0.0, 2.0: 0.062, 3.0:0.222}
gammas = {1.0: 0.0, 2.0: -2.19, 3.0: -1.85}
low = n * (1 - 1/9/n - sigma/3/np.sqrt(n) + betas[sigma]*n**gammas[sigma])**3
return low
def binomial_upper(n1, n, sigma=1):
'''The upper limit of the one-sigma binomial probability.
This is the upper limit for a given number of successes n1 out of n trials.
This is a numerically exact solution to the value.'''
if sigma <= 0:
raise ValueError("The probability needs to be positive.")
cl = -scipy.special.erf(-sigma)
ul = np.where(n1 != n, scipy.special.betaincinv(n1+1, n-n1, cl), 1)
return ul
def binomial_lower(n1, n, sigma=1):
'''The lower limit of the one-sigma binomial probability.
This is the lower limit for a given number of successes n1 out of n trials.
This provides a numerically exact solution to the value.'''
ll = 1 - binomial_upper(n-n1, n, sigma=sigma)
return ll
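# Worked example for the interval helpers (illustrative only; the counts are
# arbitrary).
def _example_confidence_intervals():
    # 1-sigma interval on a success fraction of 7 successes out of 20 trials.
    frac_interval = (binomial_lower(7, 20, sigma=1), binomial_upper(7, 20, sigma=1))
    # 1-sigma Poisson interval on a raw count of 7 events.
    count_interval = (poisson_lower_exact(7, 1), poisson_upper_exact(7, 1))
    return frac_interval, count_interval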
###############################################################################
# Numpy help #
###############################################################################
def slicer_vectorized(arr, strindices):
'''Extract the substring at strindices from an array.
Given a string array arr, extract the substring elementwise corresponding
to the indices in strindices.'''
arr = np.array(arr, dtype=np.unicode_)
indexarr = np.array(strindices, dtype=np.int_)
temparr = arr.view('U1').reshape(len(arr), -1)[:,strindices]
return np.fromstring(temparr.tostring(), dtype='U'+str(len(indexarr)))
def check_null(arr, nullvalue):
'''Returns a boolean array indicating which values of arr are nullvalue.
The currently recognized types of nullvalues are floats, NaN, and
np.ma.masked. This function encapsulates using the appropriate methods,
because simply doing arr == nullvalue does not work all of the time,
particularly for NaN values.'''
if np.isnan(nullvalue):
return np.isnan(arr)
elif nullvalue is np.ma.masked:
return np.ma.getmaskarray(arr)
else:
return arr == nullvalue
###############################################################################
# Matplotlib Boundaries #
###############################################################################
def round_bound(lowbounds, upbounds, round_interval):
'''Return a lower and upper bound within the given rounding interval.
Generally the bounds should be the value plus or minus the error.
Round-interval should be the width of the tick marks.'''
minbound, maxbound = np.min(lowbounds), np.max(upbounds)
lowlim = (minbound // round_interval) * round_interval
highlim = ((maxbound // round_interval) + 1) * round_interval
return lowlim, highlim
def adjust_axes(ax, lowx, highx, lowy, highy, xdiff, ydiff):
'''Adjust the given axes to ensure all data fits within them.
    Ensure that the given matplotlib axes can accommodate both the new x and y
limits provided in this function, as well as the internal x and y limits.
The tick intervals for x and y should be given in xdiff and ydiff.'''
min_x, max_x = round_bound(lowx, highx, xdiff)
min_y, max_y = round_bound(lowy, highy, ydiff)
prev_xmin, prev_xmax = ax.get_xlim()
prev_ymin, prev_ymax = ax.get_ylim()
min_x = min(min_x, prev_xmin)
max_x = max(max_x, prev_xmax)
min_y = min(min_y, prev_ymin)
max_y = max(max_y, prev_ymax)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
| bsd-3-clause | -1,405,172,378,491,564,800 | 37.77939 | 82 | 0.630563 | false | 4.01367 | false | false | false |
MehranMirkhan/ai_system | models/model.py | 1 | 5838 |
import tqdm
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import utils.filesys as fs
import utils.utils as ut
import utils.tfw as tfw
import utils.printer as pr
import utils.datasets as ds
""" A combination of modules that interacts with environment """
class Model:
def __init__(self, name, **kwargs):
self.name = name
self._make_model(**kwargs)
self._set_description()
self._make_var_dict()
self._make_session()
def __del__(self):
self.sess.close()
def _set_description(self, **kwargs):
desc = 'name: {0}'.format(self.name)
for module in self.modules:
desc += '\n/{0}: {1}'.format(module.name, module.get_description())
self._make_description(desc)
def _make_description(self, desc):
self.description = desc
self.desc = tf.summary.text(
'description', tf.convert_to_tensor(desc))
def _make_model(self, **kwargs):
self.modules = []
def _make_var_dict(self):
self.var_dict = {}
for module in self.modules:
self.var_dict.update(module.var_dict)
def _make_session(self):
self.model_saver = tf.train.Saver(self.var_dict)
self.initializer = tf.global_variables_initializer()
self.sess = tf.Session()
self.reset()
self.print_num_params()
def print_num_params(self):
for module in self.modules:
pr.log('# of params in {0}: {1}'.format(
module.name, module.get_num_params()))
def reset(self):
self.sess.run(self.initializer)
def save(self, path):
fs.check_dir(path, make=True)
self.model_saver.save(self.sess, path)
def load(self, path):
fs.check_dir(path, fatal=True)
self.model_saver.restore(self.sess, path)
#---------------------------------------------------------------------------#
#--------------------------------- System ----------------------------------#
#---------------------------------------------------------------------------#
class System:
def __init__(self, dataset):
self.dataset = dataset
def train_x(self, train_func, batch_count=100, batch_size=20, procs=[], **kwargs):
gen = ds.batch_generator(self.dataset['train'], batch_count, batch_size)
for batch in gen:
result = train_func(batch['x'], **kwargs)
for proc in procs:
proc.process(batch, result)
def train_xy(self, train_func, batch_count=100, batch_size=20, procs=[], **kwargs):
gen = ds.batch_generator(self.dataset['train'], batch_count, batch_size)
for batch in gen:
result = train_func(batch['x'], batch['y'], **kwargs)
for proc in procs:
proc.process(batch, result)
def train_batch(self, train_func, batch_count=100, batch_size=20, procs=[], **kwargs):
gen = ds.batch_generator(self.dataset['train'], batch_count, batch_size)
for batch in gen:
result = train_func(batch, batch_count, **kwargs)
for proc in procs:
proc.process(batch, result)
def test_x(self, test_func, batch_size=100):
gen = ds.epoch_generator(self.dataset['test'], batch_size)
results = []
for batch in gen:
results.append(test_func(batch['x']))
return np.mean(results)
def test_xy(self, test_func, batch_size=100):
gen = ds.epoch_generator(self.dataset['test'], batch_size)
results = []
for batch in gen:
results.append(test_func(batch['x'], batch['y']))
return np.mean(results)
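# Usage sketch (illustrative only; the model object and dataset layout are
# assumptions): System expects a dict with 'train'/'test' splits whose batches
# expose 'x' and 'y', and drives a per-batch training function, e.g.
#   system = System(dataset={'train': train_split, 'test': test_split})
#   system.train_xy(my_model.train_step, batch_count=1000, batch_size=32,
#                   procs=[Reporter(steps=100, kwords=['loss'])])
#   accuracy = system.test_xy(my_model.accuracy)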
#---------------------------------------------------------------------------#
#------------------------------- Processors --------------------------------#
#---------------------------------------------------------------------------#
class ResultProcessor:
def process(self, batch, result):
pass
class Logger(ResultProcessor):
def __init__(self, model, log_dir='./log/', scalar_step=1, image_step=1000):
self.log_dir = log_dir + str(ut.generate_id()) + '/'
fs.check_dir(self.log_dir, make=True)
self.summary_saver = tf.summary.FileWriter(self.log_dir, model.sess.graph)
self.log(model.sess.run(model.desc))
self.scalar_step = scalar_step
self.image_step = image_step
def process(self, batch, result):
gs = result['global_step']
s = result.get('summary', None)
si = result.get('summary-image', None)
if gs % self.scalar_step == 0:
self.log(s, gs)
if gs % self.image_step == 0:
self.log(si, gs)
def log(self, summary=None, global_step=0):
if summary is not None:
self.summary_saver.add_summary(summary, global_step)
class Reporter(ResultProcessor):
def __init__(self, steps=100, kwords=[], log_dir=None):
self.steps = steps
self.kwords = kwords
self.log_dir = log_dir
def process(self, batch, result):
step = batch['step']
if step % self.steps == 0:
report = '[step {0}]'.format(step)
if 'global_step' in result:
report += '[gstep {0}]'.format(result['global_step'])
for word in self.kwords:
report += '[{0} {1:.4f}]'.format(word, result[word])
tqdm.tqdm.write(report)
self.log2file(report)
def log2file(self, msg):
if self.log_dir is not None:
with open(self.log_dir + "report.txt", "a") as file:
file.write(msg + "\n")
file.close()
class Presenter(ResultProcessor):
def __init__(self, name, sess, func, fig_num=1, steps=100, logger=None):
self.name = name
self.sess = sess
self.func = func
self.fig_num = fig_num
self.steps = steps
self.logger = logger
plt.ion()
if logger is not None:
image = func()
self.image_ph = tf.placeholder(shape=image.shape, dtype=tf.float32)
self.image_r = tfw.compact(self.image_ph)
self.summary = tf.summary.image(name, self.image_r)
def __del__(self):
plt.ioff()
plt.show()
def process(self, batch, result):
gstep = result['global_step']
if gstep % self.steps == 0:
images = self.func()
# Summarizing the image
if self.logger is not None:
self.logger.log(
self.sess.run(self.summary, {self.image_ph: images}), gstep)
# Displaying the image
plt.figure(self.fig_num)
ut.show2(images)
plt.pause(1e-5)
| mit | -2,729,038,854,251,442,700 | 26.933014 | 87 | 0.617506 | false | 3.042209 | true | false | false |
LethusTI/supportcenter | vendor/lethusbox/lethusbox/fs/models.py | 1 | 7782 | # -*- coding: utf-8 -*-
__all__ = ('Folder', 'FileFolder', 'Album', 'PhotoAlbum')
import datetime
from mongoengine import *
from bson import ObjectId
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
from filesize import size
PHOTO_MIMETYPES = {
'PNG': 'image/png',
'JPEG': 'image/jpeg',
'JPG': 'image/jpeg',
'GIF': 'image/gif',
    # TODO: add more here if other formats turn up
}
class Album(Document):
"""
    A class representing an album.
    Attributes:
    * ref: reference to the document that owns the album
    * created: creation date of the album
"""
    ref = GenericReferenceField()  # document that owns the album
created = DateTimeField(default=datetime.datetime.now)
@classmethod
def new_from_owner(cls, owner):
"""
        Create a new album for an object.
"""
obj = cls(ref=owner)
obj.save()
return obj
@property
def photos(self):
"""
        Return a queryset of the album's photos.
"""
return PhotoAlbum.objects(album=self)
def put_file(self, infile, pk=None):
"""
        Insert a photo into the album.
        * infile: input file object (IO-like).
        * pk: primary key to assign to the photo (optional).
"""
photo = PhotoAlbum()
photo.image.put(infile)
photo.album = self
if pk:
photo.pk = ObjectId(pk)
photo.save()
return photo
def delete_all_photos(self):
"""
        Remove all photos from the album.
"""
for photo in self.photos:
photo.delete()
def delete(self, *args, **kwargs):
"""
        Delete the album and its photos.
"""
self.delete_all_photos()
return super(Album, self).delete(*args, **kwargs)
def lock_photos(self):
"""
        Lock the photos to prevent removal.
"""
for photo in self.photos:
photo.locked = True
photo.save()
meta = {'allow_inheritance': False,
'collection': 'album',
'indexes': [
{'fields': ['ref']},
]}
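# Usage sketch (illustrative only): assumes a configured MongoDB connection
# with the 'fs' alias registered and an existing document `owner`.
def _example_album_usage(owner, image_path):
    album = Album.new_from_owner(owner)
    with open(image_path, 'rb') as infile:
        photo = album.put_file(infile)
    return album, photo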
class PhotoAlbum(Document):
"""
    Represents a photo in an album.
    Attributes:
    * image: ImageField storing the image, maximum size 800x600, 160x120 thumbnail.
    * locked: whether the photo is locked against removal/editing.
    * comment: photo comment.
    * created: date and time the photo was created.
    * album: album the photo belongs to.
"""
image = ImageField(
        db_alias='fs',  # dedicated database for files
size=(800, 600),
thumbnail_size=(160, 120, False))
locked = BooleanField(default=False)
comment = StringField(max_length=200)
created = DateTimeField(default=datetime.datetime.now)
album = ReferenceField('Album')
@property
def mimetype(self):
return PHOTO_MIMETYPES.get(self.image.format)
def as_response(self):
"""
        Return the HTTP response containing the image.
"""
wrapper = FileWrapper(self.image)
response = HttpResponse(wrapper, content_type=self.mimetype)
response['Cache-Control'] = 'no-store, no-cache, must-revalidate'
response['Expires'] = 'Sat, 26 Jul 1997 05:00:00 GMT'
response['Pragma'] = 'no-cache'
return response
def json_format(self):
"""
        Return the photo data in JSON format for re-submission.
"""
return {
'pk': str(self.pk),
'comment': self.comment
}
def as_thumb_response(self):
"""
        Return the HTTP response containing the thumbnail image.
"""
wrapper = FileWrapper(self.image.thumbnail)
response = HttpResponse(wrapper, content_type=self.mimetype)
response['Cache-Control'] = 'no-store, no-cache, must-revalidate'
response['Expires'] = 'Sat, 26 Jul 1997 05:00:00 GMT'
response['Pragma'] = 'no-cache'
return response
def delete(self, force=False, *args, **kwargs):
"""
        Delete the image from the database.
"""
if self.locked and not force:
return
        self.image.delete()  # remove the image from the database
return super(PhotoAlbum, self).delete(*args, **kwargs)
meta = {'allow_inheritance': False,
'collection': 'album_photo',
'ordering': ['created'],
'indexes': [
{'fields': ['album', 'created']},
{'fields': ['created']},
]}
class FileFolder(Document):
"""
    Represents a file stored in a folder.
"""
    file = FileField(db_alias='fs')  # dedicated database for files
created = DateTimeField(default=datetime.datetime.now)
folder = ReferenceField('Folder')
@property
def mimetype(self):
return getattr(self.file, 'content_type')
@property
def filename(self):
"""
        Return the file name.
"""
return getattr(self.file, 'name')
@property
def size(self):
"""
        Return the file size in bytes.
"""
return getattr(self.file, 'length')
@property
def human_size(self):
"""
        Return the human-readable file size.
"""
return size(self.size)
def json_format(self):
"""
        Return the file metadata in JSON format.
"""
return {
'pk': str(self.pk),
'filename': self.filename,
'human_size': self.human_size
}
def as_response(self):
"""
        Return the HTTP response with the file as an attachment.
"""
wrapper = FileWrapper(self.file)
response = HttpResponse(wrapper, content_type=self.mimetype)
response['Content-Disposition'] = (
u'attachment; filename=%s' % self.filename).encode('utf8')
response['Cache-Control'] = 'no-cache'
return response
def delete(self, *args, **kwargs):
"""
        Remove the file from the database.
"""
        self.file.delete()  # remove the file from the database
return super(FileFolder, self).delete(*args, **kwargs)
meta = {'allow_inheritance': False,
'collection': 'file_folder',
'ordering': ['created'],
'indexes': [
{'fields': ['folder', 'created']},
{'fields': ['created']},
]}
class Folder(Document):
"""
    Represents a folder of files.
    Attributes:
    * ref: reference to the document that owns the folder
    * created: date and time the folder was created
"""
    ref = GenericReferenceField()  # document that owns the folder
created = DateTimeField(default=datetime.datetime.now)
@classmethod
def new_from_owner(cls, owner):
"""
        Create a folder for an object.
"""
obj = cls(ref=owner)
obj.save()
return obj
@property
def files(self):
"""
        Return the files contained in the folder.
"""
return FileFolder.objects(folder=self)
def put_file(self, infile, **kwargs):
"""
        Insert a file into the folder.
"""
f = FileFolder()
f.file.put(infile, **kwargs)
f.folder = self
f.save()
return f
def delete(self, *args, **kwargs):
"""
        Delete the folder and its files.
"""
for f in self.files:
f.delete()
return super(Folder, self).delete(*args, **kwargs)
meta = {'allow_inheritance': False,
'collection': 'folder',
'indexes': [
{'fields': ['ref']},
]}
| gpl-3.0 | 99,696,159,870,371,710 | 25.02349 | 93 | 0.554997 | false | 3.797747 | false | false | false |
onlynone/pysyscmd | syscmd/cmds.py | 1 | 2333 | # -*- coding: utf-8 -*-
class Syscmd(object):
def _cmd(self, cmd, *args, **kwargs):
"""Execute system commands
Args:
*args: The positional arguments are used as arguments to the
command. For example, the following python code:
_cmd("git, "commit", "--help")
would execute:
git commit --help
f: One of CALL, CHECK_CALL, or CHECK_OUTPUT. Corresponds to the
function from the subprocess module called to execute the command.
Defaults to CHECK_CALL
**kwargs: The keyword arguments are passed through to the subprocess
function as-is.
Returns:
Whatever is returned by the respective subprocess function. For
example, f=CALL would return the returncode attribute, and
f=CHECK_OUTPUT would return the content of stdout.
        Examples:
The following call:
_cmd("git", "commit", "-m", "Commit Message", cwd="/path/to/repo")
results in:
subprocess.check_call(["git", "commit", "-m", "Commit Message"], cwd="/path/to/repo")
And:
_cmd("git", "checkout", "-b", "branch_name", f=CHECK_OUTPUT, cwd="/path/to/repo")
results in:
subprocess.check_output(["git", "checkout", "-b", "branch_name"], cwd="/path/to/repo")
"""
import syscmd
f = kwargs.pop('f', syscmd.CHECK_CALL)
f = syscmd._sub_calls[f]
full_args = (cmd,) + tuple(args)
full_kwargs = syscmd._default_subprocess_kwargs.copy()
full_kwargs.update(kwargs)
return f(full_args, **full_kwargs)
def _which(self, cmd):
import os
for path in os.environ.get('PATH', '').split(os.pathsep):
if path == "":
continue
full_path = os.path.join(path, cmd)
if os.access(full_path, os.X_OK):
return full_path
return None
def __getattr__(self, name):
from functools import partial
cmd = self._which(name)
if cmd != None:
return partial(self._cmd, cmd)
raise AttributeError("'module' object has no attribute %r" % (name,))
import sys
sys.modules[__name__] = Syscmd()
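# Usage sketch (illustrative only): because sys.modules[__name__] is replaced
# by a Syscmd instance above, any executable found on PATH becomes a callable
# attribute of this module. The repository path below is an assumption, and
# CHECK_OUTPUT refers to the constant exposed by the parent syscmd package.
#
#     import syscmd
#     from syscmd import cmds
#     cmds.git("status", cwd="/path/to/repo")        # runs via check_call
#     sha = cmds.git("rev-parse", "HEAD", f=syscmd.CHECK_OUTPUT)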
| bsd-3-clause | -7,051,081,000,597,903,000 | 27.108434 | 102 | 0.545221 | false | 4.211191 | false | false | false |
YunoHost/yunohost | doc/generate_manpages.py | 1 | 2344 | """
Inspired by yunohost_completion.py (author: Christophe Vuillot)
=======
This script generates man pages for yunohost.
Pages are stored in OUTPUT_DIR
"""
import os
import yaml
import gzip
import argparse
from datetime import date
from collections import OrderedDict
from jinja2 import Template
base_path = os.path.split(os.path.realpath(__file__))[0]
template = Template(open(os.path.join(base_path, "manpage.template")).read())
THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ACTIONSMAP_FILE = os.path.join(THIS_SCRIPT_DIR, '../data/actionsmap/yunohost.yml')
def ordered_yaml_load(stream):
class OrderedLoader(yaml.Loader):
pass
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
lambda loader, node: OrderedDict(loader.construct_pairs(node)))
return yaml.load(stream, OrderedLoader)
def main():
parser = argparse.ArgumentParser(description="generate yunohost manpage based on actionsmap.yml")
parser.add_argument("-o", "--output", default="output/yunohost")
parser.add_argument("-z", "--gzip", action="store_true", default=False)
args = parser.parse_args()
if os.path.isdir(args.output):
if not os.path.exists(args.output):
os.makedirs(args.output)
output_path = os.path.join(args.output, "yunohost")
else:
output_dir = os.path.split(args.output)[0]
if output_dir and not os.path.exists(output_dir):
os.makedirs(output_dir)
output_path = args.output
# man pages of "yunohost *"
with open(ACTIONSMAP_FILE, 'r') as actionsmap:
        # Getting the dictionary containing what actions are possible per domain
actionsmap = ordered_yaml_load(actionsmap)
        # Iterate over a copy of the keys so entries can be removed safely.
        for i in list(actionsmap.keys()):
if i.startswith("_"):
del actionsmap[i]
today = date.today()
result = template.render(
month=today.strftime("%B"),
year=today.year,
categories=actionsmap,
str=str,
)
if not args.gzip:
with open(output_path, "w") as output:
output.write(result)
else:
with gzip.open(output_path, mode="w", compresslevel=9) as output:
output.write(result)
if __name__ == '__main__':
main()
| agpl-3.0 | 4,693,860,435,264,777,000 | 26.576471 | 101 | 0.638225 | false | 3.679749 | false | false | false |
eyedeeemm/nitrogen | win/pywin.py | 1 | 4849 | #!/usr/bin/env python3
import math, os, ctypes
import sdl2, sdl2.sdlmixer, sdl2.sdlimage
class Win:
# s, w, h, rect_ref
# run_fps, run_t_end, run_t_begin, run_frame_t, run_frame_t_ms
# window, windowsurface
# event, event_ref
# on_key, on_click, on_declick, on_move
# draw
def __init__ (self, w, h, s='win', run_fps = 60,
on_key = None, on_click = None, on_declick = None, on_move = None):
self.w, self.h = (w, h)
self.s = s
self.run_fps = run_fps
self.on_key = on_key
self.on_click = on_click
self.on_declick = on_declick
self.on_move = on_move
self.keyc = sdl2.keycode.SDL_SCANCODE_UNKNOWN
self.keys = [False] * sdl2.SDL_NUM_SCANCODES
self.mx, self.my = (0, 0)
self.mx_f, self.my_f = (0.0, 0.0)
self.mb0_, self.mb1_, self.mb2_ = (False, False, False)
if not self.create ():
return None
def create (self):
self.draw = ctypes.cdll.LoadLibrary ('draw.so')
if not self.draw:
return False
if sdl2.SDL_Init (sdl2.SDL_INIT_VIDEO):
return False
self.window = sdl2.SDL_CreateWindow (bytes(self.s, 'utf-8'),
sdl2.SDL_WINDOWPOS_CENTERED, sdl2.SDL_WINDOWPOS_CENTERED,
self.w, self.h, sdl2.SDL_WINDOW_SHOWN)
if not self.window:
return False
self.windowsurface = sdl2.SDL_GetWindowSurface (self.window)
# sdl2.mouse.SDL_ShowCursor (False)
self.rect_ref = ctypes.byref (sdl2.SDL_Rect (0, 0,
self.w, self.h))
self.draw.draw_init (ctypes.c_void_p (
self.windowsurface.contents.pixels), self.w, self.h)
sdl2.SDL_UpdateWindowSurface (self.window)
self.event = sdl2.SDL_Event ()
self.event_ref = ctypes.byref (self.event)
self.run_frame_t = 1.0 / self.run_fps
self.run_frame_t_ms = int(self.run_frame_t * 1000.0)
self.run_t_begin = self.run_t_end = sdl2.SDL_GetTicks ()
return True
def quit (self):
sdl2.SDL_DestroyWindow (self.window)
sdl2.SDL_Quit ()
def tick (self):
while sdl2.SDL_PollEvent (self.event_ref) != 0:
if self.event.type == sdl2.SDL_QUIT:
return False
elif self.event.type == sdl2.SDL_KEYDOWN:
if self.event.key.keysym.scancode ==\
sdl2.keycode.SDL_SCANCODE_ESCAPE:
return False
self.keyc = self.event.key.keysym.scancode
self.keys [self.keyc] = True
if self.on_key:
self.on_key ()
elif self.event.type == sdl2.SDL_KEYUP:
                # Clear the state of the released key before resetting keyc.
                self.keys [self.event.key.keysym.scancode] = False
                self.keyc = sdl2.keycode.SDL_SCANCODE_UNKNOWN
elif self.event.type == sdl2.SDL_MOUSEBUTTONDOWN:
if self.event.button.button == sdl2.SDL_BUTTON_LEFT:
self.mb0_ = True
elif self.event.button.button == sdl2.SDL_BUTTON_RIGHT:
self.mb1_ = True
elif self.event.button.button == sdl2.SDL_BUTTON_MIDDLE:
self.mb2_ = True
self.mx = self.event.button.x
self.my = self.event.button.y
self.mx_f = (self.mx / self.w) - 0.5
self.my_f = (self.my / self.h) - 0.5
if self.on_click:
self.on_click ()
elif self.event.type == sdl2.SDL_MOUSEBUTTONUP:
self.mx = self.event.button.x
self.my = self.event.button.y
self.mx_f = (self.mx / self.w) - 0.5
self.my_f = (self.my / self.h) - 0.5
if self.on_declick:
self.on_declick ()
if self.event.button.button == sdl2.SDL_BUTTON_LEFT:
self.mb0_ = False
elif self.event.button.button == sdl2.SDL_BUTTON_RIGHT:
self.mb1_ = False
elif self.event.button.button == sdl2.SDL_BUTTON_MIDDLE:
self.mb2_ = False
elif self.event.type == sdl2.SDL_MOUSEMOTION:
self.mx = self.event.motion.x
self.my = self.event.motion.y
self.mx_f = (self.mx / self.w) - 0.5
self.my_f = (self.my / self.h) - 0.5
if self.on_move:
self.on_move ()
self.run_t_end = sdl2.SDL_GetTicks ()
_t = self.run_t_begin - self.run_t_end + self.run_frame_t_ms
if _t > 0:
sdl2.SDL_Delay (_t)
self.run_t_begin = sdl2.SDL_GetTicks ()
return True
def render (self):
sdl2.SDL_UpdateWindowSurface (self.window)
return True
| unlicense | 2,487,375,818,377,089,000 | 33.635714 | 79 | 0.516601 | false | 3.247823 | false | false | false |
bluedynamics/bda.disclaimer | src/bda/disclaimer/browser/disclaimer.py | 1 | 1932 | from zExceptions import Redirect
from zope.component import ComponentLookupError
from plone.app.layout.viewlets.common import ViewletBase
from Products.Five import BrowserView
from Products.CMFPlone.utils import getToolByName
from bda.disclaimer.interfaces import IDisclaimerText
class DisclaimerViewlet(ViewletBase):
def update(self):
self.accepted = self.request.cookies.get('_dc_acc')
def render(self):
if self.accepted \
or self.request['ACTUAL_URL'].endswith('/@@disclaimer'):
return ''
purl = getToolByName(self.context, 'portal_url')
pobj = purl.getPortalObject()
url = '%s/%s' % (pobj.absolute_url(), '@@disclaimer')
raise Redirect(url)
class DisclaimerPage(BrowserView):
def currentlang(self):
plt = getToolByName(self.context, 'portal_languages')
if not plt:
return None
return plt.getLanguageBindings()[0]
def pagetitle(self):
purl = getToolByName(self.context, 'portal_url')
pobj = purl.getPortalObject()
return pobj.title
def checkdisclaimer(self):
display = True
if self.request.form.get('_dc_accept') == '1' \
and self.request.form.get('_dc_submitted'):
self.request.response.setCookie('_dc_acc', '1', path='/')
display = False
elif self.request.cookies.get('_dc_acc'):
display = False
if not display:
purl = getToolByName(self.context, 'portal_url')
pobj = purl.getPortalObject()
url = pobj.absolute_url()
raise Redirect(url)
def disclaimertext(self):
try:
return IDisclaimerText(self.context)()
except ComponentLookupError, e:
return 'No Disclaimer text registered. %s' % str(e)
except AttributeError, e:
return 'Disclaimer Text not provided properly. %s' % str(e)
| bsd-3-clause | 66,484,574,228,338,460 | 32.894737 | 71 | 0.628364 | false | 3.90303 | false | false | false |
aerospike/aerospike-admin | test/e2e/util.py | 1 | 5905 | # Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
def parse_record(parent_field, record):
field_names = []
field_values = []
for name in record:
if isinstance(record[name], dict):
new_parent_field = parent_field.copy()
new_parent_field.append(name)
names = " ".join(new_parent_field)
if "converted" in record[name]:
field_names.append(names)
field_values.append(record[name]["converted"])
elif "raw" in record[name]:
field_names.append(names)
field_values.append(record[name]["raw"])
else:
# Must have subgroups:
sub_names, sub_values = parse_record(new_parent_field, record[name])
field_names.extend(sub_names)
field_values.extend(sub_values)
else:
raise Exception("Unhandled parsing")
return field_names, field_values
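# Illustrative sketch of the nested record shape parse_record() expects (the
# field names are assumptions): leaves carry 'converted' or 'raw' values and
# are flattened into parallel name/value lists.
def _example_parse_record():
    record = {
        'node': {'converted': '10.0.0.1:3000', 'raw': '10.0.0.1:3000'},
        'memory': {
            'used': {'converted': '1.2 GB', 'raw': 1288490188},
            'total': {'raw': 4294967296},
        },
    }
    names, values = parse_record([], record)
    # names  -> ['node', 'memory used', 'memory total']
    # values -> ['10.0.0.1:3000', '1.2 GB', 4294967296]
    return names, values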
def parse_output(actual_out={}, horizontal=False, header_len=2, merge_header=True):
"""
    Common parser for all show commands. Returns a tuple of
    (title, description, data_names, data_values, num_records).
"""
title = actual_out["title"]
description = actual_out.get("description", "")
data_names = {}
data_values = []
num_records = 0
for group in actual_out["groups"]:
for record in group["records"]:
temp_names, temp_values = parse_record([], record)
# We assume every record has the same set of names
if len(data_names) == 0:
data_names = temp_names
data_values.append(temp_values)
num_records += 1
return title, description, data_names, data_values, num_records
def get_separate_output(in_str=""):
_regex = re.compile(r"((?<=^{).*?(?=^}))", re.MULTILINE | re.DOTALL)
out = re.findall(_regex, in_str)
ls = []
for item in out:
item = remove_escape_sequence(item)
item = "{" + item + "}"
ls.append(json.loads(item))
return ls
def get_merged_header(*lines):
h = [[_f for _f in _h.split(" ") if _f] for _h in lines]
header = []
if len(h) == 0 or any(len(h[i]) != len(h[i + 1]) for i in range(len(h) - 1)):
return header
for idx in range(len(h[0])):
header_i = h[0][idx]
for jdx in range(len(h) - 1):
if h[jdx + 1][idx] == ".":
break
header_i += " " + h[jdx + 1][idx]
header.append(header_i)
return header
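# Illustrative sketch for get_merged_header (the column labels are made up):
# stacked header lines are merged column-wise, '.' marking "no continuation".
def _example_get_merged_header():
    line1 = "Node   Build   Cluster"
    line2 = ".      .       Size"
    # -> ['Node', 'Build', 'Cluster Size']
    return get_merged_header(line1, line2)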
def check_for_subset(actual_list, expected_sub_list):
if not expected_sub_list:
return True
if not actual_list:
return False
for i in expected_sub_list:
if isinstance(i, tuple):
found = False
for s_i in i:
if s_i is None:
found = True
break
if s_i in actual_list:
found = True
break
if not found:
print(i, actual_list)
return False
else:
if i not in actual_list:
print(i)
return False
return True
# Checks whether any of the expected sub-lists is a subset of actual_list.
def check_for_subset_in_list_of_lists(actual_list, list_of_expected_sub_lists):
for expected_list in list_of_expected_sub_lists:
if check_for_subset(actual_list, expected_list):
return True
return False
def remove_escape_sequence(line):
ansi_escape = re.compile(r"(\x9b|\x1b\[)[0-?]*[ -\/]*[@-~]")
return ansi_escape.sub("", line)
def check_for_types(actual_lists, expected_types):
def is_float(x):
try:
val = float(x)
if "." in x:
return True
return False
except ValueError:
return False
def is_int(x):
try:
val = int(x)
if "." in x:
return False
return True
except ValueError:
return False
def is_bool(x):
if x in ("True", "true", "False", "false"):
return True
return False
def check_list_against_types(a_list):
if a_list is None or expected_types is None:
return False
if len(a_list) == len(expected_types):
for idx in range(len(a_list)):
typ = expected_types[idx]
val = a_list[idx]
if typ == int:
if not is_int(val):
return False
elif typ == float:
if not is_float(val):
return False
elif typ == bool:
if not is_bool(val):
return False
elif typ == str:
if any([is_bool(val), is_int(val), is_float(val)]):
return False
else:
raise Exception("Type is not yet handles in test_util.py", typ)
return True
return False
for actual_list in actual_lists:
if check_list_against_types(actual_list) == False:
return False
return True
| apache-2.0 | 3,042,231,427,992,210,000 | 30.243386 | 84 | 0.537341 | false | 4.038988 | false | false | false |
phalcon/readthedocs.org | readthedocs/core/forms.py | 5 | 3165 | import logging
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet
from django import forms
from django.forms.fields import CharField
from django.utils.translation import ugettext_lazy as _
from models import UserProfile
log = logging.getLogger(__name__)
class UserProfileForm(forms.ModelForm):
first_name = CharField(label=_('First name'), required=False)
last_name = CharField(label=_('Last name'), required=False)
class Meta:
model = UserProfile
# Don't allow users edit someone else's user page,
exclude = ('user', 'whitelisted')
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
try:
self.fields['first_name'].initial = self.instance.user.first_name
self.fields['last_name'].initial = self.instance.user.last_name
except:
pass
def save(self, *args, **kwargs):
first_name = self.cleaned_data.pop('first_name', None)
last_name = self.cleaned_data.pop('last_name', None)
profile = super(UserProfileForm, self).save(*args, **kwargs)
if kwargs.get('commit', True):
user = profile.user
user.first_name = first_name
user.last_name = last_name
user.save()
return profile
class FacetField(forms.MultipleChoiceField):
'''
For filtering searches on a facet, with validation for the format
of facet values.
'''
def valid_value(self, value):
'''
Although this is a choice field, no choices need to be supplied.
Instead, we just validate that the value is in the correct format
for facet filtering (facet_name:value)
'''
if ":" not in value:
return False
return True
class FacetedSearchForm(SearchForm):
'''
Supports fetching faceted results with a corresponding query.
`facets`
A list of facet names for which to get facet counts
`models`
Limit the search to one or more models
'''
selected_facets = FacetField(required=False)
def __init__(self, *args, **kwargs):
facets = kwargs.pop('facets', [])
models = kwargs.pop('models', [])
super(FacetedSearchForm, self).__init__(*args, **kwargs)
for facet in facets:
self.searchqueryset = self.searchqueryset.facet(facet)
if models:
self.searchqueryset = self.searchqueryset.models(*models)
def clean_selected_facets(self):
facets = self.cleaned_data['selected_facets']
cleaned_facets = []
clean = SearchQuerySet().query.clean
for facet in facets:
field, value = facet.split(":", 1)
if not value: # Ignore empty values
continue
value = clean(value)
cleaned_facets.append(u'%s:"%s"' % (field, value))
return cleaned_facets
def search(self):
sqs = super(FacetedSearchForm, self).search()
for facet in self.cleaned_data['selected_facets']:
sqs = sqs.narrow(facet)
self.searchqueryset = sqs
return sqs
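# Illustrative sketch (not part of the original module): the field and facet
# names below are assumptions, showing the "facet_name:value" format that
# FacetField validates and FacetedSearchForm narrows on.
def _example_faceted_search():
    data = {'q': 'install', 'selected_facets': ['project:docs', 'version:latest']}
    form = FacetedSearchForm(data, facets=['project', 'version'], models=[])
    if form.is_valid():
        return form.search()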
| mit | 4,467,745,558,170,281,000 | 31.628866 | 77 | 0.618009 | false | 4.131854 | false | false | false |
kogotko/carburetor | openstack_dashboard/api/neutron.py | 1 | 48951 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Cisco Systems, Inc.
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import copy
import logging
import netaddr
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
import six
from horizon import exceptions
from horizon import messages
from horizon.utils.memoized import memoized
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.api import nova
from openstack_dashboard.contrib.developer.profiler import api as profiler
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
IP_VERSION_DICT = {4: 'IPv4', 6: 'IPv6'}
OFF_STATE = 'OFF'
ON_STATE = 'ON'
ROUTER_INTERFACE_OWNERS = (
'network:router_interface',
'network:router_interface_distributed',
'network:ha_router_replicated_interface'
)
class NeutronAPIDictWrapper(base.APIDictWrapper):
def __init__(self, apidict):
if 'admin_state_up' in apidict:
if apidict['admin_state_up']:
apidict['admin_state'] = 'UP'
else:
apidict['admin_state'] = 'DOWN'
# Django cannot handle a key name with ':', so use '__'.
apidict.update({
key.replace(':', '__'): value
for key, value in apidict.items()
if ':' in key
})
super(NeutronAPIDictWrapper, self).__init__(apidict)
def set_id_as_name_if_empty(self, length=8):
try:
if not self._apidict['name'].strip():
id = self._apidict['id']
if length:
id = id[:length]
self._apidict['name'] = '(%s)' % id
except KeyError:
pass
def items(self):
return self._apidict.items()
@property
def name_or_id(self):
return (self._apidict.get('name').strip() or
'(%s)' % self._apidict['id'][:13])
class Agent(NeutronAPIDictWrapper):
"""Wrapper for neutron agents."""
class Network(NeutronAPIDictWrapper):
"""Wrapper for neutron Networks."""
class Subnet(NeutronAPIDictWrapper):
"""Wrapper for neutron subnets."""
def __init__(self, apidict):
apidict['ipver_str'] = get_ipver_str(apidict['ip_version'])
super(Subnet, self).__init__(apidict)
class SubnetPool(NeutronAPIDictWrapper):
"""Wrapper for neutron subnetpools."""
class Port(NeutronAPIDictWrapper):
"""Wrapper for neutron ports."""
def __init__(self, apidict):
if 'mac_learning_enabled' in apidict:
apidict['mac_state'] = \
ON_STATE if apidict['mac_learning_enabled'] else OFF_STATE
pairs = apidict.get('allowed_address_pairs')
if pairs:
apidict = copy.deepcopy(apidict)
wrapped_pairs = [PortAllowedAddressPair(pair) for pair in pairs]
apidict['allowed_address_pairs'] = wrapped_pairs
super(Port, self).__init__(apidict)
class PortAllowedAddressPair(NeutronAPIDictWrapper):
"""Wrapper for neutron port allowed address pairs."""
def __init__(self, addr_pair):
super(PortAllowedAddressPair, self).__init__(addr_pair)
# Horizon references id property for table operations
self.id = addr_pair['ip_address']
class Router(NeutronAPIDictWrapper):
"""Wrapper for neutron routers."""
class RouterStaticRoute(NeutronAPIDictWrapper):
"""Wrapper for neutron routes extra route."""
def __init__(self, route):
super(RouterStaticRoute, self).__init__(route)
# Horizon references id property for table operations
self.id = route['nexthop'] + ":" + route['destination']
class SecurityGroup(NeutronAPIDictWrapper):
# Required attributes: id, name, description, tenant_id, rules
def __init__(self, sg, sg_dict=None):
if sg_dict is None:
sg_dict = {sg['id']: sg['name']}
sg['rules'] = [SecurityGroupRule(rule, sg_dict)
for rule in sg['security_group_rules']]
super(SecurityGroup, self).__init__(sg)
def to_dict(self):
return {k: self._apidict[k] for k in self._apidict if k != 'rules'}
@six.python_2_unicode_compatible
class SecurityGroupRule(NeutronAPIDictWrapper):
# Required attributes:
# id, parent_group_id
# ip_protocol, from_port, to_port, ip_range, group
# ethertype, direction (Neutron specific)
def _get_secgroup_name(self, sg_id, sg_dict):
if sg_id:
if sg_dict is None:
sg_dict = {}
# If sg name not found in sg_dict,
# first two parts of UUID is used as sg name.
return sg_dict.get(sg_id, sg_id[:13])
else:
return u''
def __init__(self, sgr, sg_dict=None):
# In Neutron, if both remote_ip_prefix and remote_group_id are None,
# it means all remote IP range is allowed, i.e., 0.0.0.0/0 or ::/0.
if not sgr['remote_ip_prefix'] and not sgr['remote_group_id']:
if sgr['ethertype'] == 'IPv6':
sgr['remote_ip_prefix'] = '::/0'
else:
sgr['remote_ip_prefix'] = '0.0.0.0/0'
rule = {
'id': sgr['id'],
'parent_group_id': sgr['security_group_id'],
'direction': sgr['direction'],
'ethertype': sgr['ethertype'],
'ip_protocol': sgr['protocol'],
'from_port': sgr['port_range_min'],
'to_port': sgr['port_range_max'],
}
cidr = sgr['remote_ip_prefix']
rule['ip_range'] = {'cidr': cidr} if cidr else {}
group = self._get_secgroup_name(sgr['remote_group_id'], sg_dict)
rule['group'] = {'name': group} if group else {}
super(SecurityGroupRule, self).__init__(rule)
def __str__(self):
if 'name' in self.group:
remote = self.group['name']
elif 'cidr' in self.ip_range:
remote = self.ip_range['cidr']
else:
remote = 'ANY'
direction = 'to' if self.direction == 'egress' else 'from'
if self.from_port:
if self.from_port == self.to_port:
proto_port = ("%s/%s" %
(self.from_port, self.ip_protocol.lower()))
else:
proto_port = ("%s-%s/%s" %
(self.from_port, self.to_port,
self.ip_protocol.lower()))
elif self.ip_protocol:
try:
ip_proto = int(self.ip_protocol)
proto_port = "ip_proto=%d" % ip_proto
except Exception:
# well-defined IP protocol name like TCP, UDP, ICMP.
proto_port = self.ip_protocol
else:
proto_port = ''
return (_('ALLOW %(ethertype)s %(proto_port)s '
'%(direction)s %(remote)s') %
{'ethertype': self.ethertype,
'proto_port': proto_port,
'remote': remote,
'direction': direction})
class SecurityGroupManager(network_base.SecurityGroupManager):
backend = 'neutron'
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
def _list(self, **filters):
secgroups = self.client.list_security_groups(**filters)
return [SecurityGroup(sg) for sg in secgroups.get('security_groups')]
@profiler.trace
def list(self):
tenant_id = self.request.user.tenant_id
return self._list(tenant_id=tenant_id)
def _sg_name_dict(self, sg_id, rules):
"""Create a mapping dict from secgroup id to its name."""
related_ids = set([sg_id])
related_ids |= set(filter(None, [r['remote_group_id'] for r in rules]))
related_sgs = self.client.list_security_groups(id=related_ids,
fields=['id', 'name'])
related_sgs = related_sgs.get('security_groups')
return dict((sg['id'], sg['name']) for sg in related_sgs)
@profiler.trace
def get(self, sg_id):
secgroup = self.client.show_security_group(sg_id).get('security_group')
sg_dict = self._sg_name_dict(sg_id, secgroup['security_group_rules'])
return SecurityGroup(secgroup, sg_dict)
@profiler.trace
def create(self, name, desc):
body = {'security_group': {'name': name,
'description': desc,
'tenant_id': self.request.user.project_id}}
secgroup = self.client.create_security_group(body)
return SecurityGroup(secgroup.get('security_group'))
@profiler.trace
def update(self, sg_id, name, desc):
body = {'security_group': {'name': name,
'description': desc}}
secgroup = self.client.update_security_group(sg_id, body)
return SecurityGroup(secgroup.get('security_group'))
@profiler.trace
def delete(self, sg_id):
self.client.delete_security_group(sg_id)
@profiler.trace
def rule_create(self, parent_group_id,
direction=None, ethertype=None,
ip_protocol=None, from_port=None, to_port=None,
cidr=None, group_id=None):
if not cidr:
cidr = None
if from_port < 0:
from_port = None
if to_port < 0:
to_port = None
if isinstance(ip_protocol, int) and ip_protocol < 0:
ip_protocol = None
body = {'security_group_rule':
{'security_group_id': parent_group_id,
'direction': direction,
'ethertype': ethertype,
'protocol': ip_protocol,
'port_range_min': from_port,
'port_range_max': to_port,
'remote_ip_prefix': cidr,
'remote_group_id': group_id}}
try:
rule = self.client.create_security_group_rule(body)
except neutron_exc.Conflict:
raise exceptions.Conflict(_('Security group rule already exists.'))
rule = rule.get('security_group_rule')
sg_dict = self._sg_name_dict(parent_group_id, [rule])
return SecurityGroupRule(rule, sg_dict)
@profiler.trace
def rule_delete(self, sgr_id):
self.client.delete_security_group_rule(sgr_id)
@profiler.trace
def list_by_instance(self, instance_id):
"""Gets security groups of an instance."""
ports = port_list(self.request, device_id=instance_id)
sg_ids = []
for p in ports:
sg_ids += p.security_groups
return self._list(id=set(sg_ids)) if sg_ids else []
@profiler.trace
def update_instance_security_group(self, instance_id,
new_security_group_ids):
ports = port_list(self.request, device_id=instance_id)
for p in ports:
params = {'security_groups': new_security_group_ids}
port_update(self.request, p.id, **params)
class FloatingIp(base.APIDictWrapper):
_attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id',
'instance_type', 'pool']
def __init__(self, fip):
fip['ip'] = fip['floating_ip_address']
fip['fixed_ip'] = fip['fixed_ip_address']
fip['pool'] = fip['floating_network_id']
super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
pass
class FloatingIpTarget(base.APIDictWrapper):
pass
class FloatingIpManager(network_base.FloatingIpManager):
device_owner_map = {
'compute:': 'compute',
'neutron:LOADBALANCER': 'loadbalancer',
}
def __init__(self, request):
self.request = request
self.client = neutronclient(request)
@profiler.trace
def list_pools(self):
search_opts = {'router:external': True}
return [FloatingIpPool(pool) for pool
in self.client.list_networks(**search_opts).get('networks')]
def _get_instance_type_from_device_owner(self, device_owner):
for key, value in self.device_owner_map.items():
if device_owner.startswith(key):
return value
return device_owner
def _set_instance_info(self, fip, port=None):
if fip['port_id']:
if not port:
port = port_get(self.request, fip['port_id'])
fip['instance_id'] = port.device_id
fip['instance_type'] = self._get_instance_type_from_device_owner(
port.device_owner)
else:
fip['instance_id'] = None
fip['instance_type'] = None
@profiler.trace
def list(self, all_tenants=False, **search_opts):
if not all_tenants:
tenant_id = self.request.user.tenant_id
# In Neutron, list_floatingips returns Floating IPs from
# all tenants when the API is called with admin role, so
# we need to filter them with tenant_id.
search_opts['tenant_id'] = tenant_id
port_search_opts = {'tenant_id': tenant_id}
else:
port_search_opts = {}
fips = self.client.list_floatingips(**search_opts)
fips = fips.get('floatingips')
# Get port list to add instance_id to floating IP list
# instance_id is stored in device_id attribute
ports = port_list(self.request, **port_search_opts)
port_dict = collections.OrderedDict([(p['id'], p) for p in ports])
for fip in fips:
self._set_instance_info(fip, port_dict.get(fip['port_id']))
return [FloatingIp(fip) for fip in fips]
@profiler.trace
def get(self, floating_ip_id):
fip = self.client.show_floatingip(floating_ip_id).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
@profiler.trace
def allocate(self, pool, tenant_id=None, **params):
if not tenant_id:
tenant_id = self.request.user.project_id
create_dict = {'floating_network_id': pool,
'tenant_id': tenant_id}
if 'floating_ip_address' in params:
create_dict['floating_ip_address'] = params['floating_ip_address']
fip = self.client.create_floatingip(
{'floatingip': create_dict}).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
@profiler.trace
def release(self, floating_ip_id):
self.client.delete_floatingip(floating_ip_id)
@profiler.trace
def associate(self, floating_ip_id, port_id):
# NOTE: In Neutron Horizon floating IP support, port_id is
# "<port_id>_<ip_address>" format to identify multiple ports.
pid, ip_address = port_id.split('_', 1)
update_dict = {'port_id': pid,
'fixed_ip_address': ip_address}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
@profiler.trace
def disassociate(self, floating_ip_id):
update_dict = {'port_id': None}
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
def _get_reachable_subnets(self, ports):
if not is_enabled_by_config('enable_fip_topology_check', True):
# All subnets are reachable from external network
return set(
p.fixed_ips[0]['subnet_id'] for p in ports if p.fixed_ips
)
# Retrieve subnet list reachable from external network
ext_net_ids = [ext_net.id for ext_net in self.list_pools()]
gw_routers = [r.id for r in router_list(self.request)
if (r.external_gateway_info and
r.external_gateway_info.get('network_id')
in ext_net_ids)]
reachable_subnets = set([p.fixed_ips[0]['subnet_id'] for p in ports
if ((p.device_owner in
ROUTER_INTERFACE_OWNERS)
and (p.device_id in gw_routers))])
# we have to include any shared subnets as well because we may not
# have permission to see the router interface to infer connectivity
shared = set([s.id for n in network_list(self.request, shared=True)
for s in n.subnets])
return reachable_subnets | shared
@profiler.trace
def list_targets(self):
tenant_id = self.request.user.tenant_id
ports = port_list(self.request, tenant_id=tenant_id)
servers, has_more = nova.server_list(self.request, detailed=False)
server_dict = collections.OrderedDict(
[(s.id, s.name) for s in servers])
reachable_subnets = self._get_reachable_subnets(ports)
targets = []
for p in ports:
# Remove network ports from Floating IP targets
if p.device_owner.startswith('network:'):
continue
port_id = p.id
server_name = server_dict.get(p.device_id)
for ip in p.fixed_ips:
if ip['subnet_id'] not in reachable_subnets:
continue
target = {'name': '%s: %s' % (server_name, ip['ip_address']),
'id': '%s_%s' % (port_id, ip['ip_address']),
'port_id': port_id,
'instance_id': p.device_id}
targets.append(FloatingIpTarget(target))
return targets
def _target_ports_by_instance(self, instance_id):
if not instance_id:
return None
search_opts = {'device_id': instance_id}
return port_list(self.request, **search_opts)
@profiler.trace
def get_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
targets = [target for target in target_list
if target['instance_id'] == instance_id]
if not targets:
return None
return targets[0]['id']
else:
            # In Neutron one port can have multiple ip addresses, so this
            # method picks up the first one and generates the target id.
ports = self._target_ports_by_instance(instance_id)
if not ports:
return None
return '{0}_{1}'.format(ports[0].id,
ports[0].fixed_ips[0]['ip_address'])
@profiler.trace
def list_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
return [target['id'] for target in target_list
if target['instance_id'] == instance_id]
else:
ports = self._target_ports_by_instance(instance_id)
return ['{0}_{1}'.format(p.id, p.fixed_ips[0]['ip_address'])
for p in ports]
def is_simple_associate_supported(self):
# NOTE: There are two reason that simple association support
# needs more considerations. (1) Neutron does not support the
# default floating IP pool at the moment. It can be avoided
# in case where only one floating IP pool exists.
# (2) Neutron floating IP is associated with each VIF and
# we need to check whether such VIF is only one for an instance
# to enable simple association support.
return False
def is_supported(self):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get('enable_router', True)
def get_ipver_str(ip_version):
"""Convert an ip version number to a human-friendly string."""
return IP_VERSION_DICT.get(ip_version, '')
@memoized
def neutronclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
c = neutron_client.Client(token=request.user.token.id,
auth_url=base.url_for(request, 'identity'),
endpoint_url=base.url_for(request, 'network'),
insecure=insecure, ca_cert=cacert)
return c
@profiler.trace
def list_resources_with_long_filters(list_method,
filter_attr, filter_values, **params):
"""List neutron resources with handling RequestURITooLong exception.
If filter parameters are long, list resources API request leads to
414 error (URL is too long). For such case, this method split
list parameters specified by a list_field argument into chunks
and call the specified list_method repeatedly.
:param list_method: Method used to retrieve resource list.
:param filter_attr: attribute name to be filtered. The value corresponding
to this attribute is specified by "filter_values".
If you want to specify more attributes for a filter condition,
pass them as keyword arguments like "attr2=values2".
:param filter_values: values of "filter_attr" to be filtered.
If filter_values are too long and the total URI length exceed the
maximum length supported by the neutron server, filter_values will
be split into sub lists if filter_values is a list.
:param params: parameters to pass a specified listing API call
without any changes. You can specify more filter conditions
in addition to a pair of filter_attr and filter_values.
"""
try:
params[filter_attr] = filter_values
return list_method(**params)
except neutron_exc.RequestURITooLong as uri_len_exc:
# The URI is too long because of too many filter values.
# Use the excess attribute of the exception to know how many
# filter values can be inserted into a single request.
# We consider only the filter condition from (filter_attr,
# filter_values) and do not consider other filter conditions
# which may be specified in **params.
if type(filter_values) != list:
filter_values = [filter_values]
# Length of each query filter is:
# <key>=<value>& (e.g., id=<uuid>)
# The length will be key_len + value_maxlen + 2
all_filter_len = sum(len(filter_attr) + len(val) + 2
for val in filter_values)
allowed_filter_len = all_filter_len - uri_len_exc.excess
val_maxlen = max(len(val) for val in filter_values)
filter_maxlen = len(filter_attr) + val_maxlen + 2
chunk_size = allowed_filter_len // filter_maxlen
resources = []
for i in range(0, len(filter_values), chunk_size):
params[filter_attr] = filter_values[i:i + chunk_size]
resources.extend(list_method(**params))
return resources
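# Illustrative usage (editor's addition): this mirrors how the helper is
# called later in this module; "instance_ids" is a hypothetical list of
# server UUIDs.
#
#     ports = list_resources_with_long_filters(
#         port_list, 'device_id', instance_ids, request=request)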
@profiler.trace
def network_list(request, **params):
LOG.debug("network_list(): params=%s", params)
networks = neutronclient(request).list_networks(**params).get('networks')
# Get subnet list to expand subnet info in network list.
subnets = subnet_list(request)
subnet_dict = dict([(s['id'], s) for s in subnets])
# Expand subnet list from subnet_id to values.
for n in networks:
# Due to potential timing issues, we can't assume the subnet_dict data
# is in sync with the network data.
n['subnets'] = [subnet_dict[s] for s in n.get('subnets', []) if
s in subnet_dict]
return [Network(n) for n in networks]
@profiler.trace
def network_list_for_tenant(request, tenant_id, include_external=False,
**params):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
    If requested_networks is specified, it searches requested_networks only.
"""
LOG.debug("network_list_for_tenant(): tenant_id=%(tenant_id)s, "
"params=%(params)s", {'tenant_id': tenant_id, 'params': params})
networks = []
shared = params.get('shared')
if shared is not None:
del params['shared']
if shared in (None, False):
# If a user has admin role, network list returned by Neutron API
# contains networks that do not belong to that tenant.
# So we need to specify tenant_id when calling network_list().
networks += network_list(request, tenant_id=tenant_id,
shared=False, **params)
if shared in (None, True):
# In the current Neutron API, there is no way to retrieve
# both owner networks and public networks in a single API call.
networks += network_list(request, shared=True, **params)
params['router:external'] = params.get('router:external', True)
if params['router:external'] and include_external:
if shared is not None:
params['shared'] = shared
fetched_net_ids = [n.id for n in networks]
# Retrieves external networks when router:external is not specified
# in (filtering) params or router:external=True filter is specified.
# When router:external=False is specified there is no need to query
# networking API because apparently nothing will match the filter.
ext_nets = network_list(request, **params)
networks += [n for n in ext_nets if
n.id not in fetched_net_ids]
return networks
@profiler.trace
def network_get(request, network_id, expand_subnet=True, **params):
LOG.debug("network_get(): netid=%(network_id)s, params=%(params)s",
{'network_id': network_id, 'params': params})
network = neutronclient(request).show_network(network_id,
**params).get('network')
if expand_subnet:
if request.user.tenant_id == network['tenant_id'] or network['shared']:
# Since the number of subnets per network must be small,
# call subnet_get() for each subnet instead of calling
# subnet_list() once.
network['subnets'] = [subnet_get(request, sid)
for sid in network['subnets']]
return Network(network)
@profiler.trace
def network_create(request, **kwargs):
"""Create a network object.
:param request: request context
:param tenant_id: (optional) tenant id of the network created
:param name: (optional) name of the network created
:returns: Network object
"""
LOG.debug("network_create(): kwargs = %s", kwargs)
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body = {'network': kwargs}
network = neutronclient(request).create_network(body=body).get('network')
return Network(network)
@profiler.trace
def network_update(request, network_id, **kwargs):
LOG.debug("network_update(): netid=%(network_id)s, params=%(params)s",
{'network_id': network_id, 'params': kwargs})
body = {'network': kwargs}
network = neutronclient(request).update_network(network_id,
body=body).get('network')
return Network(network)
@profiler.trace
def network_delete(request, network_id):
LOG.debug("network_delete(): netid=%s", network_id)
neutronclient(request).delete_network(network_id)
@profiler.trace
def subnet_list(request, **params):
LOG.debug("subnet_list(): params=%s", params)
subnets = neutronclient(request).list_subnets(**params).get('subnets')
return [Subnet(s) for s in subnets]
@profiler.trace
def subnet_get(request, subnet_id, **params):
LOG.debug("subnet_get(): subnetid=%(subnet_id)s, params=%(params)s",
{'subnet_id': subnet_id, 'params': params})
subnet = neutronclient(request).show_subnet(subnet_id,
**params).get('subnet')
return Subnet(subnet)
@profiler.trace
def subnet_create(request, network_id, **kwargs):
"""Create a subnet on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param cidr: (optional) subnet IP address range
:param ip_version: (optional) IP version (4 or 6)
:param gateway_ip: (optional) IP address of gateway
:param tenant_id: (optional) tenant id of the subnet created
:param name: (optional) name of the subnet created
:param subnetpool_id: (optional) subnetpool to allocate prefix from
:param prefixlen: (optional) length of prefix to allocate
:returns: Subnet object
    Although both cidr+ip_version and subnetpool_id+prefixlen are listed as
    optional, you MUST pass along one of the combinations to get a successful
    result.
"""
LOG.debug("subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s",
{'network_id': network_id, 'kwargs': kwargs})
body = {'subnet': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnet'].update(kwargs)
subnet = neutronclient(request).create_subnet(body=body).get('subnet')
return Subnet(subnet)
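# Illustrative usage (editor's addition): the two accepted parameter
# combinations described in the docstring above; network_id and pool_id are
# hypothetical, and the CIDR is an RFC 5737 documentation range.
#
#     subnet_create(request, network_id, name='subnet1',
#                   cidr='192.0.2.0/24', ip_version=4)
#     subnet_create(request, network_id, name='subnet2',
#                   subnetpool_id=pool_id, prefixlen=26)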
@profiler.trace
def subnet_update(request, subnet_id, **kwargs):
LOG.debug("subnet_update(): subnetid=%(subnet_id)s, kwargs=%(kwargs)s",
{'subnet_id': subnet_id, 'kwargs': kwargs})
body = {'subnet': kwargs}
subnet = neutronclient(request).update_subnet(subnet_id,
body=body).get('subnet')
return Subnet(subnet)
@profiler.trace
def subnet_delete(request, subnet_id):
LOG.debug("subnet_delete(): subnetid=%s", subnet_id)
neutronclient(request).delete_subnet(subnet_id)
@profiler.trace
def subnetpool_list(request, **params):
LOG.debug("subnetpool_list(): params=%s", params)
subnetpools = \
neutronclient(request).list_subnetpools(**params).get('subnetpools')
return [SubnetPool(s) for s in subnetpools]
@profiler.trace
def subnetpool_get(request, subnetpool_id, **params):
LOG.debug("subnetpool_get(): subnetpoolid=%(subnetpool_id)s, "
"params=%(params)s", {'subnetpool_id': subnetpool_id,
'params': params})
subnetpool = \
neutronclient(request).show_subnetpool(subnetpool_id,
**params).get('subnetpool')
return SubnetPool(subnetpool)
@profiler.trace
def subnetpool_create(request, name, prefixes, **kwargs):
"""Create a subnetpool.
ip_version is auto-detected in back-end.
Parameters:
request -- Request context
name -- Name for subnetpool
prefixes -- List of prefixes for pool
Keyword Arguments (optional):
min_prefixlen -- Minimum prefix length for allocations from pool
max_prefixlen -- Maximum prefix length for allocations from pool
default_prefixlen -- Default prefix length for allocations from pool
default_quota -- Default quota for allocations from pool
shared -- Subnetpool should be shared (Admin-only)
tenant_id -- Owner of subnetpool
Returns:
SubnetPool object
"""
LOG.debug("subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, "
"kwargs=%(kwargs)s", {'name': name, 'prefixes': prefixes,
'kwargs': kwargs})
body = {'subnetpool':
{'name': name,
'prefixes': prefixes,
}
}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnetpool'].update(kwargs)
subnetpool = \
neutronclient(request).create_subnetpool(body=body).get('subnetpool')
return SubnetPool(subnetpool)
@profiler.trace
def subnetpool_update(request, subnetpool_id, **kwargs):
LOG.debug("subnetpool_update(): subnetpoolid=%(subnetpool_id)s, "
"kwargs=%(kwargs)s", {'subnetpool_id': subnetpool_id,
'kwargs': kwargs})
body = {'subnetpool': kwargs}
subnetpool = \
neutronclient(request).update_subnetpool(subnetpool_id,
body=body).get('subnetpool')
return SubnetPool(subnetpool)
@profiler.trace
def subnetpool_delete(request, subnetpool_id):
LOG.debug("subnetpool_delete(): subnetpoolid=%s", subnetpool_id)
return neutronclient(request).delete_subnetpool(subnetpool_id)
@profiler.trace
def port_list(request, **params):
LOG.debug("port_list(): params=%s", params)
ports = neutronclient(request).list_ports(**params).get('ports')
return [Port(p) for p in ports]
@profiler.trace
def port_get(request, port_id, **params):
LOG.debug("port_get(): portid=%(port_id)s, params=%(params)s",
{'port_id': port_id, 'params': params})
port = neutronclient(request).show_port(port_id, **params).get('port')
return Port(port)
def unescape_port_kwargs(**kwargs):
for key in kwargs:
if '__' in key:
kwargs[':'.join(key.split('__'))] = kwargs.pop(key)
return kwargs
@profiler.trace
def port_create(request, network_id, **kwargs):
"""Create a port on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param device_id: (optional) device id attached to the port
:param tenant_id: (optional) tenant id of the port created
:param name: (optional) name of the port created
:returns: Port object
"""
LOG.debug("port_create(): netid=%(network_id)s, kwargs=%(kwargs)s",
{'network_id': network_id, 'kwargs': kwargs})
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['port'].update(kwargs)
port = neutronclient(request).create_port(body=body).get('port')
return Port(port)
@profiler.trace
def port_delete(request, port_id):
LOG.debug("port_delete(): portid=%s", port_id)
neutronclient(request).delete_port(port_id)
@profiler.trace
def port_update(request, port_id, **kwargs):
LOG.debug("port_update(): portid=%(port_id)s, kwargs=%(kwargs)s",
{'port_id': port_id, 'kwargs': kwargs})
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': kwargs}
port = neutronclient(request).update_port(port_id, body=body).get('port')
return Port(port)
@profiler.trace
def router_create(request, **kwargs):
LOG.debug("router_create():, kwargs=%s", kwargs)
body = {'router': {}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['router'].update(kwargs)
router = neutronclient(request).create_router(body=body).get('router')
return Router(router)
@profiler.trace
def router_update(request, r_id, **kwargs):
LOG.debug("router_update(): router_id=%(r_id)s, kwargs=%(kwargs)s",
{'r_id': r_id, 'kwargs': kwargs})
body = {'router': {}}
body['router'].update(kwargs)
router = neutronclient(request).update_router(r_id, body=body)
return Router(router['router'])
@profiler.trace
def router_get(request, router_id, **params):
router = neutronclient(request).show_router(router_id,
**params).get('router')
return Router(router)
@profiler.trace
def router_list(request, **params):
routers = neutronclient(request).list_routers(**params).get('routers')
return [Router(r) for r in routers]
@profiler.trace
def router_list_on_l3_agent(request, l3_agent_id, **params):
routers = neutronclient(request).\
list_routers_on_l3_agent(l3_agent_id,
**params).get('routers')
return [Router(r) for r in routers]
@profiler.trace
def router_delete(request, router_id):
neutronclient(request).delete_router(router_id)
@profiler.trace
def router_add_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
client = neutronclient(request)
return client.add_interface_router(router_id, body)
@profiler.trace
def router_remove_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
body['subnet_id'] = subnet_id
if port_id:
body['port_id'] = port_id
neutronclient(request).remove_interface_router(router_id, body)
@profiler.trace
def router_add_gateway(request, router_id, network_id):
body = {'network_id': network_id}
neutronclient(request).add_gateway_router(router_id, body)
@profiler.trace
def router_remove_gateway(request, router_id):
neutronclient(request).remove_gateway_router(router_id)
@profiler.trace
def router_static_route_list(request, router_id=None):
router = router_get(request, router_id)
try:
routes = [RouterStaticRoute(r) for r in router.routes]
except AttributeError:
LOG.debug("router_static_route_list(): router_id=%(router_id)s, "
"router=%(router)s", {'router_id': router_id,
'router': router})
return []
return routes
@profiler.trace
def router_static_route_remove(request, router_id, route_ids):
currentroutes = router_static_route_list(request, router_id=router_id)
newroutes = []
for oldroute in currentroutes:
if oldroute.id not in route_ids:
newroutes.append({'nexthop': oldroute.nexthop,
'destination': oldroute.destination})
body = {'routes': newroutes}
new = router_update(request, router_id, **body)
return new
@profiler.trace
def router_static_route_add(request, router_id, newroute):
body = {}
currentroutes = router_static_route_list(request, router_id=router_id)
body['routes'] = [newroute] + [{'nexthop': r.nexthop,
'destination': r.destination}
for r in currentroutes]
new = router_update(request, router_id, **body)
return new
@profiler.trace
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(neutronclient(request).show_quota(tenant_id)['quota'])
@profiler.trace
def tenant_quota_update(request, tenant_id, **kwargs):
quotas = {'quota': kwargs}
return neutronclient(request).update_quota(tenant_id, quotas)
@profiler.trace
def agent_list(request, **params):
agents = neutronclient(request).list_agents(**params)
return [Agent(a) for a in agents['agents']]
@profiler.trace
def list_dhcp_agent_hosting_networks(request, network, **params):
agents = neutronclient(request).list_dhcp_agent_hosting_networks(network,
**params)
return [Agent(a) for a in agents['agents']]
@profiler.trace
def list_l3_agent_hosting_router(request, router, **params):
agents = neutronclient(request).list_l3_agent_hosting_routers(router,
**params)
return [Agent(a) for a in agents['agents']]
@profiler.trace
def show_network_ip_availability(request, network_id):
ip_availability = neutronclient(request).show_network_ip_availability(
network_id)
return ip_availability
@profiler.trace
def add_network_to_dhcp_agent(request, dhcp_agent, network_id):
body = {'network_id': network_id}
return neutronclient(request).add_network_to_dhcp_agent(dhcp_agent, body)
@profiler.trace
def remove_network_from_dhcp_agent(request, dhcp_agent, network_id):
return neutronclient(request).remove_network_from_dhcp_agent(dhcp_agent,
network_id)
@profiler.trace
def provider_list(request):
providers = neutronclient(request).list_service_providers()
return providers['service_providers']
# TODO(pkarikh) need to uncomment when osprofiler will have no
# issues with unicode in:
# openstack_dashboard/test/test_data/nova_data.py#L470 data
# @profiler.trace
def servers_update_addresses(request, servers, all_tenants=False):
"""Retrieve servers networking information from Neutron if enabled.
Should be used when up to date networking information is required,
and Nova's networking info caching mechanism is not fast enough.
"""
# Get all (filtered for relevant servers) information from Neutron
try:
ports = list_resources_with_long_filters(
port_list, 'device_id', [instance.id for instance in servers],
request=request)
fips = FloatingIpManager(request)
if fips.is_supported():
floating_ips = list_resources_with_long_filters(
fips.list, 'port_id', [port.id for port in ports],
all_tenants=all_tenants)
else:
floating_ips = []
networks = list_resources_with_long_filters(
network_list, 'id', set([port.network_id for port in ports]),
request=request)
except Exception:
error_message = _('Unable to connect to Neutron.')
LOG.error(error_message)
messages.error(request, error_message)
return
# Map instance to its ports
instances_ports = collections.defaultdict(list)
for port in ports:
instances_ports[port.device_id].append(port)
# Map port to its floating ips
ports_floating_ips = collections.defaultdict(list)
for fip in floating_ips:
ports_floating_ips[fip.port_id].append(fip)
# Map network id to its name
network_names = dict(((network.id, network.name) for network in networks))
for server in servers:
try:
addresses = _server_get_addresses(
request,
server,
instances_ports,
ports_floating_ips,
network_names)
except Exception as e:
LOG.error(six.text_type(e))
else:
server.addresses = addresses
def _server_get_addresses(request, server, ports, floating_ips, network_names):
def _format_address(mac, ip, type):
try:
version = netaddr.IPAddress(ip).version
except Exception:
error_message = _('Unable to parse IP address %s.') % ip
LOG.error(error_message)
messages.error(request, error_message)
raise
return {u'OS-EXT-IPS-MAC:mac_addr': mac,
u'version': version,
u'addr': ip,
u'OS-EXT-IPS:type': type}
addresses = collections.defaultdict(list)
instance_ports = ports.get(server.id, [])
for port in instance_ports:
network_name = network_names.get(port.network_id)
if network_name is not None:
for fixed_ip in port.fixed_ips:
addresses[network_name].append(
_format_address(port.mac_address,
fixed_ip['ip_address'],
u'fixed'))
port_fips = floating_ips.get(port.id, [])
for fip in port_fips:
addresses[network_name].append(
_format_address(port.mac_address,
fip.floating_ip_address,
u'floating'))
return dict(addresses)
@profiler.trace
@memoized
def list_extensions(request):
try:
extensions_list = neutronclient(request).list_extensions()
except exceptions.ServiceCatalogException:
return {}
if 'extensions' in extensions_list:
return tuple(extensions_list['extensions'])
else:
return ()
@profiler.trace
@memoized
def is_extension_supported(request, extension_alias):
extensions = list_extensions(request)
for extension in extensions:
if extension['alias'] == extension_alias:
return True
else:
return False
def is_enabled_by_config(name, default=True):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get(name, default)
@memoized
def is_service_enabled(request, config_name, ext_name):
return (is_enabled_by_config(config_name) and
is_extension_supported(request, ext_name))
@memoized
def is_quotas_extension_supported(request):
return (is_enabled_by_config('enable_quotas', False) and
is_extension_supported(request, 'quotas'))
@memoized
def is_router_enabled(request):
return (is_enabled_by_config('enable_router') and
is_extension_supported(request, 'router'))
# FEATURE_MAP is used to define:
# - related neutron extension name (key: "extension")
# - corresponding dashboard config (key: "config")
# - RBAC policies (key: "policies")
# If a key is not contained, the corresponding permission check is skipped.
FEATURE_MAP = {
'dvr': {
'extension': 'dvr',
'config': {
'name': 'enable_distributed_router',
'default': False,
},
'policies': {
'get': 'get_router:distributed',
'create': 'create_router:distributed',
'update': 'update_router:distributed',
}
},
'l3-ha': {
'extension': 'l3-ha',
'config': {'name': 'enable_ha_router',
'default': False},
'policies': {
'get': 'get_router:ha',
'create': 'create_router:ha',
'update': 'update_router:ha',
}
},
}
def get_feature_permission(request, feature, operation=None):
"""Check if a feature-specific field can be displayed.
This method check a permission for a feature-specific field.
Such field is usually provided through Neutron extension.
:param request: Request Object
:param feature: feature name defined in FEATURE_MAP
:param operation (optional): Operation type. The valid value should be
defined in FEATURE_MAP[feature]['policies']
It must be specified if FEATURE_MAP[feature] has 'policies'.
"""
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
feature_info = FEATURE_MAP.get(feature)
if not feature_info:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The requested feature '%(feature)s' is unknown. "
"Please make sure to specify a feature defined "
"in FEATURE_MAP."))
# Check dashboard settings
feature_config = feature_info.get('config')
if feature_config:
if not network_config.get(feature_config['name'],
feature_config['default']):
return False
# Check policy
feature_policies = feature_info.get('policies')
if feature_policies:
policy_name = feature_policies.get(operation)
if not policy_name:
# Translators: Only used inside Horizon code and invisible to users
raise ValueError(_("The 'operation' parameter for "
"get_feature_permission '%(feature)s' "
"is invalid. It should be one of %(allowed)s")
% {'feature': feature,
'allowed': ' '.join(feature_policies.keys())})
role = (('network', policy_name),)
if not policy.check(role, request):
return False
# Check if a required extension is enabled
feature_extension = feature_info.get('extension')
if feature_extension:
try:
return is_extension_supported(request, feature_extension)
except Exception:
msg = (_("Failed to check Neutron '%s' extension is not supported")
% feature_extension)
LOG.info(msg)
return False
# If all checks are passed, now a given feature is allowed.
return True
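# Illustrative usage (editor's addition): check whether the "distributed"
# router option may be offered on a create form. The operation name must be
# one of the keys in FEATURE_MAP[feature]['policies'].
#
#     if get_feature_permission(request, 'dvr', 'create'):
#         pass  # expose the distributed-router field to the user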
| apache-2.0 | -2,239,316,490,903,056,400 | 35.944151 | 79 | 0.605258 | false | 3.978786 | true | false | false |
ScalaInc/exp-python2-sdk | exp_sdk/exceptions.py | 1 | 1083 |
import traceback
import logging
logger = logging.getLogger('exp')
class ExpError (Exception):
def __init__ (self, message):
self.message = message
def __str__ (self):
return self.message
class AuthenticationError (ExpError):
pass
class UnexpectedError (ExpError):
def __init__ (self, *args, **kwargs):
logger.debug('An unexpected error occured:')
logger.debug(traceback.format_exc())
super(UnexpectedError, self).__init__(*args, **kwargs)
# Cannot execute desired action.
class RuntimeError(ExpError):
def __init__ (self, message):
logger.debug('A runtime error has occured: %s' % message)
def __str__ (self):
return self.message
class ApiError(ExpError):
def __init__(self, code=None, message=None, status_code=None, payload=None):
self.message = message or 'An unknown error has occurred.'
self.code = code or 'unknown.error'
self.status_code = status_code
self.payload = payload
def __str__(self):
return '%s: %s \n %s' % (self.code, self.message, self.payload)
class NetworkError(ExpError): pass
| mit | -8,175,853,205,968,226,000 | 21.102041 | 78 | 0.674977 | false | 3.634228 | false | false | false |
PyAbel/PyAbel | doc/transform_methods/rbasex-SVD.py | 1 | 1324 | from __future__ import division, print_function
import numpy as np
from scipy.linalg import inv, svd
import matplotlib.pyplot as plt
from abel.rbasex import _bs_rbasex
Rmax = 40
# SVD for 0th-order inverse Abel transform
P, = _bs_rbasex(Rmax, 0, False)
A = inv(P.T)
V, s, UT = svd(A)
# setup x axis
def setx():
plt.xlim((0, Rmax))
plt.xticks([0, 1/4 * Rmax, 1/2 * Rmax, 3/4 * Rmax, Rmax],
['$0$', '', '', '', '$r_{\\rm max}$'])
# plot i-th +- 0, 1 singular vectors
def plotu(i, title):
plt.title('$\\mathbf{v}_i,\\quad i = ' + title + '$')
i = int(i)
plt.plot(V[:, i - 1], '#DD0000')
plt.plot(V[:, i], '#00AA00')
plt.plot(V[:, i + 1], '#0000FF')
setx()
fig = plt.figure(figsize=(6, 6), frameon=False)
# singular values
plt.subplot(321)
plt.title('$\\sigma_i$')
plt.plot(s, 'k')
setx()
plt.ylim(bottom=0)
# vectors near 0
plt.subplot(322)
plotu(1, '0, 1, 2')
# vectors near 1/4
plt.subplot(323)
plotu(1/4 * Rmax, '\\frac{1}{4} r_{\\rm max} \\pm 0, 1')
# vectors near middle
plt.subplot(324)
plotu(1/2 * Rmax, '\\frac{1}{2} r_{\\rm max} \\pm 0, 1')
# vectors near 3/4
plt.subplot(325)
plotu(3/4 * Rmax, '\\frac{3}{4} r_{\\rm max} \\pm 0, 1')
# vectors near end
plt.subplot(326)
plotu(Rmax - 1, 'r_{\\rm max} - 2, 1, 0')
plt.tight_layout()
#plt.savefig('rbasex-SVD.svg')
#plt.show()
| mit | 8,835,070,110,759,134,000 | 20.015873 | 59 | 0.590634 | false | 2.339223 | false | false | false |
nolram/NewsReader-Django | Crawler/migrations/0001_initial.py | 1 | 5262 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Categorias',
fields=[
('id_categoria', models.AutoField(primary_key=True, serialize=False)),
('categoria', models.CharField(unique=True, db_index=True, max_length=100)),
],
),
migrations.CreateModel(
name='Imagens',
fields=[
('id_imagem', models.AutoField(primary_key=True, serialize=False)),
('img_cover', sorl.thumbnail.fields.ImageField(upload_to='', null=True)),
('data_inserido', models.DateTimeField(auto_now_add=True)),
('data_modificado', models.DateTimeField(auto_now=True)),
('img_link_orig', models.URLField(unique=True, db_index=True, max_length=700)),
],
),
migrations.CreateModel(
name='LinksRSS',
fields=[
('id_links_rss', models.AutoField(primary_key=True, serialize=False)),
('link_rss', models.URLField(db_index=True, max_length=600)),
('data_adicionado', models.DateTimeField(auto_now_add=True)),
('data_modificado', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Postagens',
fields=[
('id_postagem', models.AutoField(primary_key=True, serialize=False)),
('titulo', models.CharField(max_length=500)),
('link', models.URLField(unique=True, db_index=True, max_length=700)),
('link_origi', models.URLField(null=True, unique=True, db_index=True, max_length=700)),
('texto', models.TextField(null=True)),
('data_adicionado', models.DateTimeField(auto_now_add=True)),
('data_modificado', models.DateTimeField(auto_now=True)),
('horario_postagem_site', models.DateTimeField(null=True)),
('fk_imagem', models.ForeignKey(to='Crawler.Imagens', related_name='fk_imagem_postagem')),
('fk_rss', models.ForeignKey(to='Crawler.LinksRSS', related_name='fk_rss_postagem')),
],
),
migrations.CreateModel(
name='RSSCategorias',
fields=[
('id_rss_categorias', models.AutoField(primary_key=True, serialize=False)),
('fk_categoria', models.ForeignKey(to='Crawler.Categorias', related_name='fk_categoria_rss')),
('fk_rss', models.ForeignKey(to='Crawler.LinksRSS', related_name='fk_rss')),
],
),
migrations.CreateModel(
name='Sites',
fields=[
('id_sites', models.AutoField(primary_key=True, serialize=False)),
('titulo', models.CharField(db_index=True, max_length=150)),
('descricao', models.TextField()),
('link', models.URLField(db_index=True, max_length=600)),
('idioma', models.CharField(max_length=30)),
('data_adicionado', models.DateTimeField(auto_now_add=True)),
('data_modificado', models.DateTimeField(auto_now=True)),
('fk_logo', models.ForeignKey(null=True, to='Crawler.Imagens')),
],
),
migrations.CreateModel(
name='SitesCategorias',
fields=[
('id_sites_categorias', models.AutoField(primary_key=True, serialize=False)),
('fk_categoria', models.ForeignKey(to='Crawler.Categorias', related_name='fk_categoria')),
('fk_site', models.ForeignKey(to='Crawler.Sites', related_name='fk_site')),
],
),
migrations.CreateModel(
name='Tags',
fields=[
('id_tag', models.AutoField(primary_key=True, serialize=False)),
('tag', models.CharField(unique=True, db_index=True, max_length=100)),
('contador', models.PositiveIntegerField(default=1)),
],
),
migrations.CreateModel(
name='TagsPostagens',
fields=[
('id_tags_postagens', models.AutoField(primary_key=True, serialize=False)),
('fk_postagem', models.ForeignKey(to='Crawler.Postagens', related_name='tp_postagem')),
('fk_tag', models.ForeignKey(to='Crawler.Tags', related_name='tp_tags')),
],
),
migrations.AddField(
model_name='linksrss',
name='fk_sites',
field=models.ForeignKey(to='Crawler.Sites'),
),
migrations.AlterUniqueTogether(
name='tagspostagens',
unique_together=set([('fk_postagem', 'fk_tag')]),
),
migrations.AlterUniqueTogether(
name='sitescategorias',
unique_together=set([('fk_site', 'fk_categoria')]),
),
migrations.AlterUniqueTogether(
name='rsscategorias',
unique_together=set([('fk_rss', 'fk_categoria')]),
),
]
| mit | -1,881,030,294,619,357,200 | 43.974359 | 110 | 0.549221 | false | 4.107728 | false | false | false |
deathmetalland/IkaLog | ikalog/outputs/webserver/preview.py | 3 | 2324 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import time
import cv2
class PreviewRequestHandler(object):
def __init__(self, http_handler):
self._http_handler = http_handler
self._plugin = http_handler.server.parent
self._frame = None
self._new_image = False
self._stopped = False
self._http_handler.send_response(200)
self._http_handler.send_header(
'Content-type', 'multipart/x-mixed-replace; boundary=--frame_boundary')
self._http_handler.end_headers()
self._plugin._listeners.append(self)
while not self._stopped:
time.sleep(0.05)
if (self._frame is None):
continue
# FIXME: JPEG data should be shared among connections, for
# performance
result, jpeg = cv2.imencode('.jpg', self._frame)
if not result:
continue
jpeg_length = len(jpeg)
            self._new_image = False
self._http_handler.wfile.write(
'--frame_boundary\r\n'.encode('utf-8')
)
self._http_handler.send_header('Content-type', 'image/jpeg')
self._http_handler.send_header('Content-length', str(jpeg_length))
self._http_handler.end_headers()
self._http_handler.wfile.write(jpeg)
self._plugin._listeners.remove(self)
def on_event(self, event_name, context, params=None):
if event_name == 'on_show_preview':
self._frame = context['engine']['frame']
self._new_image = (self._frame is not None)
elif event_name == 'on_stop':
self._stopped = True
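# Illustrative note (editor's addition): the handler above produces an MJPEG
# ("multipart/x-mixed-replace") stream in which every "--frame_boundary" part
# carries one JPEG frame. A browser can display it by pointing an <img> tag at
# whatever URL the plugin's HTTP server maps to this handler; the exact path
# is defined elsewhere in the plugin, not here.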
| apache-2.0 | 7,743,394,388,451,449,000 | 30.405405 | 83 | 0.609294 | false | 3.912458 | false | false | false |
juggernate/pymel | pymel/util/conditions.py | 3 | 2066 | #------------------------------------------------------------------------------
# Condition objects - used for chaining together tests that yield True/False results
#------------------------------------------------------------------------------
class Condition(object):
"""
Used to chain together objects for conditional testing.
"""
class NO_DATA(Exception):
pass
def __init__(self, value=None):
self.value = value
def eval(self, data=NO_DATA):
return bool(self.value)
def __or__(self, other):
return Or(self, other)
def __ror__(self, other):
return Or(other, self)
def __and__(self, other):
return And(self, other)
def __rand__(self, other):
return And(other, self)
def __invert__(self):
return Inverse(self)
def __nonzero__(self):
return self.eval()
def __str__(self):
return str(self.value)
Always = Condition(True)
Never = Condition(False)
class Inverse(Condition):
def __init__(self, toInvert):
self.toInvert = toInvert
def eval(self, data=Condition.NO_DATA):
return not self.toInvert.eval(data)
def __str__(self):
return "not %s" % self.toInvert
class AndOrAbstract(Condition):
def __init__(self, *args):
self.args = []
for arg in args:
if isinstance(arg, self.__class__):
self.args.extend(arg.args)
else:
self.args.append(arg)
def eval(self, data=Condition.NO_DATA):
for arg in self.args:
if isinstance(arg, Condition):
val = arg.eval(data)
else:
val = bool(arg)
if val == self._breakEarly:
return self._breakEarly
return not self._breakEarly
def __str__(self):
return "(%s)" % self._strJoiner.join([str(x) for x in self.args])
class And(AndOrAbstract):
_breakEarly = False
_strJoiner = ' and '
class Or(AndOrAbstract):
_breakEarly = True
_strJoiner = ' or '
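def _example_condition_chaining():
    """Illustrative sketch (editor's addition, not part of the original
    module): conditions compose with &, | and ~ and are only evaluated when
    eval() (or truth-testing) is called."""
    combined = (Always & ~Never) | Never
    return combined.eval()  # -> True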
| bsd-3-clause | -2,844,658,792,059,718,700 | 23.595238 | 84 | 0.518877 | false | 4.02729 | false | false | false |
javgh/greenaddress-pos-tools | nfc/tag/tt1.py | 1 | 10645 | # -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2011-2013
# Stephen Tiedemann <[email protected]>,
# Alexander Knaub <[email protected]>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
import logging
log = logging.getLogger(__name__)
import nfc.tag
import nfc.clf
import nfc.ndef
class NDEF(object):
def __init__(self, tag):
self._tag = tag
self._msg = ''
self._cc = tag[8:12]
log.debug("capability container " + str(self._cc).encode("hex"))
self._skip = set(range(104, 120))
self.changed # force initial read
def _read_tlv(self, offset):
read_tlv = {
0x00: lambda x: x + 1,
0x01: self._read_lock_tlv,
0x02: self._read_memory_tlv,
0x03: self._read_ndef_tlv,
0xFE: lambda x: None
}.get(self._tag[offset], self._read_unknown_tlv)
return read_tlv(offset + 1)
def _read_unknown_tlv(self, offset):
log.debug("found unknown tlv")
length, offset = self._read_tlv_length(offset)
return offset + length
def _read_ndef_tlv(self, offset):
log.debug("ndef message tlv at 0x{0:0X}".format(offset-1))
self._ndef_tlv_offset = offset - 1
length, offset = self._read_tlv_length(offset)
log.debug("ndef message length is {0}".format(length))
self._capacity = (self._cc[2]+1)*8 - offset - len(self._skip)
if length < 255 and self._capacity >= 255:
self._capacity -= 2 # account for three byte length format
self._msg = bytearray()
while length > 0:
if not offset in self._skip:
self._msg.append(self._tag[offset])
length -= 1
offset += 1
return None
def _read_lock_tlv(self, offset):
log.debug("dynamic lock byte tlv at 0x{0:0X}".format(offset-1))
length, offset = self._read_tlv_length(offset)
value = self._tag[offset:offset+length]
page_offs = value[0] >> 4
byte_offs = value[0] & 0x0F
resv_size = ((value[1] - 1) / 8) + 1
page_size = 2 ** (value[2] & 0x0F)
resv_start = page_offs * page_size + byte_offs
self._skip.update(range(resv_start, resv_start + resv_size))
return offset + length
def _read_memory_tlv(self, offset):
log.debug("memory control tlv at 0x{0:0X}".format(offset-1))
length, offset = self._read_tlv_length(offset)
value = self._tag[offset:offset+length]
page_offs = value[0] >> 4
byte_offs = value[0] & 0x0F
resv_size = value[1]
page_size = 2 ** (value[2] & 0x0F)
resv_start = page_offs * page_size + byte_offs
self._skip.update(range(resv_start, resv_start + resv_size))
return offset + length
def _read_tlv_length(self, offset):
length = self._tag[offset]
if length == 255:
            length = self._tag[offset+1] * 256 + self._tag[offset+2]
offset = offset + 2
if length < 256 or length == 0xFFFF:
raise ValueError("invalid tlv lenght value")
return length, offset + 1
@property
def version(self):
"""The version of the NDEF mapping."""
return "%d.%d" % (self._cc[1]>>4, self._cc[1]&0x0F)
@property
def capacity(self):
"""The maximum number of user bytes on the NDEF tag."""
return self._capacity
@property
def readable(self):
"""Is True if data can be read from the NDEF tag."""
return self._cc[3] & 0xF0 == 0x00
@property
def writeable(self):
"""Is True if data can be written to the NDEF tag."""
return self._cc[3] & 0x0F == 0x00
@property
def length(self):
"""NDEF message data length."""
return len(self._msg)
@property
def changed(self):
"""True if the message has changed since the read."""
if self.readable:
old_msg = self._msg[:] # make a copy
offset = 12
while offset is not None:
offset = self._read_tlv(offset)
return self._msg != old_msg
return False
@property
def message(self):
"""An NDEF message object (an empty record message if tag is empty)."""
if self.readable:
try: return nfc.ndef.Message(str(self._msg))
except nfc.ndef.parser_error: pass
return nfc.ndef.Message(nfc.ndef.Record())
@message.setter
def message(self, msg):
if not self.writeable:
raise nfc.tag.AccessError
data = bytearray(str(msg))
nlen = len(data)
if nlen > self.capacity:
raise nfc.tag.CapacityError
if nlen < self.capacity:
data = data + "\xFE"
with self._tag as tag:
tag[0x08] = 0x00
tag[0x09] = 0x10
tag[0x0B] = 0x00
offset = self._ndef_tlv_offset + 1
if len(data) < 255:
tag[offset] = nlen
offset += 1
else:
tag[offset] = 255
tag[offset+1] = nlen / 256
tag[offset+2] = nlen % 256
offset += 3
for octet in data:
while offset in self._skip:
offset += 1
tag[offset] = octet
offset += 1
with self._tag as tag:
tag[8] = 0xE1
class Type1Tag(object):
type = "Type1Tag"
def __init__(self, clf, target):
self.clf = clf
self.uid = target.uid
self._mmap = self.read_all()[2:]
self._sync = set()
self.ndef = None
if self[8] == 0xE1:
try: self.ndef = NDEF(self)
except Exception as error:
log.error("while reading ndef: {0!r}".format(error))
def __str__(self):
return "Type1Tag UID=" + str(self.uid).encode("hex")
def __getitem__(self, key):
if type(key) is int:
key = slice(key, key+1)
if not type(key) is slice:
raise TypeError("key must be of type int or slice")
if key.start > key.stop:
raise ValueError("start index is greater than stop index")
if key.stop > len(self._mmap):
for block in range(len(self._mmap)/8, key.stop/8):
self._mmap += self.read_block(block)
bytes = self._mmap[key.start:key.stop]
return bytes if len(bytes) > 1 else bytes[0]
def __setitem__(self, key, value):
if type(key) is int:
key = slice(key, key+1)
if type(key) is not slice:
raise TypeError("key must be of type int or slice")
if type(value) == int:
value = bytearray([value])
else:
value = bytearray(value)
if len(value) != key.stop - key.start:
raise ValueError("value and slice length do not match")
if key.stop > len(self._mmap):
self.__getitem__(key)
for i in xrange(key.start, key.stop):
self._mmap[i] = value[i-key.start]
self._sync.add(i)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
if self._mmap[10] < 15:
for i in sorted(self._sync):
self.write_byte(i, self._mmap[i])
self._sync.clear()
else:
while len(self._sync) > 0:
block = sorted(self._sync).pop(0) / 8
self.write_block(block, self._mmap[block<<3:(block+1)<<3])
self._sync -= set(range(block<<3, (block+1)<<3))
@property
def is_present(self):
"""Returns True if the tag is still within communication range."""
try:
data = self.transceive("\x78\x00\x00"+self.uid)
return data and len(data) == 6
except nfc.clf.DigitalProtocolError: return False
def transceive(self, data, timeout=0.1):
return self.clf.exchange(data, timeout)
def read_id(self):
"""Read header rom and all static memory bytes (blocks 0-14).
"""
log.debug("read all")
cmd = "\x78\x00\x00\x00\x00\x00\x00"
return self.transceive(cmd)
def read_all(self):
"""Read header rom and all static memory bytes (blocks 0-14).
"""
log.debug("read all")
cmd = "\x00\x00\x00" + self.uid
return self.transceive(cmd)
def read_byte(self, addr):
"""Read a single byte from static memory area (blocks 0-14).
"""
log.debug("read byte at address 0x{0:03X}".format(addr))
cmd = "\x01" + chr(addr) + "\x00" + self.uid
return self.transceive(cmd)[1]
def write_byte(self, addr, byte, erase=True):
"""Write a single byte to static memory area (blocks 0-14).
The target byte is zero'd first if 'erase' is True (default).
"""
log.debug("write byte at address 0x{0:03X}".format(addr))
cmd = "\x53" if erase is True else "\x1A"
cmd = cmd + chr(addr) + chr(byte) + self.uid
return self.transceive(cmd)
def read_block(self, block):
"""Read an 8-byte data block at address (block * 8).
"""
log.debug("read block at address 0x{0:03X}".format(block*8))
cmd = "\x02" + chr(block) + 8 * chr(0) + self.uid
return self.transceive(cmd)[1:9]
def write_block(self, block, data, erase=True):
"""Write an 8-byte data block at address (block * 8).
The target bytes are zero'd first if 'erase' is True (default).
"""
log.debug("write block at address 0x{0:03X}".format(block*8))
cmd = "\x54" if erase is True else "\x1B"
cmd = cmd + chr(block) + data + self.uid
return self.transceive(cmd)
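def _example_type1_tag_usage(tag):
    """Illustrative sketch (editor's addition): 'tag' is assumed to be an
    already discovered Type1Tag instance. Byte and slice access go through the
    cached memory map; writes made inside the 'with' block are flushed back to
    the tag on exit, mirroring what the NDEF wrapper above does."""
    uid_block = tag[0:8]        # block 0 holds the UID bytes
    capability = tag[8:12]      # capability container, as read by NDEF
    with tag as t:
        t[8] = 0xE1             # NDEF magic number (see NDEF.message setter)
    return uid_block, capability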
| mit | -6,768,400,176,963,971,000 | 34.841751 | 79 | 0.543166 | false | 3.651801 | false | false | false |
hodgestar/overalls | overalls/core.py | 1 | 2481 | # -*- coding: utf-8 -*-
"""Core classes for overalls."""
class FileCoverage(object):
"""Coverage result for a single file.
:param str filename:
Name of the filename the results are for.
:param str source:
Source code (newline separated).
:param list coverage:
List of coverage results. One entry per line.
        Entries may be an integer (the number of times the line was covered) or
None (not relevant, e.g. for lines that are comments).
"""
def __init__(self, filename, source, coverage):
self.filename = filename
self.source = source
self.coverage = coverage
def __repr__(self):
return "<%s filename=%r source=%r coverage=%r>" % (
self.__class__.__name__,
self.filename,
self.source[:30],
self.coverage[:30],
)
def __eq__(self, other):
if not isinstance(other, FileCoverage):
return NotImplemented
return all((self.filename == other.filename,
self.source == other.source,
self.coverage == other.coverage))
class CoverageResults(object):
"""Coverage results."""
def __init__(self):
self.files = []
def append(self, file_coverage):
self.files.append(file_coverage)
def extend(self, results):
self.files.extend(results.files)
class Collector(object):
    """Object that knows how to collect coverage results from a single source."""
def results(self):
"""Should read the coverage source and return a `Results` instance."""
raise NotImplementedError("Collectors should implement .results.")
class StaticCollector(Collector):
"""Collector that returns a static set of results."""
def __init__(self, results):
self._results = results
def results(self):
return self._results
class CollectorSet(Collector):
"""Collector that combines results from a set of other collectors."""
def __init__(self, collectors):
self._collectors = collectors
def results(self):
combined = CoverageResults()
for collector in self._collectors:
combined.extend(collector.results())
return combined
class Uploader(object):
"""Object that knows how to upload coverage results somewhere."""
def upload(self, results):
"""Upload a set of `Results`."""
raise NotImplementedError("Uploaders should implement .upload.")
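# Hypothetical composition example (not part of the module); the file name,
# source text and coverage counts below are made up purely for illustration.
#
#   results = CoverageResults()
#   results.append(FileCoverage("pkg/mod.py", "x = 1\n", [1]))
#   combined = CollectorSet([StaticCollector(results)]).results()
#   assert combined.files[0].filename == "pkg/mod.py"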
| bsd-3-clause | 4,973,367,806,479,127,000 | 26.876404 | 78 | 0.615075 | false | 4.620112 | false | false | false |
ShadowApex/pygame-sdl2 | pygame2/display/__init__.py | 1 | 13684 | #!/usr/bin/python
"""Window handling module."""
import sdl2.ext
import pygame2
from sdl2 import rect, render
from sdl2.ext.compat import isiterable
try:
import sdl2.sdlgfx
except:
pass
window = None
class Window(object):
def __init__(self, title="Pygame2", size=(800, 600), type="hardware", fullscreen=False):
"""An object used to create SDL2 windows.
The *Window* object contains backwards compatible methods for the
pygame display and creates a simple way to render surfaces to the
screen.
Args:
title (str): The title of the window.
size (tuple of int, optional): The size of the window in pixels,
defaults to (800, 600).
type (str, optional): The type of SDL2 window to create. Can be
either "hardware" or "software", defaults to "hardware".
fullscreen (boolean, optional): Whether or not the window is
fullscreen, defaults to False.
"""
self.title = title
self.size = size
self.width = size[0]
self.height = size[1]
self.type = type
# Create our SDL2 window.
self.sdl2_window = sdl2.ext.Window(title, size)
self.sdl2_window.show()
self.world = sdl2.ext.World()
self.systems = []
self.sprites = []
self.to_blit = []
# Set up our renderer.
if type == "software":
self.texture_renderer = None
self.sprite_renderer = SoftwareRenderer(self.sdl2_window)
self.factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
elif type == "hardware":
self.texture_renderer = sdl2.ext.Renderer(self.sdl2_window)
self.sprite_renderer = TextureRenderer(self.texture_renderer)
self.factory = sdl2.ext.SpriteFactory(sdl2.ext.TEXTURE,
renderer=self.texture_renderer)
self.sw_factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
# Add our renderer as a system that will be called when
# world.process() is called.
self.world.add_system(self.sprite_renderer)
def update(self):
"""Updates the contents of the window.
When this method is called, we render all sprites that have been added
to our "to_blit" list.
Args:
None
Returns:
None
"""
dt = 0
for system in self.systems:
system.process(self, dt)
#if self.type == "hardware":
# self.texture_renderer.clear()
#if self.sprites:
# self.sprite_renderer.render(self.sprites)
if self.to_blit:
self.sprite_renderer.render(self.to_blit)
self.to_blit = []
#self.world.process()
self.sdl2_window.refresh()
def blit(self, surface, position):
"""Adds a sprite to our list of sprites to be drawn on update.
This method allows backwards compatibility of pygame projects by
setting the sprite's position and adding it to our "to_blit" list.
Args:
surface (pygame2.Surface): The surface object containing the sprite
to draw on the screen.
position (tuple of int): The (x, y) position on the screen to draw
the sprite at.
Returns:
None
"""
sprite = surface.sprite
if not position:
position = [sprite.x, sprite.y]
else:
sprite.x = position[0]
sprite.y = position[1]
#self.sprite_renderer.render(sprite)
self.to_blit.append(sprite)
def toggle_fullscreen(self):
"""Toggles fullscreen.
        This method toggles fullscreen using the SDL_SetWindowFullscreen
function.
Args:
None
Returns:
None
"""
        sdl2.SDL_SetWindowFullscreen(self.sdl2_window.window,
                                     sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP)
def set_caption(self, title):
"""Sets the title of the SDL2 window.
This method allows backwards compatibility with pygame.
Args:
title (str): The title of the window.
Returns:
None
"""
self.title = title
self.sdl2_window.title = title
def set_icon(self, icon_path):
"""Sets the icon of the window.
This method allows backwards compatibility with pygame.
Args:
icon_path (str): Path to the icon file to use.
Returns:
None
"""
pass
def add_system(self, system):
"""Adds an object with a "process" method that is executed on update.
This method employs a new way to define "systems" that will be called
whenever the window's "update" method is called.
Args:
system (object): An object with a "process" method.
"""
self.systems.append(system)
def fill(self, color):
"""Fills the window with an RGB(A) color.
This method provides a backwards compatible method for filling the
screen with a particular color.
Args:
color (tuple of int): The (r, g, b, a) color values to fill the
screen.
Returns:
None
"""
if self.type == "software":
            # Fill the software surface with the requested color.
sdl2.ext.fill(self.sprite_renderer.surface,
sdl2.ext.Color(color[0], color[1], color[2]))
elif self.type == "hardware":
self.texture_renderer.clear(color)
def get_rect(self):
"""Gets the rectangle of the current window.
This method provides a pygame-compatible way to get the rectangle
of the current window.
Args:
None
Returns:
A pygame2.Rect object.
"""
rect = pygame2.Rect(0, 0, self.width, self.height)
return rect
class SoftwareRenderer(sdl2.ext.SoftwareSpriteRenderSystem):
def __init__(self, window):
"""Creates an SDL2 software renderer used for software rendering.
SDL2 is capable of using either software or texture-based rendering.
Texture rendering uses hardware acceleration to draw 2d sprites,
while software rendering uses the CPU to draw 2d sprites.
Args:
window (pygame2.display.Window): The pygame2 window object.
"""
super(SoftwareRenderer, self).__init__(window)
def render(self, components):
"""Renders a sprite or list of sprites to the screen.
This is a modified version of the original pysdl2 software render
method, but includes the ability to rotate sprites using sdlgfx.
Note that sdlgfx must be installed for this method to work.
Args:
components (SDL2 Sprite or List): A sprite or list of sprites to
render to the screen.
Returns:
None
"""
# Fill the screen with black every frame.
#sdl2.ext.fill(self.surface, sdl2.ext.Color(0, 0, 0))
# If we're using software rendering, do rotation using sdlgfx.
if isiterable(components):
sprites = []
for sprite in components:
rotozoom = sdl2.sdlgfx.rotozoomSurface
surface = rotozoom(sprite.original.surface,
sprite.angle,
1.0,
1).contents
sdl2.SDL_FreeSurface(sprite.surface)
sprite.surface = surface
sprites.append(sprite)
components = sprites
else:
surface = sdl2.sdlgfx.rotozoomSurface(components.original.surface,
components.angle,
1.0,
1).contents
sdl2.SDL_FreeSurface(components.surface)
components.surface = surface
super(SoftwareRenderer, self).render(components)
class TextureRenderer(sdl2.ext.TextureSpriteRenderSystem):
def __init__(self, target):
"""Creates an SDL2 texture renderer used for hardware rendering.
SDL2 is capable of using either software or texture-based rendering.
Texture rendering uses hardware acceleration to draw 2d sprites,
while software rendering uses the CPU to draw 2d sprites.
Args:
target (sdl2.ext.Renderer): An SDL2 texture renderer object.
"""
super(TextureRenderer, self).__init__(target)
def render(self, sprites, x=None, y=None):
"""Renders a sprite or list of sprites to the screen.
This method overrides the render method of the
sdl2.ext.TextureSpriteRenderSystem to use "SDL_RenderCopyEx" instead
of "SDL_RenderCopy" to allow sprite rotation:
http://wiki.libsdl.org/SDL_RenderCopyEx
Args:
sprites (SDL2 Sprite or List): A sprite or list of sprites to
render to the screen.
x (int, optional): X position to render the sprite, defaults to None
y (int, optional): Y position to render the sprite, defaults to None
Returns:
None
"""
r = rect.SDL_Rect(0, 0, 0, 0)
if isiterable(sprites):
rcopy = render.SDL_RenderCopyEx
renderer = self.sdlrenderer
x = x or 0
y = y or 0
for sp in sprites:
r.x = x + sp.x
r.y = y + sp.y
r.w, r.h = sp.size
if rcopy(renderer, sp.texture, None, r, sp.angle, None, render.SDL_FLIP_NONE) == -1:
                    raise sdl2.ext.SDLError()
else:
r.x = sprites.x
r.y = sprites.y
r.w, r.h = sprites.size
if x is not None and y is not None:
r.x = x
r.y = y
render.SDL_RenderCopyEx(self.sdlrenderer,
sprites.texture,
None,
r,
sprites.angle,
None,
render.SDL_FLIP_NONE)
render.SDL_RenderPresent(self.sdlrenderer)
def create(size=(800, 600), title="Pygame2", type="hardware"):
"""Creates an SDL2 window.
This method provides a pygame-like way to create a window.
Args:
size (tuple of int, optional): An (x, y) size of the window to create,
defaults to (800, 600)
title (str, optional): The title of the window, defaults to "Pygame2"
type (str, optional): The type of sprite renderer to use. Can be either
"software" or "hardware". Defaults to "hardware".
Returns:
A pygame2.display.Window object.
"""
return set_mode(size, title, type)
def set_mode(size=(800, 600), title="Pygame2", type="hardware"):
"""Creates an SDL2 window with the provided size.
This method provides a pygame-compatible way to create a window.
Args:
size (tuple of int, optional): An (x, y) size of the window to create,
defaults to (800, 600)
title (str, optional): The title of the window, defaults to "Pygame2"
type (str, optional): The type of sprite renderer to use. Can be either
"software" or "hardware". Defaults to "hardware".
Returns:
A pygame2.display.Window object.
"""
global window
if not window:
window = Window(title, size, type)
else:
raise Exception("Error: Cannot create a window after one has already been created.")
return window
def set_caption(title):
"""Sets the title of the current window.
This method provides a pygame-compatible way to set the window caption.
Args:
title (str): The title of the window.
Returns:
A pygame2.display.Window object.
"""
global window
if window:
window.set_caption(title)
else:
window = Window(title)
return window
def update():
"""Updates the contents of the current window.
This method provides a pygame-compatible way to refresh the window.
Args:
None
Returns:
None
"""
global window
window.update()
def flip():
"""Updates the contents of the current window.
This method provides a pygame-compatible way to refresh the window.
Args:
None
Returns:
None
"""
update()
def get_surface():
"""Returns a copy of the current window object.
This method provides a pygame-compatible method to get the current window.
Args:
None
Returns:
A pygame2.display.Window object.
"""
global window
return window
def set_mode(size, fullscreen=0, depth=32):
"""Sets the resolution of the window.
This method provides a pygame-compatible way to create or set the window
size.
Args:
size (tuple of int): The (x, y) size of the window.
fullscreen (boolean, optional): Whether or not to set the window to
fullscreen mode, defaults to 0.
depth (int, optional): Legacy argument for pygame compatibility, defaults
to 32.
Returns:
A pygame2.display.Window object.
"""
global window
if window:
sdl2.SDL_SetWindowSize(window.sdl2_window.window, size[0], size[1])
window.size = size
window.width = size[0]
window.height = size[1]
else:
window = Window(size=size)
return window
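# Illustrative usage sketch (assumptions, not part of this module): a typical
# pygame-style main loop built from the helpers above; `some_surface` stands in
# for a pygame2 surface created elsewhere and `running` for the loop condition.
#
#   screen = set_mode((800, 600))
#   set_caption("Demo")
#   while running:
#       screen.fill((0, 0, 0))
#       screen.blit(some_surface, (10, 10))
#       update()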
| gpl-2.0 | -5,348,630,576,886,839,000 | 27.214433 | 100 | 0.57739 | false | 4.339994 | false | false | false |
SeMorgana/ctf | defcon2014/3dttt.py | 1 | 5141 | #5/17/2014
import telnetlib
import random
from utility import *
tn = telnetlib.Telnet("3dttt_87277cd86e7cc53d2671888c417f62aa.2014.shallweplayaga.me",1234)
X = 'X'
O = 'O'
def get_sym(coor): #sym => symmetric
if coor == 0:
return 2
if coor == 1:
return 1
if coor == 2:
return 0
def get_move(new_O_pos):
x,y,z = new_O_pos #x,y are in wrong order
return (get_sym(x),get_sym(y),get_sym(z))
def get_new_pos(pre,cur):
for i in cur:
if not (i in pre):
return i
def is_all_empty(open_all):
ret = True
for i in range(9):
ret = ret and (len(open_all[i]) == 0)
return ret
def get_next_open(open_all): #open_all: tuple of list of tuples
valid = []
for i in range(9):
if len(open_all[i])>0:
if i in [0,1,2]:
z = 0
elif i in [3,4,5]:
z = 1
elif i in [6,7,8]:
z = 2
for j in open_all[i]:
valid.append((j[0],j[1],z))
index = random.randint(0,len(valid)-1)
return valid[index]
#return (open_all[i][0][0],open_all[i][0][1],z)
def get_empty(row1,row_num):
open_list =[] #list of tuples
lis = row1.split()
if len(lis) == 2:
open_list.append((row_num,0));
open_list.append((row_num,1));
open_list.append((row_num,2));
elif len(lis) == 3:
if X in lis:
index = lis.index(X)
if index == 0:
open_list.append((row_num,1))
open_list.append((row_num,2))
elif index == 1:
open_list.append((row_num,0))
open_list.append((row_num,2))
elif index == 2:
open_list.append((row_num,0))
open_list.append((row_num,1))
elif O in lis:
index = lis.index(O)
if index == 0:
open_list.append((row_num,1))
open_list.append((row_num,2))
elif index == 1:
open_list.append((row_num,0))
open_list.append((row_num,2))
elif index == 2:
open_list.append((row_num,0))
open_list.append((row_num,1))
elif len(lis) == 4:
if lis[0] == '|':
open_list.append((row_num,0))
elif lis[3] == '|':
open_list.append((row_num,2))
else:
open_list.append((row_num,1))
return open_list
def main():
score_list = get_score_list()
turns = 0
pre_Olist = [] #list of tuples
cur_Olist = [] #same above
while True:
ret = tn.read_until("y\n")
print ret
tn.read_until("0")
row00 = tn.read_until("\n").strip()
tn.read_until("1") #skip
row01 = tn.read_until("\n").strip()
tn.read_until("2") #skip
row02 = tn.read_until("\n").strip()
ret = tn.read_until("y\n")
tn.read_until("0")
row10 = tn.read_until("\n").strip()
tn.read_until("1") #skip
row11 = tn.read_until("\n").strip()
tn.read_until("2") #skip
row12 = tn.read_until("\n").strip()
ret = tn.read_until("y\n")
tn.read_until("0")
row20 = tn.read_until("\n").strip()
tn.read_until("1") #skip
row21 = tn.read_until("\n").strip()
tn.read_until("2") #skip
row22 = tn.read_until("\n").strip()
#print row00
#print row01
#print row02
#print ""
open0 = (get_empty(row00,0), get_empty(row01,1), get_empty(row02,2))
#print row10
#print row11
#print row12
#print ""
open1 = (get_empty(row10,0), get_empty(row11,1), get_empty(row12,2))
#print row20
#print row21
#print row22
open2 = (get_empty(row20,0), get_empty(row21,1), get_empty(row22,2))
rows = (row00,row01,row02,row10,row11,row12,row20,row21,row22)
ret = tn.read_some()
print ret
open_all = (open0[0],open0[1],open0[2],open1[0],open1[1],open1[2],open2[0],open2[1],open2[2])
open_list = convert_open_list(open_all)
if is_all_empty(open_all):
ret = tn.read_some()
print ret
pre_Olist = []
cur_Olist = []
turns = 0
#return
continue
y,x,z = get_next_open(open_all)
Xlist = get_pos_list(rows,'X')
Olist = get_pos_list(rows,'O')
next_move = minimax(Xlist,Olist,open_list)
print "next move", next_move
#get_score(score_list,Xlist,Olist)
if turns==0:
send = "1,1,1"
cur_Olist = get_pos_list(rows,'O')
turns += 1
else:
pre_Olist = cur_Olist;
cur_Olist = get_pos_list(rows,'O')
new_pos = get_new_pos(pre_Olist,cur_Olist)
#y,x,z = get_move(new_pos)
y,x,z = next_move
send = str(x)+","+str(y)+","+str(z)
print "sending ",send
tn.write(send+"\n")
if __name__=="__main__":
main()
| gpl-3.0 | -4,251,141,648,086,245,400 | 26.345745 | 101 | 0.48298 | false | 3.076601 | false | false | false |
consbio/seedsource-core | seedsource_core/django/seedsource/ppt.py | 1 | 14306 | import datetime
import os
from io import BytesIO
from PIL import Image
from PIL.Image import isImageType
from django.conf import settings
from django.utils.translation import ugettext as _
from pptx import Presentation
from pptx.enum.shapes import MSO_SHAPE_TYPE
from pptx.enum.text import PP_PARAGRAPH_ALIGNMENT
from pptx.util import Inches, Pt
SEEDSOURCE_TITLE = getattr(settings, 'SEEDSOURCE_TITLE', _('Seedlot Selection Tool'))
class PPTCreator(object):
def __init__(self):
self.presentation = None
self.width = None
self.height = None
def degree_sign(self, s):
return s.replace('°', '°')
def add_text(self, text_frame, lines):
for line in lines:
paragraph = text_frame.add_paragraph()
for segment in line:
text, size, bold = segment
run = paragraph.add_run()
run.text = text
run.font.size = Pt(size)
run.font.bold = bold
def get_transfer_method_text(self, method, center):
if method != 'seedzone':
method_text = _('Custom transfer limits, climatic center based on the selected location')
elif center == 'zone':
method_text = _('Transfer limits and climatic center based on seed zone')
else:
method_text = _('Transfer limits based on seed zone, climatic center based on the selected location')
return method_text
def replace_shape_image(self, shape, image):
im_bytes = BytesIO()
image.save(im_bytes, 'PNG')
shape.part.related_parts[shape._element.blip_rId].blob = im_bytes.getvalue()
def replace_shape_text(self, shape, text):
paragraph = shape.text_frame.paragraphs[0]
for run in paragraph.runs[1:]:
paragraph._p.remove(run._r)
paragraph.runs[0].text = text
def add_title_text(self, slide, title):
shape = slide.shapes.add_textbox(Inches(.41), Inches(.23), Inches(9.18), Inches(.5))
tf = shape.text_frame
tf.text = title
paragraph = tf.paragraphs[0]
paragraph.font.size = Pt(24)
paragraph.alignment = PP_PARAGRAPH_ALIGNMENT.CENTER
def render_template(self, context):
for slide in self.presentation.slides:
self.render_template_slide(slide, context)
def render_template_slide(self, slide, context):
for shape in slide.shapes:
if shape.name not in context:
continue
value = context[shape.name]
if callable(value):
value(shape)
elif shape.shape_type == MSO_SHAPE_TYPE.PICTURE:
if not isImageType(value):
raise TypeError('Template value {} must be an Image type'.format(shape.name))
self.replace_shape_image(shape, value)
elif shape.shape_type == MSO_SHAPE_TYPE.TEXT_BOX:
if not isinstance(value, str):
raise TypeError('Template value {} must be a string'.format(shape.name))
self.replace_shape_text(shape, value)
def add_slide(self):
slide = self.presentation.slides.add_slide(self.presentation.slide_layouts[0])
# Delete placeholders
for placeholder in (slide.placeholders):
placeholder.element.getparent().remove(placeholder.element)
return slide
def create_overview_slide(self, context):
objective = context['objective']
location_label = context['location_label']
point = context['point']
elevation = context['elevation']
seedlot_year = context['seedlot_year']
site_year = context['site_year']
site_model = context['site_model']
method = context['method']
center = context['center']
location = (point['y'], point['x'])
data_url = 'http://cfcg.forestry.ubc.ca/projects/climate-data/climatebcwna/#ClimateWNA'
method_text = self.get_transfer_method_text(method, center)
slide = self.add_slide()
self.add_title_text(slide, '{} - {}'.format(SEEDSOURCE_TITLE, datetime.datetime.today().strftime('%m/%d/%Y')))
# Body
shape = slide.shapes.add_textbox(Inches(.65), Inches(.73), Inches(8.69), Inches(6.19))
shape.text_frame.word_wrap = True
self.add_text(shape.text_frame, (
((_('Objective:') + ' ', 18, True), (objective, 18, False)),
(('', 18, False),),
(('{}: '.format(location_label), 18, True), ('{}, {}'.format(*location), 18, False)),
((_('Elevation:') + ' ', 18, True), (_('{elevation} ft').format(elevation=elevation), 18, False)),
(('', 18, False),),
((_('Climate scenarios'), 24, True),),
((_('Seedlot climate:') + ' ', 18, True), (seedlot_year, 18, False)),
((_('Planting site climate: ') + ' ', 18, True), (' '.join((site_year, site_model or '')), 18, False)),
(('', 18, False),),
((_('Transfer limit method:') + ' ', 18, True), (method_text, 18, False)),
(('\n', 18, False),),
((_('Data URL:') + ' ', 12, True), (data_url, 12, False))
))
# Hyperlink URL
shape.text_frame.paragraphs[-1].runs[-1].hyperlink.address = data_url
def create_variables_slide(self, variables):
slide = self.add_slide()
self.add_title_text(slide, _('Climate Variables'))
num_rows = len(variables) + 1
table = slide.shapes.add_table(
num_rows, 3, Inches(.47), Inches(.73), Inches(9.05), Inches(.4) * num_rows
).table
cols = table.columns
cols[0].width = Inches(4.59)
cols[1].width = Inches(2.06)
cols[2].width = Inches(2.4)
# Headers
table.cell(0, 0).text = _('Variable')
table.cell(0, 1).text = _('Center')
table.cell(0, 2).text = _('Transfer limit') + ' (+/-)'
for i, variable in enumerate(variables, start=1):
units = self.degree_sign(variable['units'])
center_label = ' '.join((variable['value'], units))
limit_label = '{} {}{}'.format(
variable['limit'],
units,
' ({})'.format(_('modified')) if variable['modified'] else ''
)
table.cell(i, 0).text = variable['label']
table.cell(i, 1).text = center_label
table.cell(i, 2).text = limit_label
def create_constraints_slide(self, constraints):
slide = self.add_slide()
self.add_title_text(slide, _('Constraints'))
num_rows = len(constraints) + 1
table = slide.shapes.add_table(
num_rows, 3, Inches(.47), Inches(.73), Inches(9.05), Inches(.4) * num_rows
).table
cols = table.columns
cols[0].width = Inches(4.59)
cols[1].width = Inches(2.06)
cols[2].width = Inches(2.4)
# Headers
table.cell(0, 0).text = _('Constraint')
table.cell(0, 1).text = _('Value')
table.cell(0, 2).text = '{} (+/-)'.format(_('Range'))
for i, constraint in enumerate(constraints, start=1):
if constraint['type'] == 'shapefile':
table.cell(i, 0).text = constraint['label']
table.cell(i, 1)._tc.set('gridSpan', str(2))
table.cell(i, 1).text = constraint['filename']
else:
table.cell(i, 0).text = constraint['label']
table.cell(i, 1).text = constraint['value']
table.cell(i, 2).text = constraint['range']
def add_presenter_notes(self, slide, context):
text_frame = slide.notes_slide.notes_text_frame
objective = context['objective']
location_label = context['location_label']
point = context['point']
elevation = context['elevation']
seedlot_year = context['seedlot_year']
site_year = context['site_year']
site_model = context['site_model']
method = context['method']
center = context['center']
location = (point['y'], point['x'])
method_text = self.get_transfer_method_text(method, center)
lines = [
((_('Objective:') + ' ', 12, True), (objective, 12, False)),
(('{}: '.format(location_label), 12, True), ('{}, {}'.format(*location), 12, False)),
((_('Elevation:') + ' ', 12, True), ('{} ft'.format(elevation), 12, False)),
((_('Climate Scenarios'), 12, True),),
((' {} '.format(_('Seedlot climate:')), 12, True), (seedlot_year, 12, False)),
((' {} '.format(_('Planting site climate:')), 12, True), ('{} {}'.format(site_year, site_model or ''), 12, False)),
((_('Transfer limit method:') + ' ', 12, True), (method_text, 12, False))
]
if method == 'seedzone':
band = context['band']
band_str = ", {}' - {}'".format(band[0], band[1]) if band else ''
lines += [
((_('Species:') + ' ', 12, True), (context['species'], 12, False)),
((_('Seed zone:') + ' ', 12, True), (context['zone'] + band_str, 12, False))
]
# Variables table
variables = context['variables']
name_width = max([len(_('Variable'))] + [len(x['label']) for x in variables]) + 3
center_width = max(
[len(_('Center'))] + [len(' '.join([str(x['value']), self.degree_sign(x['units'])])) for x in variables]
) + 3
transfer_width = max(
[len(_('Transfer limit') + ' (+/-)')] +
[
len('{} {}{}'.format(
x['limit'],
self.degree_sign(x['units']),
' ({})'.format(_('modified')) if x['modified'] else '')
)
for x in variables
]
)
lines += [
(('', 12, False),),
((_('Variables'), 12, True),),
((''.join([
_('Variable').ljust(name_width),
_('Center').ljust(center_width),
_('Transfer limit') + ' (+/-)'.ljust(transfer_width)
]), 12, False),),
(('-' * (name_width + center_width + transfer_width), 12, False),)
]
for variable in context['variables']:
units = self.degree_sign(variable['units'])
lines += [
((''.join([
variable['label'].ljust(name_width),
'{} {}'.format(variable['value'], units).ljust(center_width),
'{} {}{}'.format(
variable['limit'],
units,
' ({})'.format(_('modified')) if variable['modified'] else ''
)
]), 12, False),)
]
if context['constraints']:
# Constraints table
constraints = context['constraints']
name_width = max([len('Constraint')] + [len(x['label']) for x in constraints]) + 3
value_width = max(
[len(_('Value'))] +
[len(x['value']) for x in [c for c in constraints if c['type'] != 'shapefile']]
) + 3
range_width = max(
[len(_('Range') + ' (+/-)')] +
[len(x['range']) for x in [c for c in constraints if c['type'] != 'shapefile']]
) + 3
# Ensure we have room for shapefile name, if there is one
shape_constraint = [c for c in constraints if c['type'] == 'shapefile']
if shape_constraint:
filename_width = len(shape_constraint[0]['filename'])
if filename_width > value_width + range_width:
range_width = filename_width - value_width
lines += [
(('', 12, False),),
((_('Constraints'), 12, True),),
((''.join([
_('Constraint').ljust(name_width),
_('Value').ljust(value_width),
_('Range') + ' (+/-)'.ljust(range_width)
]), 12, False),),
(('-' * (name_width + value_width + range_width), 12, False),)
]
for constraint in constraints:
if constraint['type'] == 'shapefile':
lines += [
((''.join([
constraint['label'].ljust(name_width),
constraint['filename'].ljust(value_width + range_width)
]), 12, False),)
]
else:
lines += [
((''.join([
constraint['label'].ljust(name_width),
constraint['value'].ljust(value_width),
constraint['range'].ljust(range_width)
]), 12, False),)
]
self.add_text(text_frame, lines)
for paragraph in text_frame.paragraphs:
paragraph.font.name = 'Andale Mono'
def get_presentation(self, context):
self.presentation = Presentation(
os.path.join(os.path.dirname(__file__), 'templates', 'pptx', 'report.pptx')
)
self.width = Inches(self.presentation.slide_width / Inches(1))
self.height = Inches(self.presentation.slide_height / Inches(1))
self.render_template(dict(
coord_bottom=self.degree_sign(context['south']),
coord_right=self.degree_sign(context['east']),
coord_left=self.degree_sign(context['west']),
coord_top=self.degree_sign(context['north']),
scale_label=context['scale'],
map_image=Image.open(context['image_data']),
attribution=_('Generated {date} by the Seedlot Selection Tool').format(
date=datetime.datetime.today().strftime('%m/%d/%Y')
)
))
self.create_overview_slide(context)
self.create_variables_slide(context['variables'])
if context['constraints']:
self.create_constraints_slide(context['constraints'])
self.add_presenter_notes(self.presentation.slides[0], context)
return self.presentation
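# Illustrative usage sketch (assumed values, not part of the module). The keys
# mirror those consumed by get_presentation() and the create_*_slide() helpers
# above; 'image_data' must be a file-like object holding the map PNG.
#
#   context = {
#       'objective': 'Find seedlots', 'location_label': 'Planting site',
#       'point': {'x': -122.6, 'y': 45.5}, 'elevation': 350,
#       'seedlot_year': '1961-1990', 'site_year': '2041-2070',
#       'site_model': 'rcp85', 'method': 'custom', 'center': 'point',
#       'variables': [], 'constraints': [], 'scale': '1:500,000',
#       'north': '45.6', 'south': '45.4', 'east': '-122.5', 'west': '-122.7',
#       'image_data': open('map.png', 'rb'),
#   }
#   PPTCreator().get_presentation(context).save('report.pptx')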
| bsd-3-clause | -6,586,210,890,521,238,000 | 39.070028 | 128 | 0.515414 | false | 4.057005 | false | false | false |
jrobeson/platformio | platformio/builder/scripts/frameworks/cmsis.py | 7 | 1375 | # Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
"""
CMSIS
The ARM Cortex Microcontroller Software Interface Standard (CMSIS) is a
vendor-independent hardware abstraction layer for the Cortex-M processor
series and specifies debugger interfaces. The CMSIS enables consistent and
simple software interfaces to the processor for interface peripherals,
real-time operating systems, and middleware. It simplifies software
re-use, reducing the learning curve for new microcontroller developers
and cutting the time-to-market for devices.
http://www.arm.com/products/processors/cortex-m/cortex-microcontroller-software-interface-standard.php
"""
from os.path import join
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
env.Replace(
PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-cmsis")
)
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkCMSIS"),
join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}")
)
env.Append(
CPPPATH=[
join("$BUILD_DIR", "FrameworkCMSIS"),
join("$BUILD_DIR", "FrameworkCMSISVariant")
]
)
envsafe = env.Clone()
#
# Target: Build Core Library
#
libs = []
libs.append(envsafe.BuildLibrary(
join("$BUILD_DIR", "FrameworkCMSISVariant"),
join("$PLATFORMFW_DIR", "variants", "${BOARD_OPTIONS['build']['variant']}")
))
env.Append(LIBS=libs)
| mit | -2,500,168,662,549,293,000 | 25.442308 | 102 | 0.735273 | false | 3.552972 | false | false | false |
bq/bitbloq-offline | app/res/web2board/darwin/Web2Board.app/Contents/Resources/res/Scons/sconsFiles/SCons/Platform/darwin.py | 5 | 2523 | """engine.SCons.Platform.darwin
Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/darwin.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import os
import posix
def generate(env):
posix.generate(env)
env['SHLIBSUFFIX'] = '.dylib'
# put macports paths at front to override Apple's versions, fink path is after
# For now let people who want Macports or Fink tools specify it!
# env['ENV']['PATH'] = '/opt/local/bin:/opt/local/sbin:' + env['ENV']['PATH'] + ':/sw/bin'
# Store extra system paths in env['ENV']['PATHOSX']
filelist = ['/etc/paths',]
# make sure this works on Macs with Tiger or earlier
try:
dirlist = os.listdir('/etc/paths.d')
except:
dirlist = []
for file in dirlist:
filelist.append('/etc/paths.d/'+file)
for file in filelist:
if os.path.isfile(file):
f = open(file, 'r')
lines = f.readlines()
for line in lines:
if line:
env.AppendENVPath('PATHOSX', line.strip('\n'))
f.close()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 | -882,511,768,773,137,900 | 34.041667 | 109 | 0.692826 | false | 3.875576 | false | false | false |
NoBodyCam/TftpPxeBootBareMetal | nova/api/openstack/compute/contrib/security_groups.py | 1 | 21571 | # Copyright 2011 OpenStack LLC.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
from xml.dom import minidom
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def make_rule(elem):
elem.set('id')
elem.set('parent_group_id')
proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
proto.text = 'ip_protocol'
from_port = xmlutil.SubTemplateElement(elem, 'from_port')
from_port.text = 'from_port'
to_port = xmlutil.SubTemplateElement(elem, 'to_port')
to_port.text = 'to_port'
group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
name = xmlutil.SubTemplateElement(group, 'name')
name.text = 'name'
tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
tenant_id.text = 'tenant_id'
ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
selector='ip_range')
cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
cidr.text = 'cidr'
def make_sg(elem):
elem.set('id')
elem.set('tenant_id')
elem.set('name')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
rules = xmlutil.SubTemplateElement(elem, 'rules')
rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
make_rule(rule)
sg_nsmap = {None: wsgi.XMLNS_V11}
class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group_rule',
selector='security_group_rule')
make_rule(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group',
selector='security_group')
make_sg(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_groups')
elem = xmlutil.SubTemplateElement(root, 'security_group',
selector='security_groups')
make_sg(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request"""
dom = minidom.parseString(string)
security_group = {}
sg_node = self.find_first_child_named(dom,
'security_group')
if sg_node is not None:
if sg_node.hasAttribute('name'):
security_group['name'] = sg_node.getAttribute('name')
desc_node = self.find_first_child_named(sg_node,
"description")
if desc_node:
security_group['description'] = self.extract_text(desc_node)
return {'body': {'security_group': security_group}}
class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request"""
dom = minidom.parseString(string)
security_group_rule = self._extract_security_group_rule(dom)
return {'body': {'security_group_rule': security_group_rule}}
def _extract_security_group_rule(self, node):
"""Marshal the security group rule attribute of a parsed request"""
sg_rule = {}
sg_rule_node = self.find_first_child_named(node,
'security_group_rule')
if sg_rule_node is not None:
ip_protocol_node = self.find_first_child_named(sg_rule_node,
"ip_protocol")
if ip_protocol_node is not None:
sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)
from_port_node = self.find_first_child_named(sg_rule_node,
"from_port")
if from_port_node is not None:
sg_rule['from_port'] = self.extract_text(from_port_node)
to_port_node = self.find_first_child_named(sg_rule_node, "to_port")
if to_port_node is not None:
sg_rule['to_port'] = self.extract_text(to_port_node)
parent_group_id_node = self.find_first_child_named(sg_rule_node,
"parent_group_id")
if parent_group_id_node is not None:
sg_rule['parent_group_id'] = self.extract_text(
parent_group_id_node)
group_id_node = self.find_first_child_named(sg_rule_node,
"group_id")
if group_id_node is not None:
sg_rule['group_id'] = self.extract_text(group_id_node)
cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
if cidr_node is not None:
sg_rule['cidr'] = self.extract_text(cidr_node)
return sg_rule
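    # Example request body this deserializer is meant to handle (constructed
    # for illustration, not taken from the original source); it would yield
    # {'ip_protocol': 'tcp', 'from_port': '22', 'to_port': '22',
    #  'cidr': '10.0.0.0/24', 'parent_group_id': '1'}:
    #
    #   <security_group_rule>
    #     <ip_protocol>tcp</ip_protocol>
    #     <from_port>22</from_port>
    #     <to_port>22</to_port>
    #     <cidr>10.0.0.0/24</cidr>
    #     <parent_group_id>1</parent_group_id>
    #   </security_group_rule>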
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = NativeSecurityGroupAPI()
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule):
sg_rule = {}
sg_rule['id'] = rule.id
sg_rule['parent_group_id'] = rule.parent_group_id
sg_rule['ip_protocol'] = rule.protocol
sg_rule['from_port'] = rule.from_port
sg_rule['to_port'] = rule.to_port
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule.group_id:
source_group = self.security_group_api.get(context,
id=rule.group_id)
sg_rule['group'] = {'name': source_group.name,
'tenant_id': source_group.project_id}
else:
sg_rule['ip_range'] = {'cidr': rule.cidr}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group.id
security_group['description'] = group.description
security_group['name'] = group.name
security_group['tenant_id'] = group.project_id
security_group['rules'] = []
for rule in group.rules:
security_group['rules'] += [self._format_security_group_rule(
context, rule)]
return security_group
def _authorize_context(self, req):
context = req.environ['nova.context']
authorize(context)
return context
def _validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
raise exc.HTTPBadRequest(explanation=msg)
def _from_body(self, body, key):
if not body:
raise exc.HTTPUnprocessableEntity()
value = body.get(key, None)
if value is None:
raise exc.HTTPUnprocessableEntity()
return value
class SecurityGroupController(SecurityGroupControllerBase):
"""The Security group API controller for the OpenStack API."""
@wsgi.serializers(xml=SecurityGroupTemplate)
def show(self, req, id):
"""Return data about the given security group."""
context = self._authorize_context(req)
id = self._validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
def delete(self, req, id):
"""Delete a security group."""
context = self._authorize_context(req)
id = self._validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req):
"""Returns a list of security groups"""
context = self._authorize_context(req)
raw_groups = self.security_group_api.list(context,
project=context.project_id)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
@wsgi.serializers(xml=SecurityGroupTemplate)
@wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
def create(self, req, body):
"""Creates a new security group."""
context = self._authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create(context, group_name,
group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupRuleTemplate)
@wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
def create(self, req, body):
context = self._authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
parent_group_id = self._validate_id(sg_rule.get('parent_group_id',
None))
security_group = self.security_group_api.get(context, None,
parent_group_id, map_exception=True)
try:
values = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except Exception as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
if values is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
values['parent_group_id'] = security_group.id
if self.security_group_api.rule_exists(security_group, values):
msg = _('This rule already exists in group %s') % parent_group_id
raise exc.HTTPBadRequest(explanation=msg)
security_group_rule = self.security_group_api.add_rules(
context, parent_group_id, security_group['name'], [values])[0]
return {"security_group_rule": self._format_security_group_rule(
context,
security_group_rule)}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
group_id = self._validate_id(group_id)
#check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def delete(self, req, id):
context = self._authorize_context(req)
id = self._validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule.parent_group_id
security_group = self.security_group_api.get(context, None, group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = self._authorize_context(req)
self.security_group_api.ensure_default(context)
try:
instance = self.compute_api.get(context, server_id)
except exception.InstanceNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
groups = db.security_group_get_by_instance(context, instance['id'])
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = NativeSecurityGroupAPI()
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return group_name
def _invoke(self, method, context, id, group_name):
try:
instance = self.compute_api.get(context, id)
method(context, instance, group_name)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
except exception.InstanceNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
return webob.Response(status_int=202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _extend_servers(self, req, servers):
key = "security_groups"
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[key] = [{"name": group["name"]} for group in groups]
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
resp_obj.attach(xml=SecurityGroupServerTemplate())
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
resp_obj.attach(xml=SecurityGroupServersTemplate())
self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroupsTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return "security_groups" in datum
def make_server(elem):
secgrps = SecurityGroupsTemplateElement('security_groups')
elem.append(secgrps)
secgrp = xmlutil.SubTemplateElement(secgrps, 'security_group',
selector="security_groups")
secgrp.set('name')
class SecurityGroupServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1)
class SecurityGroupServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1)
class Security_groups(extensions.ExtensionDescriptor):
"""Security group support"""
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
updated = "2011-07-21T00:00:00+00:00"
def get_controller_extensions(self):
controller = SecurityGroupActionController()
actions = extensions.ControllerExtension(self, 'servers', controller)
controller = SecurityGroupsOutputController()
output = extensions.ControllerExtension(self, 'servers', controller)
return [actions, output]
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-security-groups',
controller=SecurityGroupController())
resources.append(res)
res = extensions.ResourceExtension('os-security-group-rules',
controller=SecurityGroupRulesController())
resources.append(res)
res = extensions.ResourceExtension(
'os-security-groups',
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
class NativeSecurityGroupAPI(compute.api.SecurityGroupAPI):
@staticmethod
def raise_invalid_property(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_group_already_exists(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_invalid_group(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise exception.InvalidCidr(cidr=cidr)
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
raise exc.HTTPNotFound(explanation=msg)
| apache-2.0 | -4,588,146,646,452,176,000 | 36.191379 | 79 | 0.597283 | false | 4.256314 | false | false | false |
sihrc/indikitty | indikitty/process.py | 1 | 1721 | import base64
import logging
from itertools import izip
from cStringIO import StringIO
import cv2
import numpy as np
from skimage.io import imread
import indicoio
from .keys import INDICO_API_KEY
indicoio.api_key = INDICO_API_KEY
SERVER_URL = "http://localhost:3000/random"
def get_faces_dimens(image_string, bounds):
try:
result = indicoio.facial_localization(image_string)
faces = []
for face in result:
x1, y1 = face["top_left_corner"]
x2, y2 = face["bottom_right_corner"]
faces.append((x1, y1, x2, y2))
return faces
except Exception as e:
logger.error(e)
def get_suitable_cat(width, height):
image = imread(SERVER_URL)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
resized_image = cv2.resize(image, (width, height))
return resized_image
def show(img):
cv2.imshow("result", img)
cv2.waitKey()
def process(input_url):
input_image = imread(input_url)
input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
image_string = base64.b64encode(cv2.imencode(".png", input_image)[1].tostring())
faces = get_faces_dimens(image_string, input_image.shape)
cats = []
for x1, y1, x2, y2 in faces:
width, height = x2 - x1, y2 - y1
cat = get_suitable_cat(width, height)
cats.append(cat)
for (x1, y1, x2, y2), cat in izip(faces, cats):
if cat.shape[2] > 3:
mask = np.where(cat[:,:,3])
input_image[y1:y2, x1:x2, :][mask] = cat[:,:,:3][mask]
else:
input_image[y1:y2, x1:x2, :] = cat
output = StringIO()
output.write(cv2.imencode(".png", input_image)[1].tostring())
output.seek(0)
return output
| mit | 1,366,001,676,076,868,400 | 27.213115 | 84 | 0.622313 | false | 2.987847 | false | false | false |
cloudkick/libcloud | libcloud/compute/drivers/gogrid.py | 1 | 15600 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GoGrid driver
"""
import time
import hashlib
try:
import json
except ImportError:
import simplejson as json
from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.types import MalformedResponseError
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
HOST = 'api.gogrid.com'
PORTS_BY_SECURITY = { True: 443, False: 80 }
API_VERSION = '1.7'
STATE = {
"Starting": NodeState.PENDING,
"On": NodeState.RUNNING,
"Off": NodeState.PENDING,
"Restarting": NodeState.REBOOTING,
"Saving": NodeState.PENDING,
"Restoring": NodeState.PENDING,
}
GOGRID_INSTANCE_TYPES = {'512MB': {'id': '512MB',
'name': '512MB',
'ram': 512,
'disk': 30,
'bandwidth': None,
'price':0.095},
'1GB': {'id': '1GB',
'name': '1GB',
'ram': 1024,
'disk': 60,
'bandwidth': None,
'price':0.19},
'2GB': {'id': '2GB',
'name': '2GB',
'ram': 2048,
'disk': 120,
'bandwidth': None,
'price':0.38},
'4GB': {'id': '4GB',
'name': '4GB',
'ram': 4096,
'disk': 240,
'bandwidth': None,
'price':0.76},
'8GB': {'id': '8GB',
'name': '8GB',
'ram': 8192,
'disk': 480,
'bandwidth': None,
'price':1.52}}
class GoGridResponse(Response):
def success(self):
if self.status == 403:
raise InvalidCredsError('Invalid credentials', GoGridNodeDriver)
if self.status == 401:
raise InvalidCredsError('API Key has insufficient rights', GoGridNodeDriver)
if not self.body:
return None
try:
return json.loads(self.body)['status'] == 'success'
except ValueError:
raise MalformedResponseError('Malformed reply', body=self.body, driver=GoGridNodeDriver)
def parse_body(self):
if not self.body:
return None
return json.loads(self.body)
def parse_error(self):
try:
return json.loads(self.body)["list"][0]['message']
except ValueError:
return None
class GoGridConnection(ConnectionUserAndKey):
"""
Connection class for the GoGrid driver
"""
host = HOST
responseCls = GoGridResponse
def add_default_params(self, params):
params["api_key"] = self.user_id
params["v"] = API_VERSION
params["format"] = 'json'
params["sig"] = self.get_signature(self.user_id, self.key)
return params
def get_signature(self, key, secret):
""" create sig from md5 of key + secret + time """
m = hashlib.md5(key+secret+str(int(time.time())))
return m.hexdigest()
class GoGridIpAddress(object):
"""
IP Address
"""
def __init__(self, id, ip, public, state, subnet):
self.id = id
self.ip = ip
self.public = public
self.state = state
self.subnet = subnet
class GoGridNode(Node):
# Generating uuid based on public ip to get around missing id on
# create_node in gogrid api
#
# Used public ip since it is not mutable and specified at create time,
# so uuid of node should not change after add is completed
def get_uuid(self):
return hashlib.sha1(
"%s:%d" % (self.public_ip,self.driver.type)
).hexdigest()
class GoGridNodeDriver(NodeDriver):
"""
GoGrid node driver
"""
connectionCls = GoGridConnection
type = Provider.GOGRID
name = 'GoGrid'
features = {"create_node": ["generates_password"]}
_instance_types = GOGRID_INSTANCE_TYPES
def _get_state(self, element):
try:
return STATE[element['state']['name']]
except:
pass
return NodeState.UNKNOWN
def _get_ip(self, element):
return element.get('ip').get('ip')
def _get_id(self, element):
return element.get('id')
def _to_node(self, element, password=None):
state = self._get_state(element)
ip = self._get_ip(element)
id = self._get_id(element)
n = GoGridNode(id=id,
name=element['name'],
state=state,
public_ip=[ip],
private_ip=[],
extra={'ram': element.get('ram').get('name'),
'isSandbox': element['isSandbox'] == 'true'},
driver=self.connection.driver)
if password:
n.extra['password'] = password
return n
def _to_image(self, element):
n = NodeImage(id=element['id'],
name=element['friendlyName'],
driver=self.connection.driver)
return n
def _to_images(self, object):
return [ self._to_image(el)
for el in object['list'] ]
def _to_location(self, element):
location = NodeLocation(id=element['id'],
name=element['name'],
country="US",
driver=self.connection.driver)
return location
def _to_ip(self, element):
ip = GoGridIpAddress(id=element['id'],
ip=element['ip'],
public=element['public'],
subnet=element['subnet'],
state=element["state"]["name"])
ip.location = self._to_location(element['datacenter'])
return ip
def _to_ips(self, object):
return [ self._to_ip(el)
for el in object['list'] ]
def _to_locations(self, object):
return [self._to_location(el)
for el in object['list']]
def list_images(self, location=None):
params = {}
if location is not None:
params["datacenter"] = location.id
images = self._to_images(
self.connection.request('/api/grid/image/list', params).object)
return images
def list_nodes(self):
passwords_map = {}
res = self._server_list()
try:
for password in self._password_list()['list']:
try:
passwords_map[password['server']['id']] = password['password']
except KeyError:
pass
except InvalidCredsError:
# some gogrid API keys don't have permission to access the password list.
pass
return [ self._to_node(el, passwords_map.get(el.get('id')))
for el
in res['list'] ]
def reboot_node(self, node):
id = node.id
power = 'restart'
res = self._server_power(id, power)
if not res.success():
raise Exception(res.parse_error())
return True
def destroy_node(self, node):
id = node.id
res = self._server_delete(id)
if not res.success():
raise Exception(res.parse_error())
return True
def _server_list(self):
return self.connection.request('/api/grid/server/list').object
def _password_list(self):
return self.connection.request('/api/support/password/list').object
def _server_power(self, id, power):
# power in ['start', 'stop', 'restart']
params = {'id': id, 'power': power}
return self.connection.request("/api/grid/server/power", params,
method='POST')
def _server_delete(self, id):
params = {'id': id}
return self.connection.request("/api/grid/server/delete", params,
method='POST')
def _get_first_ip(self, location=None):
ips = self.ex_list_ips(public=True, assigned=False, location=location)
try:
return ips[0].ip
except IndexError:
raise LibcloudError('No public unassigned IPs left',
GoGridNodeDriver)
def list_sizes(self, location=None):
return [ NodeSize(driver=self.connection.driver, **i)
for i in self._instance_types.values() ]
def list_locations(self):
locations = self._to_locations(
self.connection.request('/api/common/lookup/list',
params={'lookup': 'ip.datacenter'}).object)
return locations
def ex_create_node_nowait(self, **kwargs):
"""Don't block until GoGrid allocates id for a node
but return right away with id == None.
        The existence of this method is explained by the fact
        that GoGrid assigns an id to a node only a few minutes after
creation."""
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
try:
ip = kwargs['ex_ip']
except KeyError:
ip = self._get_first_ip(kwargs.get('location'))
params = {'name': name,
'image': image.id,
'description': kwargs.get('ex_description', ''),
'isSandbox': str(kwargs.get('ex_issandbox', False)).lower(),
'server.ram': size.id,
'ip': ip}
object = self.connection.request('/api/grid/server/add',
params=params, method='POST').object
node = self._to_node(object['list'][0])
return node
def create_node(self, **kwargs):
"""Create a new GoGird node
See L{NodeDriver.create_node} for more keyword args.
@keyword ex_description: Description of a Node
@type ex_description: C{string}
        @keyword ex_issandbox: Should the server be a sandbox?
@type ex_issandbox: C{bool}
        @keyword ex_ip: Public IP address to use for a Node. If not
                        specified, the first available IP address will be picked
@type ex_ip: C{string}
"""
node = self.ex_create_node_nowait(**kwargs)
timeout = 60 * 20
waittime = 0
interval = 2 * 60
while node.id is None and waittime < timeout:
nodes = self.list_nodes()
for i in nodes:
if i.public_ip[0] == node.public_ip[0] and i.id is not None:
return i
waittime += interval
time.sleep(interval)
        if node.id is None:
raise Exception("Wasn't able to wait for id allocation for the node %s" % str(node))
return node
def ex_save_image(self, node, name):
"""Create an image for node.
        Please refer to the GoGrid documentation for info on
        how to prepare a node for image creation:
http://wiki.gogrid.com/wiki/index.php/MyGSI
@keyword node: node to use as a base for image
@type node: L{Node}
@keyword name: name for new image
@type name: C{string}
"""
params = {'server': node.id,
'friendlyName': name}
object = self.connection.request('/api/grid/image/save', params=params,
method='POST').object
return self._to_images(object)[0]
def ex_edit_node(self, **kwargs):
"""Change attributes of a node.
@keyword node: node to be edited
@type node: L{Node}
@keyword size: new size of a node
@type size: L{NodeSize}
@keyword ex_description: new description of a node
@type ex_description: C{string}
"""
node = kwargs['node']
size = kwargs['size']
params = {'id': node.id,
'server.ram': size.id}
if 'ex_description' in kwargs:
params['description'] = kwargs['ex_description']
object = self.connection.request('/api/grid/server/edit',
params=params).object
return self._to_node(object['list'][0])
def ex_edit_image(self, **kwargs):
"""Edit metadata of a server image.
@keyword image: image to be edited
@type image: L{NodeImage}
        @keyword public: should the image be public?
@type public: C{bool}
@keyword ex_description: description of the image (optional)
@type ex_description: C{string}
@keyword name: name of the image
        @type name: C{string}
"""
image = kwargs['image']
public = kwargs['public']
params = {'id': image.id,
'isPublic': str(public).lower()}
if 'ex_description' in kwargs:
params['description'] = kwargs['ex_description']
if 'name' in kwargs:
params['friendlyName'] = kwargs['name']
object = self.connection.request('/api/grid/image/edit',
params=params).object
return self._to_image(object['list'][0])
def ex_list_ips(self, **kwargs):
"""Return list of IP addresses assigned to
the account.
@keyword public: set to True to list only
public IPs or False to list only
                         private IPs. Set to None or do not specify
                         at all to avoid filtering by type
@type public: C{bool}
@keyword assigned: set to True to list only addresses
assigned to servers, False to list unassigned
                           addresses, and set to None or don't set at all
                           to avoid filtering by state
@type assigned: C{bool}
@keyword location: filter IP addresses by location
@type location: L{NodeLocation}
@return: C{list} of L{GoGridIpAddress}es
"""
params = {}
if "public" in kwargs and kwargs["public"] is not None:
params["ip.type"] = {True: "Public",
False: "Private"}[kwargs["public"]]
if "assigned" in kwargs and kwargs["assigned"] is not None:
params["ip.state"] = {True: "Assigned",
False: "Unassigned"}[kwargs["assigned"]]
if "location" in kwargs and kwargs['location'] is not None:
params['datacenter'] = kwargs['location'].id
ips = self._to_ips(
self.connection.request('/api/grid/ip/list',
params=params).object)
return ips
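# --- Illustrative usage sketch (not part of the original driver) ---
# The key/secret and node parameters below are placeholders; the calls simply
# mirror the public methods defined above and would need valid GoGrid
# credentials to actually run:
#
#   driver = GoGridNodeDriver('api-key', 'api-secret')
#   sizes = driver.list_sizes()
#   images = driver.list_images()
#   node = driver.create_node(name='www1', image=images[0], size=sizes[0])
#   for n in driver.list_nodes():
#       print n.name, n.public_ip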
| apache-2.0 | 6,769,058,029,211,202,000 | 32.191489 | 100 | 0.54641 | false | 4.25184 | false | false | false |
cah-rfelsburg/sflvault | server/sflvault/model/__init__.py | 2 | 15281 | # -=- encoding: utf-8 -=-
#
# SFLvault - Secure networked password store and credentials manager.
#
# Copyright (C) 2008 Savoir-faire Linux inc.
#
# Author: Alexandre Bourget <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from base64 import b64decode, b64encode
from datetime import datetime
import re
from Crypto.PublicKey import ElGamal
from sqlalchemy import Column, MetaData, Table, types, ForeignKey
from sqlalchemy.orm import mapper, relation, backref
from sqlalchemy.orm import scoped_session, sessionmaker, eagerload, lazyload
from sqlalchemy.orm import eagerload_all
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy import sql
from sflvault.model import meta
from sflvault.model.meta import Session, metadata
from sflvault.model.custom_types import JSONEncodedDict
from sflvault.common.crypto import *
from zope.sqlalchemy import ZopeTransactionExtension
# TODO: add an __all__ statement here, to speed up loading...
def init_model(engine):
"""Call me before using any of the tables or classes in the model."""
sm = sessionmaker(autoflush=True,
bind=engine,
expire_on_commit=False,
extension=ZopeTransactionExtension())
meta.engine = engine
meta.Session = scoped_session(sm)
users_table = Table("users", metadata,
Column('id', types.Integer, primary_key=True),
Column('username', types.Unicode(50)),
# ElGamal user's public key.
Column('pubkey', types.Text),
# Used in the login/authenticate challenge
Column('logging_token', types.Binary(35)),
# Time until the token is valid.
Column('logging_timeout', types.DateTime),
# This stamp is used to wipe users which haven't 'setup'
# their account before this date/time
Column('waiting_setup', types.DateTime, nullable=True),
Column('created_time', types.DateTime,
default=datetime.now),
# Admin flag, allows to add users, and grant access.
Column('is_admin', types.Boolean, default=False)
)
usergroups_table = Table('users_groups', metadata,
Column('id', types.Integer, primary_key=True),
Column('user_id', types.Integer,
ForeignKey('users.id')),
Column('group_id', types.Integer,
ForeignKey('groups.id')),
Column('is_admin', types.Boolean, default=False),
Column('cryptgroupkey', types.Text),
)
groups_table = Table('groups', metadata,
Column('id', types.Integer, primary_key=True),
Column('name', types.Unicode(50)),
Column('hidden', types.Boolean, default=False),
# ElGamal group's public key
Column('pubkey', types.Text),
)
servicegroups_table = Table('services_groups', metadata,
Column('id', types.Integer, primary_key=True),
Column('service_id', types.Integer,
ForeignKey('services.id')),
Column('group_id', types.Integer,
ForeignKey('groups.id')),
Column('cryptsymkey', types.Text),
)
customers_table = Table('customers', metadata,
Column('id', types.Integer, primary_key=True),
Column('name', types.Unicode(100)),
Column('created_time', types.DateTime),
                        # username, even if the user has been deleted.
Column('created_user', types.Unicode(50))
)
machines_table = Table('machines', metadata,
Column('id', types.Integer, primary_key=True),
Column('customer_id', types.Integer, ForeignKey('customers.id')), # relation customers
Column('created_time', types.DateTime,
default=datetime.now),
                       # Human-readable name, a short description
Column('name', types.Unicode(150)),
                       # Full domain name.
Column('fqdn', types.Unicode(150)),
                       # IP address if static, otherwise 'dyn'
Column('ip', types.String(100)),
                       # Where this server is located: geographic location, and
                       # its place in the city and in its enclosure (4th?)
Column('location', types.Text),
                       # Notes about the server, references, URLs, etc.
Column('notes', types.Text)
)
# Each ssh or web app. service that have a password.
services_table = Table('services', metadata,
Column('id', types.Integer, primary_key=True),
# Service lies on which Machine ?
Column('machine_id', types.Integer,
ForeignKey('machines.id')),
# Hierarchical service required to access this one ?
Column('parent_service_id', types.Integer,
ForeignKey('services.id')),
# REMOVED: replaced by servicegroups_table many-to-many.
#Column('group_id', types.Integer,
# ForeignKey('groups.id')),
Column('url', types.String(250)), # Full service desc.
# simplejson'd python structures, depends on url scheme
Column('metadata', JSONEncodedDict), # reserved.
Column('notes', types.Text),
Column('secret', types.Text),
Column('secret_last_modified', types.DateTime,
default=datetime.now)
)
class Service(object):
def __repr__(self):
return "<Service s#%d: %s>" % (self.id, self.url)
class Machine(object):
def __repr__(self):
return "<Machine m#%d: %s (%s %s)>" % (self.id if self.id else 0,
self.name, self.fqdn, self.ip)
class User(object):
def setup_expired(self):
"""Return True/False if waiting_setup has expired"""
if self.waiting_setup and self.waiting_setup < datetime.now():
return True
else:
return False
def elgamal(self):
"""Return the ElGamal object, ready to encrypt stuff."""
e = ElGamal.ElGamalobj()
(e.p, e.g, e.y) = unserial_elgamal_pubkey(self.pubkey)
return e
def __repr__(self):
return "<User u#%d: %s>" % (self.id, self.username)
class UserGroup(object):
"""Membership of a user to a group"""
def __init__(self, user=None):
if user:
self.user = user
def __repr__(self):
return "<UserGroup element>"
class ServiceGroup(object):
"""membership of a service to a group"""
def __init__(self, service=None):
if service:
self.service = service
def __repr__(self):
return "<ServiceGroup element>"
class Group(object):
def __repr__(self):
return "<Group: %s>" % (self.name)
def elgamal(self):
"""Return the ElGamal object, ready to encrypt stuff."""
e = ElGamal.ElGamalobj()
(e.p, e.g, e.y) = unserial_elgamal_pubkey(self.pubkey)
return e
class Customer(object):
def __repr__(self):
return "<Customer c#%d: %s>" % (self.id, self.name)
# User
# .groups_assoc
# UserGroup
# .group
# Group
# .services_assoc
# ServiceGroup
# .service
# Service
# Service
# .groups_assoc
# ServiceGroup
# .group
# Group
# .users_assoc
# UserGroup
# .user
# User
# Map each class to its corresponding table.
mapper(User, users_table, {
# Quick access to services...
'services': relation(Service,
secondary=usergroups_table.join(servicegroups_table, usergroups_table.c.group_id==servicegroups_table.c.group_id),
backref='users',
viewonly=True,
),
'groups_assoc': relation(UserGroup, backref='user')
})
User.groups = association_proxy('groups_assoc', 'group')
mapper(UserGroup, usergroups_table, {
'group': relation(Group, backref='users_assoc')
})
mapper(Group, groups_table, {
'services_assoc': relation(ServiceGroup, backref='group')
})
Group.users = association_proxy('users_assoc', 'user')
Group.services = association_proxy('services_assoc', 'service')
mapper(ServiceGroup, servicegroups_table, {
'service': relation(Service, backref='groups_assoc')
})
mapper(Service, services_table, {
'children': relation(Service,
lazy=False,
backref=backref('parent', uselist=False,
remote_side=[services_table.c.id]),
primaryjoin=services_table.c.parent_service_id==services_table.c.id)
})
Service.groups = association_proxy('groups_assoc', 'group')
mapper(Machine, machines_table, {
'services': relation(Service, backref='machine', lazy=False)
})
mapper(Customer, customers_table, {
'machines': relation(Machine, backref='customer', lazy=False)
})
################ Helper functions ################
def query(cls):
"""Shortcut to meta.Session.query(cls)"""
return meta.Session.query(cls)
def get_user(user, eagerload_all_=None):
"""Get a user provided a username or an int(user_id), possibly eager
loading some relations.
"""
if isinstance(user, int):
uq = query(User).filter_by(id=user)
else:
uq = query(User).filter_by(username=user)
if eagerload_all_:
uq = uq.options(eagerload_all(eagerload_all_))
usr = uq.first()
if not usr:
raise LookupError("Invalid user: %s" % user)
return usr
def get_objects_ids(objects_ids, object_type):
"""Return a list of valid IDs for certain object types.
objects_ids - Must be a list of str or ints
    object_type - One of 'groups', 'machines', 'customers'
"""
return get_objects_list(objects_ids, object_type, return_objects=False)[1]
def get_objects_list(objects_ids, object_type, eagerload_all_=None,
return_objects=True):
"""Get a list of objects by their IDs, either as int or str. Make
sure we return a list of integers as IDs.
object_type - the type of object to be returned. It must be one of
['groups', 'machines', 'customers']
return_objects - whether to return the actual objects or not.
"""
objects_types_assoc = {'groups': Group,
'machines': Machine,
'customers': Customer}
# Check if object_type is valid
if object_type not in objects_types_assoc:
raise ValueError("Invalid object type: %s" % (object_type))
# Get variables
if isinstance(objects_ids, str):
objects_ids = [int(objects_ids)]
elif isinstance(objects_ids, int):
objects_ids = [objects_ids]
elif isinstance(objects_ids, list):
objects_ids = [int(x) for x in objects_ids]
else:
raise ValueError("Invalid %s specification" % (object_type))
# Pull the objects/IDs from the DB
obj = objects_types_assoc[object_type]
if return_objects:
objects_q = query(obj).filter(obj.id.in_(objects_ids))
if eagerload_all_:
objects_q = objects_q.options(eagerload_all(eagerload_all_))
objects = objects_q.all()
else:
objects_q = sql.select([obj.id]).where(obj.id.in_(objects_ids))
objects = meta.Session.execute(objects_q).fetchall()
if len(objects) != len(objects_ids):
# Woah, you specified objects that didn't exist ?
valid_objects = [x.id for x in objects]
invalid_objects = [x for x in objects_ids if x not in valid_objects]
raise ValueError("Invalid %s: %s" % (object_type, invalid_objects))
return (objects if return_objects else None, objects_ids)
def search_query(swords, filters=None, verbose=False):
# Create the join..
sel = sql.outerjoin(customers_table, machines_table).outerjoin(services_table)
if filters:
# Remove filters that are just None
filters = dict([(x, filters[x]) for x in filters if filters[x]])
if not isinstance(filters, dict):
raise RuntimeError("filters param must be a dict, or None")
if [True for x in filters if not isinstance(filters[x], list)]:
raise RuntimeError("filters themselves must be a list of ints")
if 'groups' in filters:
sel = sel.join(servicegroups_table)
sel = sel.select(use_labels=True)
if filters:
if 'groups' in filters:
sel = sel.where(ServiceGroup.group_id.in_(filters['groups']))
if 'machines' in filters:
sel = sel.where(Machine.id.in_(filters['machines']))
if 'customers' in filters:
sel = sel.where(Customer.id.in_(filters['customers']))
# Fields to search in..
textfields = [Customer.name,
Machine.name,
Machine.fqdn,
Machine.ip,
Machine.location,
Machine.notes,
Service.url,
Service.notes]
numfields = [Customer.id,
Machine.id,
Service.id]
# TODO: distinguish between INTEGER fields and STRINGS and search
# differently (check only ==, and only if word can be converted to int())
andlist = []
for word in swords:
orlist = [field.ilike('%%%s%%' % word) for field in textfields]
if word.isdigit():
# Search numeric fields too
orlist += [field == int(word) for field in numfields]
orword = sql.or_(*orlist)
andlist.append(orword)
sel = sel.where(sql.and_(*andlist))
sel = sel.order_by(Machine.name, Service.url)
return meta.Session.execute(sel)
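# --- Illustrative usage sketch (not part of the original module) ---
# Assumes init_model() has been called with a valid SQLAlchemy engine and that
# the tables exist; the user name, group ids and search words are placeholders:
#
#   init_model(engine)
#   admin = get_user('admin')
#   groups, group_ids = get_objects_list([1, 2], 'groups')
#   for row in search_query(['web', 'ssh'], filters={'groups': group_ids}):
#       print row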
| gpl-3.0 | -5,143,276,163,699,025,000 | 36.24878 | 139 | 0.567313 | false | 4.23869 | false | false | false |
ntkurapati/navidile4 | src/nameparser.py | 1 | 14484 | # -*- coding: utf-8 -*-
"""
A simple Python module for parsing human names into their individual components.
Components::
* Title
* First name
* Middle names
* Last names
* Suffixes
Works for a variety of common name formats for latin-based languages. Over
100 unit tests with example names. Should be unicode safe but it's fairly untested.
HumanName instances will pass an equals (==) test if their lower case unicode
representations are the same.
--------
Copyright Derek Gulbranson, May 2009 <derek73 at gmail>.
http://code.google.com/p/python-nameparser
Parser logic based on PHP nameParser.php by G. Miernicki
http://code.google.com/p/nameparser/
LGPL
http://www.opensource.org/licenses/lgpl-license.html
This library is free software; you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free Software
Foundation; either version 2.1 of the License, or (at your option) any later
version.
This library is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
"""
__author__ = "Derek Gulbranson"
__revision__ = "$Id: nameparser.py 25 2010-08-18 19:57:57Z derek73 $"
__version__ = "0.1.2"
__license__ = "LGPL"
__url__ = "http://code.google.com/p/python-nameparser"
TITLES = [
'dr','doctor','miss','misses','mr','mister','mrs','ms','sir',
'rev','madam','madame','AB','2ndLt','Amn','1stLt','A1C','Capt','SrA','Maj',
'SSgt','LtCol','TSgt','Col','BrigGen','1stSgt','MajGen','SMSgt','LtGen',
'1stSgt','Gen','CMSgt','1stSgt','CCMSgt','CMSAF','PVT','2LT','PV2','1LT',
'PFC','CPT','SPC','MAJ','CPL','LTC','SGT','COL','SSG','BG','SFC','MG',
'MSG','LTG','1SGT','GEN','SGM','CSM','SMA','WO1','WO2','WO3','WO4','WO5',
'ENS','SA','LTJG','SN','LT','PO3','LCDR','PO2','CDR','PO1','CAPT','CPO',
'RADM(LH)','SCPO','RADM(UH)','MCPO','VADM','MCPOC','ADM','MPCO-CG','CWO-2',
'CWO-3','CWO-4','Pvt','2ndLt','PFC','1stLt','LCpl','Capt','Cpl','Maj','Sgt',
'LtCol','SSgt','Col','GySgt','BGen','MSgt','MajGen','1stSgt','LtGen','MGySgt',
'Gen','SgtMaj','SgtMajMC','WO-1','CWO-2','CWO-3','CWO-4','CWO-5','ENS','SA',
'LTJG','SN','LT','PO3','LCDR','PO2','CDR','PO1','CAPT','CPO','RDML','SCPO',
'RADM','MCPO','VADM','MCPON','ADM','FADM','WO1','CWO2','CWO3','CWO4','CWO5'
]
# QUESTIONABLE_TITLES could be last names or they could be titles
# TODO: need to find best way to deal with these.. http://code.google.com/p/python-nameparser/issues/detail?id=3
QUESTIONABLE_TITLES = ['judge',]
# PUNC_TITLES could be names or titles, but if they have period at the end they're a title
PUNC_TITLES = ['hon.']
PREFICES = [
'abu','bon','ben','bin','da','dal','de','del','der','de','di','e','ibn',
'la','le','san','st','ste','van','vel','von'
]
SUFFICES = [
'esq','esquire','jr','sr','2','i','ii','iii','iv','v','clu','chfc',
'cfp','md','phd'
]
CAPITALIZATION_EXCEPTIONS = {
'ii': 'II',
'iii': 'III',
'iv': 'IV',
'md': 'M.D.',
'phd': 'Ph.D.'
}
CONJUNCTIONS = ['&', 'and', 'et', 'e', 'und', 'y']
ENCODING = 'utf-8'
import re
re_spaces = re.compile(r"\s+")
re_word = re.compile(r"\w+")
re_mac = re.compile(r'^(ma?c)(\w)', re.I)
re_initial = re.compile(r'^(\w\.|[A-Z])?$')
import logging
# logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('HumanName')
def lc(value):
'''Lower case and remove any periods to normalize for comparison.'''
if not value:
return u''
return value.lower().replace('.','')
def is_not_initial(value):
return not re_initial.match(value)
class HumanName(object):
"""
Parse a person's name into individual components
Usage::
>>> name = HumanName("Dr. Juan Q. Xavier de la Vega III")
>>> name.title
'Dr.'
>>> name.first
'Juan'
>>> name.middle
'Q. Xavier'
>>> name.last
'de la Vega'
>>> name.suffix
'III'
>>> name2 = HumanName("de la Vega, Dr. Juan Q. Xavier III")
>>> name == name2
True
>>> len(name)
5
>>> list(name)
['Dr.', 'Juan', 'Q. Xavier', 'de la Vega', 'III']
>>> name[1:-1]
[u'Juan', u'Q. Xavier', u'de la Vega']
"""
def __init__(self, full_name=u"", titles=TITLES, prefices=PREFICES,
suffices=SUFFICES, punc_titles=PUNC_TITLES, conjunctions=CONJUNCTIONS,
capitalization_exceptions=CAPITALIZATION_EXCEPTIONS):
super(HumanName, self).__init__()
self.titles = titles
self.punc_titles = punc_titles
self.conjunctions = conjunctions
self.prefices = prefices
self.suffices = suffices
self.capitalization_exceptions = capitalization_exceptions
self.full_name = full_name
self.title = u""
self.first = u""
self.suffixes = []
self.middle_names = []
self.last_names = []
self.unparsable = False
self.count = 0
self.members = ['title','first','middle','last','suffix']
if self.full_name:
self.parse_full_name()
def __iter__(self):
return self
def __len__(self):
l = 0
for x in self:
l += 1
return l
def __eq__(self, other):
"""
HumanName instances are equal to other objects whose
lower case unicode representations are the same
"""
return unicode(self).lower() == unicode(other).lower()
def __ne__(self, other):
return not unicode(self).lower() == unicode(other).lower()
def __getitem__(self, key):
return [getattr(self, x) for x in self.members[key]]
def next(self):
if self.count >= len(self.members):
self.count = 0
raise StopIteration
else:
c = self.count
self.count = c + 1
return getattr(self, self.members[c]) or self.next()
def __unicode__(self):
return u" ".join(self)
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
if self.unparsable:
return u"<%(class)s : [ Unparsable ] >" % {'class': self.__class__.__name__,}
return u"<%(class)s : [\n\tTitle: '%(title)s' \n\tFirst: '%(first)s' \n\tMiddle: '%(middle)s' \n\tLast: '%(last)s' \n\tSuffix: '%(suffix)s'\n]>" % {
'class': self.__class__.__name__,
'title': self.title,
'first': self.first,
'middle': self.middle,
'last': self.last,
'suffix': self.suffix,
}
@property
def middle(self):
return u" ".join(self.middle_names)
@property
def last(self):
return u" ".join(self.last_names)
@property
def suffix(self):
return u", ".join(self.suffixes)
def is_conjunction(self, piece):
return lc(piece) in self.conjunctions and is_not_initial(piece)
def is_prefix(self, piece):
return lc(piece) in self.prefices and is_not_initial(piece)
def parse_full_name(self):
if not self.full_name:
raise AttributeError("Missing full_name")
if not isinstance(self.full_name, unicode):
self.full_name = unicode(self.full_name, ENCODING)
# collapse multiple spaces
self.full_name = re.sub(re_spaces, u" ", self.full_name.strip() )
# reset values
self.title = u""
self.first = u""
self.suffixes = []
self.middle_names = []
self.last_names = []
self.unparsable = False
# break up full_name by commas
parts = [x.strip() for x in self.full_name.split(",")]
log.debug(u"full_name: " + self.full_name)
log.debug(u"parts: " + unicode(parts))
pieces = []
if len(parts) == 1:
# no commas, title first middle middle middle last suffix
for part in parts:
names = part.split(' ')
for name in names:
name.replace(',','').strip()
pieces.append(name)
log.debug(u"pieces: " + unicode(pieces))
for i, piece in enumerate(pieces):
try:
next = pieces[i + 1]
except IndexError:
next = None
try:
prev = pieces[i - 1]
except IndexError:
prev = None
if lc(piece) in self.titles:
self.title = piece
continue
if piece.lower() in self.punc_titles:
self.title = piece
continue
if not self.first:
self.first = piece.replace(".","")
continue
if (i == len(pieces) - 2) and (lc(next) in self.suffices):
self.last_names.append(piece)
self.suffixes.append(next)
break
if self.is_prefix(piece):
self.last_names.append(piece)
continue
if self.is_conjunction(piece) and i < len(pieces) / 2:
self.first += ' ' + piece
continue
if self.is_conjunction(prev) and (i-1) < len(pieces) / 2:
self.first += ' ' + piece
continue
if self.is_conjunction(piece) or self.is_conjunction(next):
self.last_names.append(piece)
continue
if i == len(pieces) - 1:
self.last_names.append(piece)
continue
self.middle_names.append(piece)
else:
if lc(parts[1]) in self.suffices:
# title first middle last, suffix [, suffix]
names = parts[0].split(' ')
for name in names:
name.replace(',','').strip()
pieces.append(name)
log.debug(u"pieces: " + unicode(pieces))
self.suffixes += parts[1:]
for i, piece in enumerate(pieces):
try:
next = pieces[i + 1]
except IndexError:
next = None
if lc(piece) in self.titles:
self.title = piece
continue
if piece.lower() in self.punc_titles:
self.title = piece
continue
if not self.first:
self.first = piece.replace(".","")
continue
if i == (len(pieces) -1) and self.is_prefix(piece):
self.last_names.append(piece + " " + next)
break
if self.is_prefix(piece):
self.last_names.append(piece)
continue
if self.is_conjunction(piece) or self.is_conjunction(next):
self.last_names.append(piece)
continue
if i == len(pieces) - 1:
self.last_names.append(piece)
continue
self.middle_names.append(piece)
else:
# last, title first middles[,] suffix [,suffix]
names = parts[1].split(' ')
for name in names:
name.replace(',','').strip()
pieces.append(name)
log.debug(u"pieces: " + unicode(pieces))
self.last_names.append(parts[0])
for i, piece in enumerate(pieces):
try:
next = pieces[i + 1]
except IndexError:
next = None
if lc(piece) in self.titles:
self.title = piece
continue
if piece.lower() in self.punc_titles:
self.title = piece
continue
if not self.first:
self.first = piece.replace(".","")
continue
if lc(piece) in self.suffices:
self.suffixes.append(piece)
continue
self.middle_names.append(piece)
try:
if parts[2]:
self.suffixes += parts[2:]
except IndexError:
pass
if not self.first and len(self.middle_names) < 1 and len(self.last_names) < 1:
self.unparsable = True
log.error(u"Unparsable full_name: " + self.full_name)
def cap_word(self, word):
if self.is_prefix(word) or self.is_conjunction(word):
return lc(word)
if word in self.capitalization_exceptions:
return self.capitalization_exceptions[word]
mac_match = re_mac.match(word)
if mac_match:
def cap_after_mac(m):
return m.group(1).capitalize() + m.group(2).capitalize()
return re_mac.sub(cap_after_mac, word)
else:
return word.capitalize()
def cap_piece(self, piece):
if not piece:
return ""
replacement = lambda m: self.cap_word(m.group(0))
return re.sub(re_word, replacement, piece)
def capitalize(self):
name = unicode(self)
if not (name == name.upper() or name == name.lower()):
return
self.title = self.cap_piece(self.title)
self.first = self.cap_piece(self.first)
self.middle_names = self.cap_piece(self.middle).split(' ')
self.last_names = self.cap_piece(self.last).split(' ')
self.suffixes = self.cap_piece(self.suffix).split(' ')
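# --- Demo (not part of the original module) ---
# A minimal self-test mirroring the HumanName docstring example above; it only
# uses names already defined in this module.
if __name__ == '__main__':
    name = HumanName("Dr. Juan Q. Xavier de la Vega III")
    print(name.title)   # Dr.
    print(name.first)   # Juan
    print(name.middle)  # Q. Xavier
    print(name.last)    # de la Vega
    print(name.suffix)  # III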
| gpl-3.0 | -5,534,955,889,332,706,000 | 34.240876 | 156 | 0.501105 | false | 3.81962 | false | false | false |
nullzero/wpcgi | wpcgi/package/p_form/p_validators.py | 1 | 1995 | #!/data/project/nullzerobot/python/bin/python
import wtforms.validators
from wtforms.validators import *
from wtforms.validators import ValidationError
from messages import msg
import re
##############################
class _Required(Required):
def __init__(self, *args, **kwargs):
if not kwargs.get('message', False):
kwargs['message'] = msg['validator-require']
super(_Required, self).__init__(*args, **kwargs)
wtforms.validators.Required = _Required
##############################
class _NumberRange(NumberRange):
def __init__(self, *args, **kwargs):
if 'message' not in kwargs:
kwargs['message'] = msg['validator-mustbe-in-min-max']
super(_NumberRange, self).__init__(*args, **kwargs)
wtforms.validators.NumberRange = _NumberRange
##############################
# Have to do like this because the original Email.__init__ contains Email itself
def Email__init__(self, message=msg['validator-invalid-email']):
super(Email, self).__init__(r'^.+@[^.].*\.[a-z]{2,10}$', re.IGNORECASE, message)
wtforms.validators.Email.__init__ = Email__init__
##############################
def _Number(negative=False, decimal=False):
charset = r'\d'
if negative:
charset += '-'
if decimal:
charset += r'\.'
def _Number(form, field):
if not field.data or not re.match('^[' + charset + ']+$', field.data):
raise ValidationError(msg['validator-not-number'])
return _Number
wtforms.validators.Number = _Number
##############################
def _Wiki():
def _Wiki(form, field):
if not field.data or any(char in field.data for char in '#'):
raise ValidationError(msg['validator-not-wiki'])
return _Wiki
wtforms.validators.Wiki = _Wiki
##############################
class _IgnoreMe(object):
def __init__(self, *args, **kwargs):
pass
__call__ = __init__
wtforms.validators.IgnoreMe = _IgnoreMe
##############################
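# --- Illustrative usage sketch (not part of the original module) ---
# The patched validators are used exactly like the stock WTForms ones; the
# form and field names below are hypothetical:
#
#   class SignupForm(Form):
#       email = TextField('Email', [wtforms.validators.Email()])
#       age = TextField('Age', [wtforms.validators.Number()])
#       wiki = TextField('Wiki page', [wtforms.validators.Wiki()])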
| mit | -3,647,347,863,538,057,700 | 25.6 | 84 | 0.56792 | false | 3.911765 | false | false | false |
fishcorn/pylearn2 | pylearn2/scripts/plot_monitor.py | 37 | 10204 | #!/usr/bin/env python
"""
usage:
plot_monitor.py model_1.pkl model_2.pkl ... model_n.pkl
Loads any number of .pkl files produced by train.py. Extracts
all of their monitoring channels and prompts the user to select
a subset of them to be plotted.
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow, Harm Aarts"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import gc
import numpy as np
import sys
from theano.compat.six.moves import input, xrange
from pylearn2.utils import serial
from theano.printing import _TagGenerator
from pylearn2.utils.string_utils import number_aware_alphabetical_key
from pylearn2.utils import contains_nan, contains_inf
import argparse
channels = {}
def unique_substring(s, other, min_size=1):
"""
.. todo::
WRITEME
"""
size = min(len(s), min_size)
while size <= len(s):
for pos in xrange(0,len(s)-size+1):
rval = s[pos:pos+size]
fail = False
for o in other:
if o.find(rval) != -1:
fail = True
break
if not fail:
return rval
size += 1
# no unique substring
return s
def unique_substrings(l, min_size=1):
"""
.. todo::
WRITEME
"""
return [unique_substring(s, [x for x in l if x is not s], min_size)
for s in l]
def main():
"""
.. todo::
WRITEME
"""
parser = argparse.ArgumentParser()
parser.add_argument("--out")
parser.add_argument("model_paths", nargs='+')
parser.add_argument("--yrange", help='The y-range to be used for plotting, e.g. 0:1')
options = parser.parse_args()
model_paths = options.model_paths
if options.out is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
print('generating names...')
model_names = [model_path.replace('.pkl', '!') for model_path in
model_paths]
model_names = unique_substrings(model_names, min_size=10)
model_names = [model_name.replace('!','') for model_name in
model_names]
print('...done')
for i, arg in enumerate(model_paths):
try:
model = serial.load(arg)
except Exception:
if arg.endswith('.yaml'):
                print(arg + " is a yaml config file, " +
                      "you need to load a trained model.", file=sys.stderr)
quit(-1)
raise
this_model_channels = model.monitor.channels
if len(sys.argv) > 2:
postfix = ":" + model_names[i]
else:
postfix = ""
for channel in this_model_channels:
channels[channel+postfix] = this_model_channels[channel]
del model
gc.collect()
while True:
# Make a list of short codes for each channel so user can specify them
# easily
tag_generator = _TagGenerator()
codebook = {}
sorted_codes = []
for channel_name in sorted(channels,
key = number_aware_alphabetical_key):
code = tag_generator.get_tag()
codebook[code] = channel_name
codebook['<'+channel_name+'>'] = channel_name
sorted_codes.append(code)
x_axis = 'example'
print('set x_axis to example')
if len(channels.values()) == 0:
print("there are no channels to plot")
break
# If there is more than one channel in the monitor ask which ones to
# plot
prompt = len(channels.values()) > 1
if prompt:
# Display the codebook
for code in sorted_codes:
print(code + '. ' + codebook[code])
print()
print("Put e, b, s or h in the list somewhere to plot " +
"epochs, batches, seconds, or hours, respectively.")
response = input('Enter a list of channels to plot ' + \
'(example: A, C,F-G, h, <test_err>) or q to quit' + \
' or o for options: ')
if response == 'o':
print('1: smooth all channels')
print('any other response: do nothing, go back to plotting')
response = input('Enter your choice: ')
if response == '1':
for channel in channels.values():
k = 5
new_val_record = []
for i in xrange(len(channel.val_record)):
new_val = 0.
count = 0.
for j in xrange(max(0, i-k), i+1):
new_val += channel.val_record[j]
count += 1.
new_val_record.append(new_val / count)
channel.val_record = new_val_record
continue
if response == 'q':
break
#Remove spaces
response = response.replace(' ','')
#Split into list
codes = response.split(',')
final_codes = set([])
for code in codes:
if code == 'e':
x_axis = 'epoch'
continue
elif code == 'b':
x_axis = 'batche'
elif code == 's':
x_axis = 'second'
elif code == 'h':
x_axis = 'hour'
elif code.startswith('<'):
assert code.endswith('>')
final_codes.add(code)
elif code.find('-') != -1:
#The current list element is a range of codes
rng = code.split('-')
if len(rng) != 2:
print("Input not understood: "+code)
quit(-1)
found = False
for i in xrange(len(sorted_codes)):
if sorted_codes[i] == rng[0]:
found = True
break
if not found:
print("Invalid code: "+rng[0])
quit(-1)
found = False
for j in xrange(i,len(sorted_codes)):
if sorted_codes[j] == rng[1]:
found = True
break
if not found:
print("Invalid code: "+rng[1])
quit(-1)
final_codes = final_codes.union(set(sorted_codes[i:j+1]))
else:
#The current list element is just a single code
final_codes = final_codes.union(set([code]))
# end for code in codes
else:
            final_codes = set(codebook.keys())
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
styles = list(colors)
styles += [color+'--' for color in colors]
styles += [color+':' for color in colors]
fig = plt.figure()
ax = plt.subplot(1,1,1)
# plot the requested channels
for idx, code in enumerate(sorted(final_codes)):
channel_name= codebook[code]
channel = channels[channel_name]
y = np.asarray(channel.val_record)
if contains_nan(y):
print(channel_name + ' contains NaNs')
if contains_inf(y):
print(channel_name + 'contains infinite values')
if x_axis == 'example':
x = np.asarray(channel.example_record)
elif x_axis == 'batche':
x = np.asarray(channel.batch_record)
elif x_axis == 'epoch':
try:
x = np.asarray(channel.epoch_record)
except AttributeError:
# older saved monitors won't have epoch_record
x = np.arange(len(channel.batch_record))
elif x_axis == 'second':
x = np.asarray(channel.time_record)
elif x_axis == 'hour':
x = np.asarray(channel.time_record) / 3600.
else:
assert False
ax.plot( x,
y,
styles[idx % len(styles)],
                     marker = '.', # add point markers to lines
label = channel_name)
plt.xlabel('# '+x_axis+'s')
ax.ticklabel_format( scilimits = (-3,3), axis = 'both')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc = 'upper left',
bbox_to_anchor = (1.05, 1.02))
# Get the axis positions and the height and width of the legend
plt.draw()
ax_pos = ax.get_position()
pad_width = ax_pos.x0 * fig.get_size_inches()[0]
pad_height = ax_pos.y0 * fig.get_size_inches()[1]
dpi = fig.get_dpi()
lgd_width = ax.get_legend().get_frame().get_width() / dpi
lgd_height = ax.get_legend().get_frame().get_height() / dpi
# Adjust the bounding box to encompass both legend and axis. Axis should be 3x3 inches.
# I had trouble getting everything to align vertically.
ax_width = 3
ax_height = 3
total_width = 2*pad_width + ax_width + lgd_width
total_height = 2*pad_height + np.maximum(ax_height, lgd_height)
fig.set_size_inches(total_width, total_height)
ax.set_position([pad_width/total_width, 1-6*pad_height/total_height, ax_width/total_width, ax_height/total_height])
if(options.yrange is not None):
ymin, ymax = map(float, options.yrange.split(':'))
plt.ylim(ymin, ymax)
if options.out is None:
plt.show()
else:
plt.savefig(options.out)
if not prompt:
break
if __name__ == "__main__":
main()
| bsd-3-clause | -91,320,550,111,798,940 | 31.189274 | 123 | 0.492258 | false | 4.200906 | false | false | false |
npcoder2k14/HackInTheNorth-PYRAG | pyrag_sports/cricketAPI.py | 1 | 5652 | import requests
import os
import bs4
import sys
import json
try:
from flask import Flask
from flask import request
from flask.ext.cors import CORS, cross_origin
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
app = Flask(__name__)
    # Route decorators from an unfinished Flask/CORS setup; commented out
    # because there is no view function here for them to decorate.
    # @app.route('/cric/')
    # @crossdomain(origin='*')
#cors=CORS(app)
#app.config['CORS_HEADERS']='Content-Type'
#@app.route("/cric")
#@cross_origin()
except:
pass
try:
import lxml
parser = 'lxml'
except ImportError:
parser = 'html.parser'
try:
with open(sys.path[0]+'/proxy.config','r') as f:
proxies = f.read()
proxy_dict = { "http": proxies,
"https": proxies,
}
except:
import urllib
proxy_dict = urllib.getproxies()
class Cricket(object):
def get_player_stats(self, playerName, type_return='string'):
base_url="http://www.espncricinfo.com"
url="http://www.espncricinfo.com/ci/content/player/search.html?search="
names=[]
names=playerName.split('-')
playerName="+".join(names)
url=url+playerName
res=requests.get(url, stream=True, proxies=proxy_dict)
res.raise_for_status()
soup=bs4.BeautifulSoup(res.text, parser)
playerStatLink=soup.select(".ColumnistSmry")
playerStatLink=playerStatLink[1]
temp_url=playerStatLink.get('href')
url=base_url+temp_url
res=requests.get(url)
soup=bs4.BeautifulSoup(res.text, parser)
player_info=soup.select(".ciPlayerinformationtxt")
player_stats={}
for item in player_info[0:len(player_info)]:
b=item.find('b')
if b.string=="Major teams":
span=item.findAll('span')
temp=""
for it in span:
temp+=it.string+" "
else:
temp=item.find('span')
temp=temp.string
player_stats[b.string]=temp
if type_return == 'dict':
return player_stats
else:
return str(player_stats)
def live_score(self, type_return='string'):
response = requests.get('http://www.cricbuzz.com/live-scores', stream=True, proxies=proxy_dict)
soup = bs4.BeautifulSoup(response.text, parser)
team_mate = soup.findAll("div", {"class" : "cb-lv-main"})
scores = []
for i in team_mate:
scores.append(i.text)
if type_return == 'dict':
return scores
return json.dumps(str(scores))
def list_matches(self, type_return='string'):
response = requests.get('https://cricket.yahoo.com/matches/schedule', stream=True, proxies=proxy_dict)
soup = bs4.BeautifulSoup(response.text, parser)
head_list = soup.findAll("em", {"class": "ycric-table-heading"})
invited_team_list = soup.findAll("div", {"class": "ycric-table-sub-heading"})
no_list = soup.findAll("td", {"class": "sno"})
tour_dates_list = soup.findAll("span", {"class" : "matchDateTime"})
match_list = soup.findAll("td", {"class": "smatch"})
venue_list= soup.findAll("td", {"class": "svenue"})
result_list = soup.findAll("td", {"class": "sresult"})
heading = 0
nos = []
tour_date = []
team_list = []
venue = []
result = []
ans = []
cnt = 0
for i in match_list:
if i.text != "Match":
team_list.append(i.text)
for i in no_list:
if i.text !="#":
nos.append(i.text)
for i in venue_list:
if i.text!="Venue":
venue.append(i.text)
for i in result_list:
if i.text!="Result":
result.append(i.text)
cnt =len(nos)
check = 0
matches = {}
for i in range(cnt):
if nos[i]=="1":
header = head_list[heading].text.lstrip()
matches[header] = []
heading = heading+1
matches[header].append((team_list[i].lstrip(), tour_dates_list[i].text.lstrip(), venue[i].lstrip(), result[i].lstrip()))
if type_return == 'dict':
return matches
return json.dumps(str(matches))
def news(self, type_return='string'):
base_url='http://www.cricbuzz.com/cricket-news/latest-news'
res=requests.get(base_url, stream=True, proxies=proxy_dict)
soup = bs4.BeautifulSoup(res.text, parser)
news = soup.select(".cb-col-33 a")
news_dict={}
for all_news in news:
if str(all_news.get("title"))!="More Photos" and str(all_news.get("title"))!="None":
news_dict[all_news.get("title")]=base_url+all_news.get("href")
if type_return == 'dict':
return news_dict
        return json.dumps(str(news_dict))
if __name__ == '__main__':
    attr = Cricket()
    app.add_url_rule('/', view_func=attr.news)
app.add_url_rule('/cric/matches/',view_func=attr.list_matches)
app.add_url_rule('/cric/live/',view_func=attr.live_score)
app.add_url_rule('/cric/player_stats/',view_func=attr.get_player_stats)
#myvar = request.GET["myvar"]
port = int(os.environ.get("PORT", 5001))
app.run(host='0.0.0.0', port=port,debug=True)
"""
#app.add_url_rule('/cric/player_stats/',view_func=attr.player_stats)
player_stats=attr.get_player_stats("Virender Sehwag")
print (player_stats)
print (attr.live_score())
print (attr.list_matches())
print (attr.news())
"""
| mit | 8,007,146,770,010,000,000 | 33.888889 | 132 | 0.564402 | false | 3.50838 | false | false | false |
IamRafy/numibot | plugins/timezone.py | 2 | 1156 | import re
import yaml
import time
import datetime
from libs import request
import geocode
config = yaml.load(open('config.yaml', 'r'))
def matches(text):
return re.search('time\s+(at|in)\s+(.+)', text, re.IGNORECASE)
def decode(text):
g = matches(text).groups()
if g and g[1]:
return [g[1]]
def query(m, q):
results = geocode.raw(q)
if results:
location = results.get('geometry').get('location')
address = results.get('formatted_address')
timestamp = time.time()
timezone_results = request.ajax('https://maps.googleapis.com/maps/api/timezone/json?key=' + config.get('map_timezone_api_key') + '&location=' + str(location.get('lat')) + ',' + str(location.get('lng')) + '×tamp=' + str(timestamp))
if timezone_results.get('status') == 'OK':
readabletime = datetime.datetime.fromtimestamp(timestamp + timezone_results.get('rawOffset') + timezone_results.get('dstOffset')).strftime('%A, %d %B %Y, %I:%M %p')
return 'It\'s {0} in {1} - {2} ({3}).'.format(readabletime, address, timezone_results.get('timeZoneId'), timezone_results.get('timeZoneName'))
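# --- Illustrative flow (not part of the original plugin) ---
# The surrounding bot framework is assumed to call matches()/decode()/query();
# the sample text is a placeholder and a valid Google Maps API key is required:
#
#   text = 'what time is it in Tokyo?'
#   m = matches(text)
#   if m:
#       print(query(m, decode(text)[0]))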
| apache-2.0 | -3,195,195,900,452,444,700 | 30.243243 | 243 | 0.634948 | false | 3.4 | false | false | false |
brianrodri/oppia | core/domain/subscription_services.py | 2 | 9181 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for managing subscriptions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.platform import models
(user_models,) = models.Registry.import_models([
models.NAMES.user
])
def subscribe_to_thread(user_id, feedback_thread_id):
"""Subscribes a user to a feedback thread.
WARNING: Callers of this function should ensure that the user_id and
feedback_thread_id are valid.
Args:
user_id: str. The user ID of the new subscriber.
feedback_thread_id: str. The ID of the feedback thread.
"""
subscribe_to_threads(user_id, [feedback_thread_id])
def subscribe_to_threads(user_id, feedback_thread_ids):
"""Subscribes a user to feedback threads.
WARNING: Callers of this function should ensure that the user_id and
the feedback_thread_ids are valid.
Args:
user_id: str. The user ID of the new subscriber.
feedback_thread_ids: list(str). The IDs of the feedback threads.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
if not subscriptions_model:
subscriptions_model = user_models.UserSubscriptionsModel(id=user_id)
# Using sets for efficiency.
current_feedback_thread_ids_set = set(
subscriptions_model.general_feedback_thread_ids
)
# Determine which thread_ids are not already in the subscriptions model.
feedback_thread_ids_to_add_to_subscriptions_model = list(
set(feedback_thread_ids).difference(current_feedback_thread_ids_set)
)
subscriptions_model.general_feedback_thread_ids.extend(
feedback_thread_ids_to_add_to_subscriptions_model
)
subscriptions_model.update_timestamps()
subscriptions_model.put()
def subscribe_to_exploration(user_id, exploration_id):
"""Subscribes a user to an exploration (and, therefore, indirectly to all
feedback threads for that exploration).
WARNING: Callers of this function should ensure that the user_id and
exploration_id are valid.
Args:
user_id: str. The user ID of the new subscriber.
exploration_id: str. The exploration ID.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
if not subscriptions_model:
subscriptions_model = user_models.UserSubscriptionsModel(id=user_id)
if exploration_id not in subscriptions_model.exploration_ids:
subscriptions_model.exploration_ids.append(exploration_id)
subscriptions_model.update_timestamps()
subscriptions_model.put()
def subscribe_to_creator(user_id, creator_id):
"""Subscribes a user (learner) to a creator.
WARNING: Callers of this function should ensure that the user_id and
creator_id are valid.
Args:
user_id: str. The user ID of the new subscriber.
creator_id: str. The user ID of the creator.
"""
if user_id == creator_id:
raise Exception('User %s is not allowed to self subscribe.' % user_id)
subscribers_model_creator = user_models.UserSubscribersModel.get(
creator_id, strict=False)
subscriptions_model_user = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
if not subscribers_model_creator:
subscribers_model_creator = user_models.UserSubscribersModel(
id=creator_id)
if not subscriptions_model_user:
subscriptions_model_user = user_models.UserSubscriptionsModel(
id=user_id)
if user_id not in subscribers_model_creator.subscriber_ids:
subscribers_model_creator.subscriber_ids.append(user_id)
subscriptions_model_user.creator_ids.append(creator_id)
subscribers_model_creator.update_timestamps()
subscribers_model_creator.put()
subscriptions_model_user.update_timestamps()
subscriptions_model_user.put()
def unsubscribe_from_creator(user_id, creator_id):
"""Unsubscribe a user from a creator.
WARNING: Callers of this function should ensure that the user_id and
creator_id are valid.
Args:
user_id: str. The user ID of the subscriber.
creator_id: str. The user ID of the creator.
"""
subscribers_model_creator = user_models.UserSubscribersModel.get(
creator_id, strict=False)
subscriptions_model_user = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
if user_id in subscribers_model_creator.subscriber_ids:
subscribers_model_creator.subscriber_ids.remove(user_id)
subscriptions_model_user.creator_ids.remove(creator_id)
subscribers_model_creator.update_timestamps()
subscribers_model_creator.put()
subscriptions_model_user.update_timestamps()
subscriptions_model_user.put()
def get_all_threads_subscribed_to(user_id):
"""Returns a list with ids of all the feedback and suggestion threads to
which the user is subscribed.
WARNING: Callers of this function should ensure that the user_id is valid.
Args:
user_id: str. The user ID of the subscriber.
Returns:
list(str). IDs of all the feedback and suggestion threads to
which the user is subscribed.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
return (
subscriptions_model.general_feedback_thread_ids
if subscriptions_model else [])
def get_all_creators_subscribed_to(user_id):
"""Returns a list with ids of all the creators to which this learner has
subscribed.
WARNING: Callers of this function should ensure that the user_id is valid.
Args:
user_id: str. The user ID of the subscriber.
Returns:
list(str). IDs of all the creators to which this learner has
subscribed.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
return (
subscriptions_model.creator_ids
if subscriptions_model else [])
def get_all_subscribers_of_creator(user_id):
"""Returns a list with ids of all users who have subscribed to this
creator.
WARNING: Callers of this function should ensure that the user_id is valid.
Args:
user_id: str. The user ID of the subscriber.
Returns:
list(str). IDs of all users who have subscribed to this creator.
"""
subscribers_model = user_models.UserSubscribersModel.get(
user_id, strict=False)
return (
subscribers_model.subscriber_ids
if subscribers_model else [])
def get_exploration_ids_subscribed_to(user_id):
"""Returns a list with ids of all explorations that the given user
subscribes to.
WARNING: Callers of this function should ensure that the user_id is valid.
Args:
user_id: str. The user ID of the subscriber.
Returns:
list(str). IDs of all explorations that the given user
subscribes to.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
return (
subscriptions_model.exploration_ids
if subscriptions_model else [])
def subscribe_to_collection(user_id, collection_id):
"""Subscribes a user to a collection.
WARNING: Callers of this function should ensure that the user_id and
collection_id are valid.
Args:
user_id: str. The user ID of the new subscriber.
collection_id: str. The collection ID.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
if not subscriptions_model:
subscriptions_model = user_models.UserSubscriptionsModel(id=user_id)
if collection_id not in subscriptions_model.collection_ids:
subscriptions_model.collection_ids.append(collection_id)
subscriptions_model.update_timestamps()
subscriptions_model.put()
def get_collection_ids_subscribed_to(user_id):
"""Returns a list with ids of all collections that the given user
subscribes to.
WARNING: Callers of this function should ensure that the user_id is valid.
Args:
user_id: str. The user ID of the subscriber.
Returns:
list(str). IDs of all collections that the given user
subscribes to.
"""
subscriptions_model = user_models.UserSubscriptionsModel.get(
user_id, strict=False)
return (
subscriptions_model.collection_ids
if subscriptions_model else [])
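# --- Illustrative call sequence (not part of the original services module) ---
# The user and exploration ids below are placeholders; every call assumes the
# corresponding storage models exist and the ids are valid:
#
#   subscribe_to_creator('learner_id', 'creator_id')
#   subscribe_to_exploration('learner_id', 'exp_id')
#   get_all_subscribers_of_creator('creator_id')      # -> ['learner_id']
#   get_exploration_ids_subscribed_to('learner_id')   # -> ['exp_id']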
| apache-2.0 | -4,500,942,579,873,539,000 | 33.003704 | 78 | 0.695676 | false | 4.023225 | false | false | false |
twchoi/Netmod_git | test/script/hit1.py | 1 | 1416 | #!/usr/bin/env python
import sys, math
from pylab import *
def hitf(a):
r = 0
if (a >= 1):
r = 1
else:
r = 0
return r
def mean(b):
return float(sum(b)) / float(len(b))
#infilename = "d2out"
infilename = sys.argv[1]
#alpha = float(sys.argv[2])
ifile = open(infilename, 'r')
no_cache = 0
time = []
stab_cost = []
cache_time = []
#tmp_cr = []
tmp_cr = 0
for line in ifile:
data = line.split()
action = data[1]
if action == "caching":
#print action
no_cache = no_cache + 1
cache_time.append(float(data[0]))
elif action == "Node_Join":
#print action
if no_cache != 0:
time.append(float(data[0]))
cost = float(data[6])
cost_rate = cost / float(no_cache)
#tmp_cr.append(cost_rate)
tmp_cr = tmp_cr + cost_rate
f_res = float(tmp_cr) / float(len(stab_cost) + 1)
#stab_cost.append(mean(tmp_cr))
stab_cost.append(f_res)
#print no_cache, cost, cost_rate
else:
st = 0
#leave or query. handle later
#print stab_cost
print len(time), len(stab_cost)
#print cache_time
for i in xrange(len(time)):
print time[i], stab_cost[i]
print ' '
for j in xrange(len(cache_time)):
print cache_time[j], 0
"""
figure(1)
plot(time, stab_cost)
xlabel('time')
ylabel('number of messages per unique object')
title('Stabilization Cost')
grid(True)
#figure(2)
#plot(num_ave)
#figure(3)
#plot(cost)
show()
"""
| gpl-2.0 | -8,745,007,664,037,984,000 | 19.823529 | 56 | 0.59887 | false | 2.728324 | false | false | false |
rebelact/mailsync-app | mailsync/models/setup.py | 1 | 2889 | import logging
from mailsync.models.base import BaseModel
from mailsync.models.adapter import adapter
from mailsync.models.sqlite import details_table, columns_table, database_table, provider_table, lists_table
class SetupModel(BaseModel):
def __init__(self):
super(SetupModel, self).__init__()
def _get_provider(self, provider_name):
providers = {
"mailchimp": "MailChimp",
"campaignmonitor": "Campaign Monitor"
}
return providers[provider_name]
def _get_last_inserted_id(self, list_data, table, primary_key):
# nothing synced for this list
if not (list_data.last_inserted_id and list_data.status and list_data.inserted_rows and list_data.rows_to_be_inserted):
last_inserted_id = 0
elif list_data.last_inserted_id:
last_inserted_id = list_data.last_inserted_id
else:
last_inserted_id = adapter.get_last_inserted_id(table, primary_key, "first")
return last_inserted_id
def get_synced_lists(self):
synced_lists = []
for synced_list_data in details_table.get_details():
details_id = synced_list_data._id
database_data = database_table.find_detail(details_id)
provider_data = provider_table.find_detail(details_id)
list_data = lists_table.find_detail(details_id)
columns_data = columns_table.find_details(details_id)
if database_data and provider_data and list_data and columns_data:
driver = self.get_driver(database_data)
adapter.setup(driver)
table = database_data.table
primary_key = adapter.get_primary_key(table)
last_inserted_id = self._get_last_inserted_id(list_data, table, primary_key)
columns_dict = self.get_columns(columns_data)
try:
rows_to_be_synced = adapter.get_rows_to_be_inserted(table, columns_dict, primary_key, last_inserted_id)
provider_name = self._get_provider(provider_data.provider)
synced_lists.append({
"id": synced_list_data._id,
"name": list_data.name,
"last_synced": synced_list_data.last_synced,
"provider": provider_name,
"database": database_data,
"table": table,
"rows_to_be_synced": len(rows_to_be_synced)
})
except Exception, err:
logging.error(err)
continue
return synced_lists
def check_synced_list(self, list_provider_id):
return lists_table.find_list_by_listid(list_provider_id)
setup_model = SetupModel() | mit | -6,122,042,490,357,596,000 | 37.026316 | 127 | 0.571132 | false | 4.069014 | false | false | false |
zynthian/zynthian-ui | zynlibs/jackpeak/jackpeak.py | 1 | 1763 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#********************************************************************
# ZYNTHIAN PROJECT: Jackpeak Python Wrapper
#
# A Python wrapper for jackpeak library
#
# Copyright (C) 2019 Brian Walton <[email protected]>
#
#********************************************************************
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For a full copy of the GNU General Public License see the LICENSE.txt file.
#
#********************************************************************
from ctypes import *
from os.path import dirname, realpath
#-------------------------------------------------------------------------------
# Jackpeak Library Wrapper
#-------------------------------------------------------------------------------
lib_jackpeak=None
def lib_jackpeak_init():
global lib_jackpeak
try:
lib_jackpeak=cdll.LoadLibrary(dirname(realpath(__file__))+"/build/libjackpeak.so")
lib_jackpeak.initJackpeak()
lib_jackpeak.getPeak.restype = c_float
lib_jackpeak.getPeakRaw.restype = c_float
lib_jackpeak.getHold.restype = c_float
except Exception as e:
lib_jackpeak=None
print("Can't init jackpeak library: %s" % str(e))
return lib_jackpeak
def get_lib_jackpeak():
return lib_jackpeak
#-------------------------------------------------------------------------------
| gpl-3.0 | 3,813,808,966,403,455,000 | 33.568627 | 84 | 0.557005 | false | 4.207637 | false | false | false |
kamyu104/LeetCode | Python/rotate-string.py | 2 | 3103 | # Time: O(n)
# Space: O(1)
# We are given two strings, A and B.
#
# A shift on A consists of taking string A and moving the leftmost character to the rightmost position.
# For example, if A = 'abcde', then it will be 'bcdea' after one shift on A. Return True
# if and only if A can become B after some number of shifts on A.
#
# Example 1:
# Input: A = 'abcde', B = 'cdeab'
# Output: true
#
# Example 2:
# Input: A = 'abcde', B = 'abced'
# Output: false
#
# Note:
# - A and B will have length at most 100.
# Rabin-Karp Algorithm (rolling hash)
class Solution(object):
def rotateString(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
"""
def check(index):
return all(A[(i+index) % len(A)] == c
for i, c in enumerate(B))
if len(A) != len(B):
return False
M, p = 10**9+7, 113
p_inv = pow(p, M-2, M)
b_hash, power = 0, 1
for c in B:
b_hash += power * ord(c)
b_hash %= M
power = (power*p) % M
a_hash, power = 0, 1
for i in xrange(len(B)):
a_hash += power * ord(A[i%len(A)])
a_hash %= M
power = (power*p) % M
if a_hash == b_hash and check(0): return True
power = (power*p_inv) % M
for i in xrange(len(B), 2*len(A)):
a_hash = (a_hash-ord(A[(i-len(B))%len(A)])) * p_inv
a_hash += power * ord(A[i%len(A)])
a_hash %= M
if a_hash == b_hash and check(i-len(B)+1):
return True
return False
# Time: O(n)
# Space: O(n)
# KMP algorithm
class Solution2(object):
def rotateString(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
"""
def strStr(haystack, needle):
def KMP(text, pattern):
prefix = getPrefix(pattern)
j = -1
for i in xrange(len(text)):
while j > -1 and pattern[j + 1] != text[i]:
j = prefix[j]
if pattern[j + 1] == text[i]:
j += 1
if j == len(pattern) - 1:
return i - j
return -1
def getPrefix(pattern):
prefix = [-1] * len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j > -1 and pattern[j + 1] != pattern[i]:
j = prefix[j]
if pattern[j + 1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
if not needle:
return 0
return KMP(haystack, needle)
if len(A) != len(B):
return False
return strStr(A*2, B) != -1
# Time: O(n^2)
# Space: O(n)
class Solution3(object):
def rotateString(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
"""
return len(A) == len(B) and B in A*2
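# Illustrative usage (assumes Python 2, as the xrange calls above do):
#   print Solution().rotateString('abcde', 'cdeab')   # True
#   print Solution3().rotateString('abcde', 'abced')  # False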
| mit | -7,140,308,830,570,170,000 | 25.75 | 103 | 0.44183 | false | 3.447778 | false | false | false |
adiyoss/DeepVOT | post_process/analyze.py | 1 | 3854 | from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
def read_file(path):
y = list()
with open(path) as fid:
for line in fid.readlines():
curr_y = list()
vals = line.split()
for val in vals:
curr_y.append(int(val))
y.append(curr_y)
fid.close()
return y
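# Illustrative note: each line of labels.dat / pred.dat holds whitespace-separated
# integers, e.g. "120 250"; label lines with two values are treated as positive
# examples and lines with three values as negative ones in the code below.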
y_all = read_file('labels.dat')
y_hat_all = read_file('pred.dat')
pos_neg_y = list()
pos_neg_y_hat = list()
for i, y in enumerate(y_all):
if len(y) == 2:
pos_neg_y.append(1)
else:
pos_neg_y.append(0)
if y_hat_all[i][1] == -1:
pos_neg_y_hat.append(1)
else:
pos_neg_y_hat.append(0)
print('Confusion Matrix: ')
print(confusion_matrix(pos_neg_y, pos_neg_y_hat))
print
print('Total Accuracy: %.3f' % accuracy_score(pos_neg_y, pos_neg_y_hat))
print('Precision: %.3f' % precision_score(pos_neg_y, pos_neg_y_hat))
print('Recall: %.3f' % recall_score(pos_neg_y, pos_neg_y_hat))
print('F1-Score: %.3f' % f1_score(pos_neg_y, pos_neg_y_hat))
cumulative_onset_pos = 0.0
cumulative_offset_pos = 0.0
count_pos = 0
cumulative_onset_neg = 0.0
cumulative_offset_neg = 0.0
count_neg = 0
for i, y in enumerate(y_all):
if len(y) == 2:
cumulative_onset_pos += abs(y[0] - y_hat_all[i][0])
cumulative_offset_pos += abs(y[1] - y_hat_all[i][len(y_hat_all[i]) - 1])
count_pos += 1
if len(y) == 3:
if y_hat_all[i][1] == -1:
cumulative_onset_neg += abs(y[0] - y_hat_all[i][0])
cumulative_offset_neg += abs(y[1] - y_hat_all[i][2])
count_neg += 1
else:
cumulative_onset_neg += abs(y[0] - y_hat_all[i][0])
cumulative_offset_neg += abs(y[1] - y_hat_all[i][1])
count_neg += 1
print('Pos')
print('Average onset: %.3f' % (cumulative_onset_pos / count_pos))
print('Average offset: %.3f' % (cumulative_offset_pos / count_pos))
print('Neg')
print('Average onset: %.3f' % (cumulative_onset_neg / count_neg))
print('Average offset: %.3f' % (cumulative_offset_neg / count_neg))
cumulative_loss = 0.0
gamma_m = 4
gamma_0 = 100
count = 0
ms2 = 0
ms5 = 0
ms10 = 0
ms15 = 0
ms25 = 0
ms50 = 0
duration = 0
duration_hat = 0
flag = 0
for i, y in enumerate(y_all):
if len(y) == 2 and y_hat_all[i][1] == -1:
# duration = y[1] - y[0]
# duration_hat = y_hat_all[i][2] - y_hat_all[i][0]
# cumulative_loss += max(0, abs((y_hat_all[i][2] - y_hat_all[i][0]) - (y[1] - y[0]) - gamma_m))
flag = 1
elif len(y) == 3 and y_hat_all[i][1] != -1:
duration = y[1] - y[0]
duration_hat = y_hat_all[i][1] - y_hat_all[i][0]
cumulative_loss += max(0, abs((y_hat_all[i][1] - y_hat_all[i][0]) - (y[1] - y[0]) - gamma_m))
count += 1
elif len(y) == 2 and y_hat_all[i][1] != -1:
# duration = y[1] - y[0]
# duration_hat = y_hat_all[i][1] - y_hat_all[i][0]
# cumulative_loss += gamma_0
flag = 1
else:
duration = y[1] - y[0]
duration_hat = y_hat_all[i][2] - y_hat_all[i][0]
cumulative_loss += gamma_0
count += 1
if flag == 0:
diff = duration - duration_hat
if diff < 2:
ms2 += 1
if diff < 5:
ms5 += 1
if diff < 10:
ms10 += 1
if diff < 15:
ms15 += 1
if diff < 25:
ms25 += 1
if diff < 50:
ms50 += 1
flag = 0
print(cumulative_loss / float(count))
print("==> 2ms > %.3f%%" % (100 * ms2 / float(count)))
print("==> 5ms > %.3f%%" % (100 * ms5 / float(count)))
print("==> 10ms > %.3f%%" % (100 * ms10 / float(count)))
print("==> 15ms > %.3f%%" % (100 * ms15 / float(count)))
print("==> 25ms > %.3f%%" % (100 * ms25 / float(count)))
print("==> 50ms > %.3f%%" % (100 * ms50 / float(count)))
| mit | -1,620,741,362,476,978,700 | 29.346457 | 103 | 0.525169 | false | 2.630717 | false | false | false |
wavesoft/creditpiggy | creditpiggy-server/creditpiggy/core/migrations/0006_piggyuser_uuid.py | 1 | 1150 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import creditpiggy.core.models
def generate_uuid(apps, schema_editor):
PiggyUser = apps.get_model('core', 'PiggyUser')
for user in PiggyUser.objects.all().iterator():
user.uuid = creditpiggy.core.models.new_uuid()
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20150607_0838'),
]
operations = [
migrations.AddField(
model_name='piggyuser',
name='uuid',
field=models.CharField(default=creditpiggy.core.models.new_uuid, help_text=b'Unique user identification string', max_length=32),
preserve_default=False,
),
migrations.RunPython(
generate_uuid,
),
migrations.AlterField(
model_name='piggyuser',
name='uuid',
field=models.CharField(default=creditpiggy.core.models.new_uuid, help_text=b'Unique user identification string', unique=True, max_length=32, db_index=True),
preserve_default=True,
),
]
| gpl-2.0 | 3,596,210,663,184,882,000 | 30.944444 | 168 | 0.623478 | false | 3.745928 | false | false | false |
isb-cgc/examples-Python | python/pairwise/archive/bq_filter_file.py | 1 | 4469 | '''
Copyright 2015, Institute for Systems Biology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************
Using python to generate bigqueries.
Here we use the 'filter file' to create subusets of data
to download.
******************************************************
First need to install the BigQuery API
pip3 install --upgrade google-cloud-bigquery
The first time I ran the installer, there was an error. But just running pip3
again seemed to work.
Also we need to get authenticated. At the command line we:
gcloud auth application-default login
# table:isb-cgc.tcga_201510_alpha.DNA_Methylation_betas
# tablevar:Probe_Id
# annot:isb-cgc.platform_reference.methylation_annotation
# annotvar:IlmnID
# idvar:ParticipantBarcode
# valvar:Beta_Value
# pivot:UCSC.RefGene_Name # after the annotation join
# filter:SampleTypeLetterCode='TP'
# filter:Study='BRCA'
# filter:UCSC.RefGene_Name IN ('ACSM5','NAP1L4','SULF2')
# limit:100
'''
from google.cloud import bigquery
import argparse
import sys
ko = ['idvar', 'valvar', 'pivot', 'table', 'annot', 'tablevar', 'annotvar', 'filter', 'limit']
# Some queries must be annoated before running pairwise
## to this point, some annotation fields are nested
## so we need to check the schema first.
def checkSchemas(client,ffd):
# have to use a client pointed to the table that you want to query
ts = ffd['table'].split('.')
d1 = client.dataset(ts[1])
t1 = d1.table(ts[2])
t1.reload()
# then t1 contains a list of schema fields
print(t1.schema[0].description)
print(t1.schema[0].name)
print(t1.schema[0].field_type)
print(t1.schema[0].mode)
# will have to check if any of the fields are records
# or structs or arrays.
# check that dictionary names are
# in the allowed set.
def checkQuery(client, ffd):
# make sure the query contains only allowed keys in KO.
ks = list(ffd.keys())
if any([x not in ko for x in ks]):
print("Removing items from the filter file:")
print([x for x in ks if x not in ko])
filtered_dict = {key: value for key, value in ffd.items() if key in ko}
	checkSchemas(client, filtered_dict)  # only inspects/prints the schema and returns nothing, so keep filtered_dict unchanged
return(filtered_dict)
def keyOrder(ffdict):
ks = list(ffdict.keys())
kd = [x for x in ko if x in ks]
return(kd)
def readFilterFile(filepath):
# build a dictionary of query terms
fin = open(filepath, 'r')
ffdict = {}
for line in fin:
strings = line.strip().split(':')
k, v = [s.strip() for s in strings]
if k not in ffdict:
ffdict[k] = v
else:
ffdict[k] = ffdict[k] + " AND " + v
fin.close()
return(ffdict)
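# Illustrative parse: a filter file containing the lines
#   filter:Study='BRCA'
#   filter:SampleTypeLetterCode='TP'
#   limit:100
# yields {'filter': "Study='BRCA' AND SampleTypeLetterCode='TP'", 'limit': '100'}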
def buildQuery(client, filename):
ffd = readFilterFile(filename)
ffd = checkQuery(client, ffd)
query = "SELECT \n"
for key in keyOrder(ffd): # queries need to have a particular order
if key in ['idvar', 'valvar']:
query += ffd[key] + ",\n"
elif key == 'table':
query += "FROM `" + ffd[key] + "`\n WHERE \n"
elif key == 'limit':
query += "LIMIT " + ffd[key] + " \n"
else:
query += ffd[key] + " \n"
return(query)
def bq(args):
client = bigquery.Client(project=args.proj)
queryString = buildQuery(client, args.ff1)
print("*****************************************")
print(queryString)
print("*****************************************")
#query_results = client.run_sync_query(queryString)
#query_results.use_legacy_sql = False
#query_results.run()
#print(query_results.total_rows)
#for qi in query_results.rows:
# print(qi)
print("done")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="BigQuery PairWise")
	parser.add_argument("proj", help="google project ID")
parser.add_argument("ff1", help="filter file")
args = parser.parse_args()
bq(args)
| apache-2.0 | 3,670,557,857,444,028,400 | 30.921429 | 94 | 0.631237 | false | 3.491406 | false | false | false |
AgnezIO/agnez | agnez/grid.py | 2 | 7483 | import numpy as np
# from gaborfitting import *
import theano
import theano.tensor as T
def scale_norm(X):
X = X - X.min()
scale = (X.max() - X.min())
return X / scale
def img_grid(X, rows_cols=None, rescale=True):
"""Image Grid: modified from jbornschein/draw
Parameters:
===========
X : np.array, images (samples, channels, height, width)
rows_cols : list, grid dimensions (rows, cols)
rescale : bool
Returns:
========
I : np.array, grid image
"""
N, channels, height, width = X.shape
if rows_cols is None:
sroot = np.sqrt(X.shape[0])
cols = int(np.ceil(sroot))
rows = int(np.floor(sroot)) + 1
else:
rows, cols = rows_cols
total_height = int(rows * height + rows - 1)
total_width = int(cols * width + cols - 1)
if rescale:
X = scale_norm(X)
I = np.zeros((channels, total_height, total_width))
I.fill(1)
for i in xrange(N):
r = i // cols
c = i % cols
if rescale:
this = X[i]
else:
this = scale_norm(X[i])
offset_y, offset_x = r*height+r, c*width+c
I[0:channels, offset_y:(offset_y+height), offset_x:(offset_x+width)] = this
I = (255*I).astype(np.uint8)
if(channels == 1):
out = I.reshape((total_height, total_width))
else:
out = np.dstack(I).astype(np.uint8)
return out
def grid2d(X, example_width=False, display_cols=False, pad_row=1,
pad_col=1, rescale=True):
"""Display weights in a nice grid
This function assumes that each row of the X is an image weight to be
    resized to a square. After that it creates a 2D grid with all
the squares.
Parameters
----------
X : `numpy.array`
array with each filter to be transformed to an image on the rows
example_width: int
defines the width of the images in the rows X if they are
not square
    display_cols: int or False
        number of columns in the grid; when False, roughly sqrt(m) columns are used
pad_row: int
integer number of pixels between up/down neighbors
pad_col: int
integer number of pixels between left/right neighbors
Adapted from https://github.com/martinblom/py-sparse-filtering
"""
m, n = X.shape
if not example_width:
example_width = int(np.round(np.sqrt(n)))
example_height = n//example_width
# Compute number of items to display
if not display_cols:
display_cols = int(np.sqrt(m))
display_rows = int(np.ceil(m/display_cols))
# Setup blank display
display_array = -np.ones((pad_row+display_rows * (example_height+pad_row),
pad_col+display_cols * (example_width+pad_col)))
# Copy each example into a patch on the display array
curr_ex = 0
for j in range(display_rows):
for i in range(display_cols):
if curr_ex >= m:
break
# Copy the patch
# Get the max value of the patch
max_val = abs(X[curr_ex, :]).max()
i_inds = example_width*[pad_row+j * (example_height+pad_row)+q for q in range(example_height)]
j_inds = [pad_col+i * (example_width+pad_col)+q
for q in range(example_width)
for nn in range(example_height)]
try:
newData = (X[curr_ex, :].reshape((example_height,
example_width))).T/max_val
except:
                raise ValueError("expected {}, got {}".format((example_height, example_width), X[curr_ex, :].shape))
display_array[i_inds, j_inds] = newData.flatten()
curr_ex += 1
if curr_ex >= m:
break
visual = (display_array - display_array.min()) / (display_array.max() - display_array.min())
visual = np.nan_to_num(visual)
ret = visual if rescale else display_array
ret = (255*ret).astype(np.uint8)
return ret
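# Illustrative usage sketch: tile 100 random 8x8 filters, one flattened filter
# per row, into a single uint8 image.
#   W = np.random.rand(100, 64)
#   img = grid2d(W)   # a 10x10 grid of 8x8 tiles separated by 1-pixel padding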
def pref_grid(above, bellow, num_preferred=9, abs_value=True, pad_row=5):
"""Display the weights that the layer above prefers on the layer below
This function looks for the `num_preferred` larger values on the layer
`above` and get their indexes. Those indexes are used to retrieve the
preferred weights on the layer `bellow`. After all, those preferred
vectors are organized with `meth`:grid2d.
Parameters
----------
above : `numpy.array`
matrix with each filter to be transformed to an image on the rows
bellow : `numpy.array`
matrix with each filter to be transformed to an image on the rows
num_preferred: int
number of preferred weights to be plotted
abs_value: bool
if True chooses the preferred as the weights associated with
maximum absolute activation. Else, uses only the maximum (positve)
values.
pad_row: int
integer number of pixels between up/down neighbors
"""
# idx = np.random.randint(above.shape[0], size=num_preferred)
R = np.abs(above) if abs_value else above
X = np.zeros((num_preferred**2, bellow.shape[1]))
for i, w in enumerate(R):
s = np.argsort(w)[::-1]
prefs = s[:num_preferred]
first = i*num_preferred
last = (i+1)*num_preferred
X[first:last] = bellow[prefs]
visual = grid2d(X, pad_col=1, pad_row=pad_row)
return visual[pad_row-1:-pad_row+1, :]
class DeepPref():
"""Similar do pref_grid but for deep networks.
Checks what are the weights in layers[0] that layers[-1] prefers.
Parameters
----------
model: `keras.models.Sequential`
layer: int, observed layer
num_preferred: int
number of preferred weights to be plotted
abs_value: bool
if True chooses the preferred as the weights associated with
maximum absolute activation. Else, uses only the maximum (positve)
values.
pad_row: int
integer number of pixels between horizontal neighbors
"""
def __init__(self, model, layer, num_preferred=10, abs_value=True,
pad_row=5, sum_preferences=False):
self.model = model
self.layer = layer
self.num_preferred = num_preferred
self.abs_value = abs_value
self.pad_row = pad_row
self.sum_preferences = sum_preferences
X = model.get_input()
Y = model.layers[layer].get_output()
if self.sum_preferences:
Y = T.nnet.softmax(Y)
self.F = theano.function([X], Y, allow_input_downcast=True)
num_weights_out = model.layers[layer].W.get_value().shape[1]
self.idx = np.random.randint(num_weights_out,
size=num_preferred)
def get_pref(self):
W = self.model.layers[0].W.get_value().T
Y = self.F(W)
R = np.abs(Y[:, self.idx]) if self.abs_value else Y[:, self.idx]
if self.sum_preferences:
X = np.zeros((self.num_preferred, W.shape[1]))
else:
X = np.zeros((self.num_preferred**2, W.shape[1]))
for i, w in enumerate(R.T):
s = np.argsort(w)
prefs = s[:-self.num_preferred-1:-1]
first = i*self.num_preferred
last = (i+1)*self.num_preferred
if self.sum_preferences:
X[i] = (W[prefs]).mean(axis=0)
else:
X[first:last] = W[prefs]
visual = grid2d(X, pad_col=1, pad_row=self.pad_row)
return visual[self.pad_row-1:-self.pad_row+1, :]
| bsd-3-clause | 1,772,152,492,622,840,300 | 33.325688 | 116 | 0.585327 | false | 3.614976 | false | false | false |
Thykof/SafeMyWork | cli/main.py | 1 | 1975 | import threading
import asyncio
import click
from safer.safe import Safer
nb_copies_done = 0
@click.command()
@click.option('-d', '--delta', default=10, help='Number of minutes between copies.')
@click.option('-s', '--safe_dir', default=None, help='Destination folder.')
@click.option('-w', '--delicate_dirs', required=True, help='Folder to save.')
@click.option('-n', '--count', default=0, help='Number of iterations, 0 for infinite loop (default 0).')
@click.option('-t', '--type', default='filter', help='`copy` or `filter` or `update` (default `filter`).')
@click.option('--extentions', default='', help='File extensions to exclude, separated by commas (pdf, txt...) (useless when `type` is copy)')
@click.option('--dirpath', default='', help='A path to exclude (useless when `type` is copy)')
@click.option('--dirname', default='', help='A folder name to exclude (useless when `type` is copy)')
def scan(delta, safe_dir, delicate_dirs, count, type, extentions, dirpath, dirname):
config = {
'timedelta': delta,
'safe_dir': safe_dir,
'delicate_dirs': [delicate_dirs],
'advanced': True, # disable MAX_DIR_SIZE limit
# Exclusion rules:
'dirname': [dirname],
'dirpath': [dirpath],
'filename': [],
'extention': extentions.split(',') if extentions != '' else [],
# other options
'local_path': '',
'external_path': ''
}
loop = asyncio.get_event_loop()
safer = Safer(config=config)
if type == 'filter':
func = lambda: safer.save_with_filters(loop)
elif type == 'copy':
func = safer.copy_files
elif type == 'update':
func = lambda: safer.update(loop)
def perpetual_scan():
global nb_copies_done
func()
nb_copies_done += 1
if nb_copies_done < count or count == 0:
timer = threading.Timer(delta, perpetual_scan)
timer.start()
delta *= 60
perpetual_scan()
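# Illustrative invocation (assumes the `scan` command is exposed as a
# console-script entry point; the command name below is a placeholder):
#   safework -w /home/user/docs -s /tmp/safe_copy -d 15 -t filter --extentions pdf,tmp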
| gpl-3.0 | 4,522,782,964,535,768,600 | 33.051724 | 139 | 0.608608 | false | 3.610603 | false | false | false |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/templates_py/operator_modal_view3d_raycast.py | 4 | 3608 | import bpy
from bpy_extras import view3d_utils
def main(context, event):
"""Run this function on left mouse, execute the ray cast"""
# get the context arguments
scene = context.scene
region = context.region
rv3d = context.region_data
coord = event.mouse_region_x, event.mouse_region_y
# get the ray from the viewport and mouse
view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
ray_target = ray_origin + view_vector
def visible_objects_and_duplis():
"""Loop over (object, matrix) pairs (mesh only)"""
for obj in context.visible_objects:
if obj.type == 'MESH':
yield (obj, obj.matrix_world.copy())
if obj.dupli_type != 'NONE':
obj.dupli_list_create(scene)
for dob in obj.dupli_list:
obj_dupli = dob.object
if obj_dupli.type == 'MESH':
yield (obj_dupli, dob.matrix.copy())
obj.dupli_list_clear()
def obj_ray_cast(obj, matrix):
"""Wrapper for ray casting that moves the ray into object space"""
# get the ray relative to the object
matrix_inv = matrix.inverted()
ray_origin_obj = matrix_inv * ray_origin
ray_target_obj = matrix_inv * ray_target
ray_direction_obj = ray_target_obj - ray_origin_obj
# cast the ray
success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)
if success:
return location, normal, face_index
else:
return None, None, None
# cast rays and find the closest object
best_length_squared = -1.0
best_obj = None
for obj, matrix in visible_objects_and_duplis():
if obj.type == 'MESH':
hit, normal, face_index = obj_ray_cast(obj, matrix)
if hit is not None:
hit_world = matrix * hit
scene.cursor_location = hit_world
length_squared = (hit_world - ray_origin).length_squared
if best_obj is None or length_squared < best_length_squared:
best_length_squared = length_squared
best_obj = obj
# now we have the object under the mouse cursor,
# we could do lots of stuff but for the example just select.
if best_obj is not None:
best_obj.select = True
context.scene.objects.active = best_obj
class ViewOperatorRayCast(bpy.types.Operator):
"""Modal object selection with a ray cast"""
bl_idname = "view3d.modal_operator_raycast"
bl_label = "RayCast View Operator"
def modal(self, context, event):
if event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE'}:
# allow navigation
return {'PASS_THROUGH'}
elif event.type == 'LEFTMOUSE':
main(context, event)
return {'RUNNING_MODAL'}
elif event.type in {'RIGHTMOUSE', 'ESC'}:
return {'CANCELLED'}
return {'RUNNING_MODAL'}
def invoke(self, context, event):
if context.space_data.type == 'VIEW_3D':
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
else:
self.report({'WARNING'}, "Active space must be a View3d")
return {'CANCELLED'}
def register():
bpy.utils.register_class(ViewOperatorRayCast)
def unregister():
bpy.utils.unregister_class(ViewOperatorRayCast)
if __name__ == "__main__":
register()
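# Note: once registered, the operator can also be started from Blender's
# Python console with:
#   bpy.ops.view3d.modal_operator_raycast('INVOKE_DEFAULT')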
| gpl-3.0 | -7,564,040,071,069,720,000 | 32.100917 | 95 | 0.594789 | false | 3.830149 | false | false | false |
drx/archfinch | wiki/views.py | 1 | 2132 | from archfinch.wiki.models import PageForm, Page, Revision, RevisionText
from archfinch.main.models import Item
from django.shortcuts import get_object_or_404
from archfinch.utils import render_to_response
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.utils.http import base36_to_int
from django.contrib.auth.decorators import login_required
from django.template.defaultfilters import slugify
from lazysignup.decorators import allow_lazy_user
@allow_lazy_user
def edit(request, page_id=None, item_id=None):
'''
Lets the user edit a wiki page.
'''
# get the page
if page_id is None and item_id is None:
from django.core.exceptions import SuspiciousOperation
raise SuspiciousOperation('Page id and item id were both empty')
if page_id is None:
item = get_object_or_404(Item, pk=base36_to_int(item_id))
page = item.profile.page
redirect_url = reverse('item', args=[item_id, slugify(item.name)])
else:
page = get_object_or_404(Page, pk=base36_to_int(page_id))
redirect_url = reverse('wiki-page', args=[page_id])
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if page is None:
page = Page()
page.save()
item.profile.page = page
item.profile.save()
text = form.cleaned_data['text']
revision_text = RevisionText(text=text)
revision_text.save()
page.revisions.create(text=revision_text, user=request.user)
request.user.add_points(5)
return HttpResponseRedirect(redirect_url)
else:
if page is not None:
try:
text = page.current().text.render()
except Revision.DoesNotExist:
text = ''
else:
text = ''
form = PageForm(initial={'text': text})
return render_to_response('wiki/edit.html', locals(), context_instance=RequestContext(request))
| mit | -2,634,413,105,738,084,400 | 33.95082 | 99 | 0.638368 | false | 4.060952 | false | false | false |
dave-tucker/spectrometer | spectrometer/processor/default_data_processor.py | 3 | 8129 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import hashlib
import json
from github import MainClass
import six
from spectrometer.openstack.common import log as logging
from spectrometer.processor import normalizer
from spectrometer.processor import record_processor
from spectrometer.processor import utils
from spectrometer.processor import vcs
LOG = logging.getLogger(__name__)
def _check_default_data_change(runtime_storage_inst, default_data):
h = hashlib.new('sha1')
h.update(json.dumps(default_data))
digest = h.hexdigest()
p_digest = runtime_storage_inst.get_by_key('default_data_digest')
if digest == p_digest:
LOG.debug('No changes in default data, sha1: %s', digest)
return False
LOG.debug('Default data has changes, sha1: %s', digest)
runtime_storage_inst.set_by_key('default_data_digest', digest)
return True
def _retrieve_project_list_from_github(project_sources):
LOG.info('Retrieving project list from GitHub')
github = MainClass.Github(timeout=60)
repos = []
for project_source in project_sources:
organization = project_source['organization']
LOG.debug('Get list of projects for organization %s', organization)
try:
github_repos = github.get_organization(organization).get_repos()
except Exception as e:
LOG.exception(e)
LOG.warn('Fail to retrieve list of projects. Keep it unmodified')
return False
exclude = set(project_source.get('exclude', []))
for repo in github_repos:
if repo.name not in exclude:
r = {
'branches': ['master'],
'module': repo.name,
'organization': organization,
'uri': repo.git_url,
'releases': []
}
repos.append(r)
LOG.debug('Project is added to default data: %s', r)
return repos
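# Illustrative input: each project source is a mapping such as
#   {'organization': 'openstack', 'module_group_name': 'OpenStack', 'exclude': ['some-repo']}
# and every non-excluded repository of that organization becomes one entry of the
# returned list, shaped like the dict built in the loop above.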
def _create_module_groups_for_project_sources(project_sources, repos):
organizations = collections.defaultdict(list)
for repo in repos:
organizations[repo['organization']].append(repo['module'])
ps_organizations = dict([(ps.get('organization'),
ps.get('module_group_name') or
ps.get('organization'))
for ps in project_sources])
module_groups = []
for ogn, modules in six.iteritems(organizations):
module_groups.append(utils.make_module_group(
ogn, name=ps_organizations.get(ogn, ogn), modules=modules,
tag='organization'))
return module_groups
def _update_project_list(default_data):
configured_repos = set([r['uri'] for r in default_data['repos']])
repos = _retrieve_project_list_from_github(default_data['project_sources'])
if repos:
default_data['repos'] += [r for r in repos
if r['uri'] not in configured_repos]
default_data['module_groups'] += _create_module_groups_for_project_sources(
default_data['project_sources'], default_data['repos'])
def _store_users(runtime_storage_inst, users):
for user in users:
stored_user = utils.load_user(runtime_storage_inst, user['user_id'])
if stored_user:
stored_user.update(user)
user = stored_user
utils.store_user(runtime_storage_inst, user)
def _store_companies(runtime_storage_inst, companies):
domains_index = {}
for company in companies:
for domain in company['domains']:
domains_index[domain] = company['company_name']
if 'aliases' in company:
for alias in company['aliases']:
normalized_alias = utils.normalize_company_name(alias)
domains_index[normalized_alias] = company['company_name']
normalized_company_name = utils.normalize_company_name(
company['company_name'])
domains_index[normalized_company_name] = company['company_name']
runtime_storage_inst.set_by_key('companies', domains_index)
def _store_module_groups(runtime_storage_inst, module_groups):
stored_mg = runtime_storage_inst.get_by_key('module_groups') or {}
for mg in module_groups:
name = mg['module_group_name']
module_group_id = mg.get('id') or name
stored_mg[module_group_id] = utils.make_module_group(
module_group_id, name=name, modules=mg['modules'],
tag=mg.get('tag', 'group'))
runtime_storage_inst.set_by_key('module_groups', stored_mg)
STORE_FUNCS = {
'users': _store_users,
'companies': _store_companies,
'module_groups': _store_module_groups,
}
def _store_default_data(runtime_storage_inst, default_data):
normalizer.normalize_default_data(default_data)
LOG.debug('Update runtime storage with default data')
for key, value in six.iteritems(default_data):
if key in STORE_FUNCS:
STORE_FUNCS[key](runtime_storage_inst, value)
else:
runtime_storage_inst.set_by_key(key, value)
def _update_records(runtime_storage_inst, sources_root):
LOG.debug('Update existing records')
release_index = {}
for repo in utils.load_repos(runtime_storage_inst):
vcs_inst = vcs.get_vcs(repo, sources_root)
release_index.update(vcs_inst.get_release_index())
record_processor_inst = record_processor.RecordProcessor(
runtime_storage_inst)
record_processor_inst.update(release_index)
def _get_changed_member_records(runtime_storage_inst, record_processor_inst):
for record in runtime_storage_inst.get_all_records():
if record['record_type'] == 'member' and 'company_name' in record:
company_draft = record['company_draft']
company_name = record_processor_inst.domains_index.get(
utils.normalize_company_name(company_draft)) or company_draft
if company_name != record['company_name']:
record['company_name'] = company_name
yield record
def _update_members_company_name(runtime_storage_inst):
LOG.debug('Update company names for members')
record_processor_inst = record_processor.RecordProcessor(
runtime_storage_inst)
member_iterator = _get_changed_member_records(runtime_storage_inst,
record_processor_inst)
for record in member_iterator:
company_name = record['company_name']
user = utils.load_user(runtime_storage_inst, record['user_id'])
user['companies'] = [{
'company_name': company_name,
'end_date': 0,
}]
user['company_name'] = company_name
utils.store_user(runtime_storage_inst, user)
LOG.debug('Company name changed for user %s', user)
record_id = record['record_id']
runtime_storage_inst.memcached.set(
runtime_storage_inst._get_record_name(record_id), record)
runtime_storage_inst._commit_update(record_id)
def process(runtime_storage_inst, default_data, sources_root, force_update):
LOG.debug('Process default data')
dd_changed = _check_default_data_change(runtime_storage_inst, default_data)
if 'project_sources' in default_data:
_update_project_list(default_data)
if dd_changed or force_update:
_store_default_data(runtime_storage_inst, default_data)
_update_records(runtime_storage_inst, sources_root)
_update_members_company_name(runtime_storage_inst)
| apache-2.0 | -3,398,933,526,875,077,600 | 34.810573 | 79 | 0.644975 | false | 3.940378 | false | false | false |
Netflix/security_monkey | security_monkey/auditors/s3.py | 1 | 4689 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditors.s3
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from six import text_type
from security_monkey.auditors.resource_policy_auditor import ResourcePolicyAuditor
from security_monkey.auditor import Entity
from security_monkey.watchers.s3 import S3
from security_monkey.datastore import Account
class S3Auditor(ResourcePolicyAuditor):
index = S3.index
i_am_singular = S3.i_am_singular
i_am_plural = S3.i_am_plural
def __init__(self, accounts=None, debug=False):
super(S3Auditor, self).__init__(accounts=accounts, debug=debug)
self.policy_keys = ['Policy']
def prep_for_audit(self):
super(S3Auditor, self).prep_for_audit()
self.FRIENDLY_S3NAMES = [text_type(account['s3_name']).lower() for account in self.OBJECT_STORE['ACCOUNTS']['DESCRIPTIONS'] if account['label'] == 'friendly']
self.THIRDPARTY_S3NAMES = [text_type(account['s3_name']).lower() for account in self.OBJECT_STORE['ACCOUNTS']['DESCRIPTIONS'] if account['label'] == 'thirdparty']
self.FRIENDLY_S3CANONICAL = [text_type(account['s3_canonical_id']).lower() for account in self.OBJECT_STORE['ACCOUNTS']['DESCRIPTIONS'] if account['label'] == 'friendly']
self.THIRDPARTY_S3CANONICAL = [text_type(account['s3_canonical_id']).lower() for account in self.OBJECT_STORE['ACCOUNTS']['DESCRIPTIONS'] if account['label'] == 'thirdparty']
self.INTERNET_ACCESSIBLE = [
'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'.lower(),
'http://acs.amazonaws.com/groups/global/AllUsers'.lower()]
self.LOG_DELIVERY = ['http://acs.amazonaws.com/groups/s3/LogDelivery'.lower()]
self.KNOWN_ACLS = self.FRIENDLY_S3NAMES + self.THIRDPARTY_S3NAMES + self.FRIENDLY_S3CANONICAL + self.THIRDPARTY_S3CANONICAL + self.INTERNET_ACCESSIBLE + self.LOG_DELIVERY
def _check_acl(self, item, field, keys, recorder):
acl = item.config.get('Grants', {})
owner = item.config["Owner"]["ID"].lower()
for key in list(acl.keys()):
if key.lower() not in keys:
continue
# Canonical ID == Owning Account - No issue
if key.lower() == owner.lower():
continue
entity = Entity(category='ACL', value=key)
account = self._get_account(field, key)
if account:
entity.account_name=account['name']
entity.account_identifier=account['identifier']
recorder(item, actions=acl[key], entity=entity)
def check_acl_internet_accessible(self, item):
""" Handles AllUsers and AuthenticatedUsers. """
self._check_acl(item, 'aws', self.INTERNET_ACCESSIBLE, self.record_internet_access)
def check_acl_log_delivery(self, item):
self._check_acl(item, 'aws', self.LOG_DELIVERY, self.record_thirdparty_access)
def check_acl_friendly_legacy(self, item):
self._check_acl(item, 's3_name', self.FRIENDLY_S3NAMES, self.record_friendly_access)
def check_acl_thirdparty_legacy(self, item):
self._check_acl(item, 's3_name', self.THIRDPARTY_S3NAMES, self.record_thirdparty_access)
def check_acl_friendly_canonical(self, item):
self._check_acl(item, 's3_canonical_id', self.FRIENDLY_S3CANONICAL, self.record_friendly_access)
def check_acl_thirdparty_canonical(self, item):
self._check_acl(item, 's3_canonical_id', self.THIRDPARTY_S3CANONICAL, self.record_thirdparty_access)
def check_acl_unknown(self, item):
acl = item.config.get('Grants', {})
for key in list(acl.keys()):
if key.lower() not in self.KNOWN_ACLS:
entity = Entity(category='ACL', value=key)
self.record_unknown_access(item, entity, actions=acl[key])
def check_policy_exists(self, item):
policy = item.config.get('Policy', {})
if not policy:
message = "POLICY - No Policy."
self.add_issue(0, message, item)
| apache-2.0 | 4,173,267,436,874,948,600 | 45.89 | 182 | 0.662615 | false | 3.473333 | false | false | false |
miradel51/preprocess | chinese_norm.py | 1 | 1235 | #!/usr/bin/python
#-*-coding:utf-8 -*-
# author: mld
# email: [email protected]
# date : 2017/9/30
# time : 23:42(pm)
import sys
import re
# Removes the whitespace at the beginning or end of the sentence
# Converts full-width characters in a Chinese sentence to their half-width equivalents
# If there are any English characters in the sentence, the non-Chinese symbols are also lowercased
def chinese_norm(original_sen):
conver_sen = ""
for char in original_sen:
_code = ord(char)
if _code == 0x3000:
_code = 0x0020
else:
_code -= 0xfee0
		# keep the original character if the shifted code is still not a half-width printable character
if _code < 0x0020 or _code > 0x7e:
conver_sen += char
else:
conver_sen += chr(_code)
conver_sen = re.sub(r'\s+', ' ', conver_sen)
#conver lower
conver_sen = conver_sen.lower()
return conver_sen
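# Illustrative behaviour: full-width "ABC123" normalizes to half-width,
# lower-cased "abc123" and the ideographic space U+3000 becomes a plain space,
# while CJK characters fall outside the 0x20-0x7e window after the shift and are
# therefore kept unchanged.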
if __name__ == '__main__':
ori_ = sys.argv[1]
convert_ = sys.argv[2]
ori_file = open(ori_,"r")
converted_file = open(convert_,"w")
context = ""
for eachline in ori_file:
context = chinese_norm(eachline.strip())
converted_file.write(context)
converted_file.write("\n")
ori_file.close()
converted_file.close()
| mit | -8,588,381,024,596,045,000 | 19.293103 | 104 | 0.640486 | false | 2.940476 | false | false | false |
ganemone/ontheside | server/mod_auth/auth.py | 1 | 1354 | import flask
from flask_login import login_user
from server.models import User
from server.login_manager import login_manager
@login_manager.user_loader
def load_user(user_id: int) -> User:
"""Returns a user from the database based on their id
:param user_id: a users unique id
:return: User object with corresponding id, or none if user does not exist
"""
return User.query.filter_by(id=user_id).first()
def handle_basic_auth(request: flask.Request) -> User:
"""Verifies a request using BASIC auth
:param request: flask request object
:return: User object corresponding to login information, or none if user does not exist
"""
auth = request.authorization
if not auth:
return None
return User.query.filter_by(
username=auth.username,
password=auth.password
).first()
def login(request: flask.Request) -> flask.Response:
"""Handle a login request from a user
:param request: incoming request object
:return: flask response object
"""
user = handle_basic_auth(request)
if user:
login_user(user, remember=True)
return 'OK'
return flask.Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
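# Illustrative client call (the /login route itself is registered elsewhere in the
# application, so the URL below is an assumption):
#   curl -u alice:secret http://localhost:5000/login   -> "OK" or a 401 response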
| mit | 8,787,244,667,091,010,000 | 29.088889 | 91 | 0.680945 | false | 4.10303 | false | false | false |
AlbanoCastroSousa/RESSPyLab | RESSPyLab/summary_tables_maker.py | 1 | 9616 | import pandas as pd
import numpy as np
from .uvc_model import calc_phi_total
def summary_tables_maker_uvc(material_definition, x_file_paths, data, peeq='sat'):
""" Prints to screen the summary tables for the material optimization in LaTeX format for the updated VC model.
:param dict material_definition: Contains information about each material.
:param list x_file_paths: (str) Path for the files that contain the x values for each material.
:param list data: (list, pd.DataFrame) The test data used for calibration of each of the materials.
:param str or float peeq: If 'sat' then calculates the metrics at model saturation, otherwise a finite equivalent
plastic strain.
:return list: The first and second summary tables.
Notes:
- material_definition:
'material_id': (list, str) Identifier for each material.
'load_protocols': (list, str) Labels of the load protocols used, see [1] for definitions.
- The metrics in Table 2 are defined in [2].
- If a finite peeq is provided, the metrics are calculated assuming that peeq increases monotonically
to the provided value.
References:
[1] de Castro e Sousa and Lignos (2017), On the inverse problem of classic nonlinear plasticity models.
[2] de Castro e Sousa and Lignos (2018), Constrained optimization in metal plasticity inverse problems.
"""
# Output column labels
parameter_labels = [r'$E$[GPa]', r'$\sigma_{y,0}$[MPa]', r'$Q_\infty$[MPa]', r'$b$',
r'$D_\infty$[MPa]', r'$a$',
r'$C_1$[MPa]', r'$\gamma_1$', r'$C_2$[MPa]', r'$\gamma_2$',
r'$C_3$[MPa]', r'$\gamma_3$', r'$C_4$[MPa]', r'$\gamma_4$']
metric_labels = [r'$\sigma_{y,0}$[MPa]', r'$\sigma_{sat}$[MPa]', r'$\sigma_{hard}$[MPa]',
r'$\rho^{sat}_{yield}$', r'$\rho^{sat}_{iso}$', r'$\rho^{sat}_{kin}$', r'$\rho^{sat}_{D}$']
n_basic_param = 6
tab_1, tab_2 = _table_maker(material_definition, x_file_paths, data, parameter_labels, metric_labels,
n_basic_param, calc_upd_metrics=True, peeq=peeq)
return [tab_1, tab_2]
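# Illustrative call (file names and labels are placeholders):
#   mat_def = {'material_id': ['S355J2'], 'load_protocols': ['LP1']}
#   tables = summary_tables_maker_uvc(mat_def, ['x_S355J2.txt'], [test_data])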
def summary_tables_maker_vc(material_definition, x_file_paths, data, peeq='sat'):
""" Prints to screen the summary tables for the material optimization in LaTeX format for the original VC model.
:param dict material_definition: Contains information about each material.
:param list x_file_paths: (str) Path for the files that contain the x values for each material.
:param list data: (list, pd.DataFrame) The test data used for calibration of each of the materials.
:param str or float peeq: If 'sat' then calculates the metrics at model saturation, otherwise a finite equivalent
plastic strain.
:return list: The first and second summary tables.
Notes:
- material_definition:
'material_id': (list, str) Identifier for each material.
'load_protocols': (list, str) Labels of the load protocols used, see [1] for definitions.
- The metrics in Table 2 are defined in [2].
- If a finite peeq is provided, the metrics are calculated assuming that peeq increases monotonically
to the provided value.
References:
[1] de Castro e Sousa and Lignos (2017), On the inverse problem of classic nonlinear plasticity models.
[2] de Castro e Sousa and Lignos (2018), Constrained optimization in metal plasticity inverse problems.
"""
# Output column labels
parameter_labels = [r'$E$[GPa]', r'$\sigma_{y,0}$[MPa]', r'$Q_\infty$[MPa]', r'$b$',
r'$C_1$[MPa]', r'$\gamma_1$', r'$C_2$[MPa]', r'$\gamma_2$',
r'$C_3$[MPa]', r'$\gamma_3$', r'$C_4$[MPa]', r'$\gamma_4$']
metric_labels = [r'$\sigma_{y,0}$[MPa]', r'$\sigma_{sat}$[MPa]', r'$\sigma_{hard}$[MPa]',
r'$\rho^{sat}_{yield}$', r'$\rho^{sat}_{iso}$', r'$\rho^{sat}_{kin}$']
n_basic_param = 4
tab_1, tab_2 = _table_maker(material_definition, x_file_paths, data, parameter_labels, metric_labels,
n_basic_param, calc_upd_metrics=False, peeq=peeq)
return [tab_1, tab_2]
def _table_maker(material_definition, x_file_paths, data, parameter_labels, metric_labels, num_basic_param,
calc_upd_metrics, peeq='sat'):
""" Base function to generate the tables. """
# Set some options for the display
pd.set_option('display.max_columns', 12)
pd.set_option('display.width', 300)
pd.set_option('display.float_format', '{:0.2f}'.format)
# Extract the properties from the definition
material_id = material_definition['material_id']
load_protocols = material_definition['load_protocols']
# Make the first table
phi_values = []
summary_table = pd.DataFrame()
for i, f in enumerate(x_file_paths):
x = pd.read_csv(f, delimiter=' ')
x = np.array(x.iloc[-1])
# Sort the backstresses so that the largest gamma value is first
gammas = x[num_basic_param + 1::2]
ind = np.flipud(np.argsort(gammas))
# Exchange the gammas
x[num_basic_param + 1::2] = x[2 * ind + num_basic_param + 1]
# Exchange the Cs
x[num_basic_param::2] = x[2 * ind + num_basic_param]
temp_table = pd.DataFrame(x, columns=(material_id[i],)).transpose()
summary_table = summary_table.append(temp_table)
if calc_upd_metrics:
phi_values.append(calc_phi_total(x, data[i]))
else:
x_phi = np.insert(x, 4, [0., 1.])
phi_values.append(calc_phi_total(x_phi, data[i]))
# Rename the columns
summary_table.columns = parameter_labels[:len(summary_table.columns)]
# Add the phi values
summary_table.insert(0, r'$\bar{\varphi}$[\%]', phi_values)
# Add the load protocols
summary_table.insert(0, r'LP', load_protocols)
# Make the elastic modulus in GPa
summary_table[parameter_labels[0]] = summary_table[parameter_labels[0]] / 1000.
# Set the index name to Materials
summary_table.index.name = 'Material'
print (summary_table.to_latex(escape=False))
# Make the second table
summary_table_2 = pd.DataFrame()
for i, f in enumerate(x_file_paths):
# Calculate the comparison metrics
data_row = list(summary_table.iloc[i])
s_y0 = data_row[3]
hm = _hard_metric_at_peeq(data_row, num_basic_param, calc_upd_metrics, peeq)
sigma_sat = hm['sigma_sat']
sigma_hard = hm['sigma_hard']
rho_yield = hm['rho_yield']
rho_iso = hm['rho_iso']
rho_kin = hm['rho_kin']
rho_d = hm['rho_d']
if calc_upd_metrics:
new_row = np.array([s_y0, sigma_sat, sigma_hard, rho_yield, rho_iso, rho_kin, rho_d])
else:
new_row = np.array([s_y0, sigma_sat, sigma_hard, rho_yield, rho_iso, rho_kin])
# Add the values to the table for each material
temp_table = pd.DataFrame(new_row, columns=(material_id[i],)).transpose()
summary_table_2 = summary_table_2.append(temp_table)
# Rename the columns
summary_table_2.columns = metric_labels
# Set the index name to Materials
summary_table_2.index.name = 'Material'
print (summary_table_2.to_latex(escape=False))
return [summary_table, summary_table_2]
def _hard_metric_at_peeq(x, num_basic_param, calc_upd_metrics, peeq='sat'):
""" Calculates the hardening metrics for both the original and updated Voce-Chaboche models.
:param list x: Row of data from table_maker function.
:param int num_basic_param: Number of non-backstress related parameters in the model.
:param bool calc_upd_metrics: If True then calculates the rho_d metric, if False then sets it to 0.
:param str or float peeq: If 'sat' then calculates the metrics at model saturation, otherwise a finite equivalent
plastic strain.
:return dict: Hardening metrics.
Notes:
- If a finite peeq is provided, the metrics are calculated assuming that peeq increases monotonically
to the provided value.
"""
cols_before_kin = num_basic_param + 2
num_backstresses = (len(x) - cols_before_kin) // 2
s_y0 = x[3]
if peeq == 'sat':
# Calculate values assuming fully saturated
q_inf = x[4]
if calc_upd_metrics:
d_inf = x[6]
else:
d_inf = 0.
sum_kin = 0.
for j in range(num_backstresses):
c_j = x[cols_before_kin + 2 * j]
g_j = x[cols_before_kin + 1 + 2 * j]
sum_kin += c_j / g_j
else:
# Calculate values at finite equivalent plastic strain (monotonically increasing)
q_inf = x[4] * (1. - np.exp(-x[5] * peeq))
if calc_upd_metrics:
d_inf = x[6] * (1. - np.exp(-x[7] * peeq))
else:
d_inf = 0.
sum_kin = 0.
for j in range(num_backstresses):
c_j = x[cols_before_kin + 2 * j]
g_j = x[cols_before_kin + 1 + 2 * j]
sum_kin += c_j / g_j * (1. - np.exp(-g_j * peeq))
# Calculate all the metrics
sigma_sat = s_y0 + q_inf - d_inf + sum_kin
sigma_hard = q_inf + sum_kin
rho_yield = sigma_sat / s_y0
rho_iso = q_inf / sigma_hard
rho_kin = sum_kin / sigma_hard
rho_d = d_inf / (q_inf + sum_kin)
return {'sigma_sat': sigma_sat, 'sigma_hard': sigma_hard,
'rho_yield': rho_yield, 'rho_iso': rho_iso, 'rho_kin': rho_kin, 'rho_d': rho_d}
| mit | -3,612,788,240,963,097,600 | 47.08 | 117 | 0.606073 | false | 3.312435 | false | false | false |
sbidoul/buildbot | master/buildbot/test/integration/test_customservices.py | 6 | 3917 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from buildbot.test.util.decorators import flaky
from buildbot.test.util.integration import RunMasterBase
# This integration test creates a master and worker environment,
# with one builder and a custom step
# The custom step is using a CustomService, in order to calculate its result
# we make sure that we can reconfigure the master while build is running
class CustomServiceMaster(RunMasterBase):
@flaky(bugNumber=3340)
@defer.inlineCallbacks
def test_customService(self):
yield self.setupConfig(masterConfig())
build = yield self.doForceBuild(wantSteps=True)
self.assertEqual(build['steps'][0]['state_string'], 'num reconfig: 1')
myService = self.master.service_manager.namedServices['myService']
self.assertEqual(myService.num_reconfig, 1)
self.assertTrue(myService.running)
# We do several reconfig, and make sure the service
# are reconfigured as expected
yield self.master.reconfig()
build = yield self.doForceBuild(wantSteps=True)
self.assertEqual(myService.num_reconfig, 2)
self.assertEqual(build['steps'][0]['state_string'], 'num reconfig: 2')
yield self.master.reconfig()
myService2 = self.master.service_manager.namedServices['myService2']
self.assertTrue(myService2.running)
self.assertEqual(myService2.num_reconfig, 3)
self.assertEqual(myService.num_reconfig, 3)
yield self.master.reconfig()
# second service removed
self.assertNotIn(
'myService2', self.master.service_manager.namedServices)
self.assertFalse(myService2.running)
self.assertEqual(myService2.num_reconfig, 3)
self.assertEqual(myService.num_reconfig, 4)
# master configuration
num_reconfig = 0
def masterConfig():
global num_reconfig
num_reconfig += 1
c = {}
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.steps.shell import ShellCommand
from buildbot.util.service import BuildbotService
class MyShellCommand(ShellCommand):
def getResultSummary(self):
service = self.master.service_manager.namedServices['myService']
return dict(step=u"num reconfig: %d" % (service.num_reconfig,))
class MyService(BuildbotService):
name = "myService"
def reconfigService(self, num_reconfig):
self.num_reconfig = num_reconfig
return defer.succeed(None)
c['schedulers'] = [
ForceScheduler(
name="force",
builderNames=["testy"])]
f = BuildFactory()
f.addStep(MyShellCommand(command='echo hei'))
c['builders'] = [
BuilderConfig(name="testy",
workernames=["local1"],
factory=f)]
c['services'] = [MyService(num_reconfig=num_reconfig)]
if num_reconfig == 3:
c['services'].append(
MyService(name="myService2", num_reconfig=num_reconfig))
return c
| gpl-2.0 | -1,451,463,372,495,509,500 | 32.478632 | 79 | 0.693388 | false | 4.088727 | true | false | false |
Digoss/funny_python | botNet.py | 1 | 1137 | import argparse
import pxssh
import sys
class Client:
def __init__(self, host, user, password):
self.host = host
self.user = user
self.password = password
self.session = self.connect()
def connect(self):
try:
s = pxssh.pxssh()
s.login(self.host, self.user, self.password)
return s
except Exception, e:
print e
print '[-] Error Connecting'
def send_command(self, cmd):
self.session.sendline(cmd)
self.session.prompt()
return self.session.before
def botnetCommand(command):
for client in botNet:
output = client.send_command(command)
print '[*] Output from ' + client.host
print '[+] ' + output + '\n'
def addClient(host, user, password):
client = Client(host, user, password)
botNet.append(client)
botNet = []

def main():
    # 'ip', 'user' and 'password' below are placeholders for a real SSH target
    addClient('ip', 'user', 'password')
    try:
        while True:
            command = raw_input("Put a command: ")
            botnetCommand(command)
    except KeyboardInterrupt:
        print "KeyboardInterrupt"
        sys.exit()

if __name__ == "__main__":
    main() | bsd-3-clause | -1,686,618,998,251,673,000 | 20.471698 | 56 | 0.595427 | false | 3.644231 | false | false | false
Jordy281/Tic_Tac_Toe_SuperComputer | TicTacToe.py | 1 | 5041 | import numpy as np
import copy
from random import randrange
import game
import arena
import gym
import winningPercent
"""
Piece State,
0 = Empty
1 = X
2 = O
0 | 1 | 2
---------------
3 | 4 | 5
---------------
6 | 7 | 8
"""
"""
This will check if the current state exists,
IF YES: return index of it
IF NO: return -1
"""
def stateChecker(states, board):
index=-1
i=0
while i<len(states) and index==-1:
match=True
j=0
while j<9 and match is True:
if states[i][j]!=board[j]:
match=False
j+=1
if match is True:
index=i
i+=1
return index
"""This will start us cranking out all possible states """
def createAllPossibleStates(states, R, t):
board=[0,0,0,0,0,0,0,0,0]
states.append(board)
createAllStates(states,R, t,board, 1, -1, -1 , False,)
print "Woe, that was tough!"
def createAllStates(states, R, t, board, turn, previousState, previousMove, GameOver):
currentState=copy.deepcopy(len(states)-1)
#prevMove=copy.deepcopy(previousMove)
#prevState=copy.deepcopy(previousState)
newTurn=copy.deepcopy(turn)
#playerThatWon=copy.deepcopy(winningPlayer)
R.append([0.,0.,0.,0.,0.,0.,0.,0.,0.])
t.append([0,0,0,0,0,0,0,0,0])
"""
if turn==10:
#print "DRAW"
R.append([-1,-1,-1,-1])
return
"""
for i in range(0,9):
currentMove=copy.deepcopy(i)
#Check for empty square
if board[i]==0:
newBoard=copy.deepcopy(board)
game.addMove(newBoard, turn, i)
gameOv=copy.deepcopy(GameOver)
#if gameOv is True:
# if newTurn%2==playerThatWon%2:
# R[currentState][i]=100.0
if game.threecheck(newBoard) is True and gameOv is False:
R[currentState][i]=100.0
gameOv=True
#winningPlayer=newTurn%2
#we need to alter the reward from previous movement to reflect a loss
#R[prevState][prevMove]=-100.0
#If the game is not over, the last player puts a piece down to draw
elif game.threecheck(newBoard) is False and gameOv is False and turn==9:
#R[prevState][prevMove]=25
R[currentState][i]=25
gameOv=True
#Here we will find if we will be at a previously
check=stateChecker(states, newBoard)
if check==-1: #If this is a board we have not seen
states.append(newBoard)
t[currentState][currentMove]=len(states)-1
#Go to next state from current move
#We will have to send the info for current state and move in case it results in a direct loss
createAllStates(states,R, t, newBoard,newTurn+1, currentState, currentMove, gameOv)
else:
				# if it is, all we have to do is append the INDEX FOR THE next state
# This will allow us to quickly jump to that state.
t[currentState][currentMove]=check
#IF the square is taken, we can not place a piece there
#so there is not corresponding cation or reward
else:
R[currentState][currentMove]=-np.inf
t[currentState][currentMove]=-1
def setQNoBacktrack(Q,t):
for i in range (len(t)):
for j in range (len(t[0])):
if t[i][j]==-1:
Q[i,j]=-np.inf
#-------------------------------------------------------------------------------------------
"""
States holds all boards
R holds all rewards
t holds list of actions, and location of state following action
"""
states=[]
R=[]
t=[]
print "Loading states, please wait."
createAllPossibleStates(states, R, t)
#nStates= np.shape(R)[0]
#print nStates
print "Time to get to the gym, brb."
Qrand1 = trainingAgainstRand1(states,t)
Qrand2 = trainingAgainstRand2(states,t)
QQ=trainingAgainstLearner(states, t)
Qplayer1=QQ[0]
Qplayer2=QQ[1]
# ****** If you want to calculate winning percentages of the learners, enable the next line**********
winningPercent.winningPercent(QQ, Qrand1, Qrand2, t, states)
#-----------------------------------------------------------------------------------------------------------
# This section is a user menu that allows the
# user to determine if they want two trained
# computers to battle, or play against the super computer
mode=0
while mode!=3:
print "Would you like:"
print "1. Two computers to battle to the death"
print "2. Play against the super computer"
print "3. Quit"
mode=int(raw_input('Input:'))
if mode==1:
print "You selected two computers"
arena.TwoComputers(QRand1,QRand2, t,states, Comp1Win,RandWin,Draw)
print ""
print ""
elif mode==2:
print "So you want to play?"
print ""
print ""
arena.soIHearYouLikeToPlay(Q, states)
elif mode!=3:
print "Invalid Response"
print ""
print""
print "done"
| mit | 2,689,065,188,618,646,500 | 22.018265 | 108 | 0.57687 | false | 3.349502 | false | false | false |
GNOME/pyclutter | examples/image-content.py | 1 | 2894 | # Clutter depends on Cogl 1.0 for public API, but Cogl ships with
# introspection data for both 1.0 and 2.0; pygobject will prefer
# the latter, so we need to load Clutter before Cogl
from gi.repository import Clutter
from gi.repository import Cogl
from gi.repository import GdkPixbuf
gravities = [
( Clutter.ContentGravity.TOP_LEFT, 'Top Left' ),
( Clutter.ContentGravity.TOP, 'Top' ),
( Clutter.ContentGravity.TOP_RIGHT, 'Top Right' ),
( Clutter.ContentGravity.LEFT, 'Left' ),
( Clutter.ContentGravity.CENTER, 'Center' ),
( Clutter.ContentGravity.RIGHT, 'Right' ),
( Clutter.ContentGravity.BOTTOM_LEFT, 'Bottom Left' ),
( Clutter.ContentGravity.BOTTOM, 'Bottom' ),
( Clutter.ContentGravity.BOTTOM_RIGHT, 'Bottom Right' ),
( Clutter.ContentGravity.RESIZE_FILL, 'Resize Fill' ),
( Clutter.ContentGravity.RESIZE_ASPECT, 'Resize Aspect' )
]
current_gravity = 0
def on_tap(action, actor, text):
global gravities, current_gravity
# Change the label
text.props.text = 'Content Gravity: ' + gravities[current_gravity][1]
# Animate the content gravity changes
with actor.easing_state():
actor.set_content_gravity(gravities[current_gravity][0])
# Cycle through all gravities
current_gravity += 1
if current_gravity >= len(gravities):
current_gravity = 0
if __name__ == '__main__':
Clutter.init(None)
# Our stage
stage = Clutter.Stage(title='Content Box', user_resizable=True)
stage.set_margin(Clutter.Margin(12))
stage.connect('destroy', Clutter.main_quit)
stage.show()
# Load the texture data from a file
pixbuf = GdkPixbuf.Pixbuf.new_from_file('redhand.png')
# Use the correct pixel format depending on whether the image
# has an alpha channel
pixel_format = Cogl.PixelFormat.RGB_888
if pixbuf.get_has_alpha():
pixel_format = Cogl.PixelFormat.RGBA_8888
data = pixbuf.read_pixel_bytes()
width = pixbuf.get_width()
height = pixbuf.get_height()
stride = pixbuf.get_rowstride()
# The Image content knows how to draw texture data
image = Clutter.Image()
image.set_bytes(data, pixel_format, width, height, stride)
# A Stage is like any other actor, and can paint a Content
stage.set_content_gravity(Clutter.ContentGravity.RESIZE_ASPECT)
stage.set_content_scaling_filters(Clutter.ScalingFilter.TRILINEAR, Clutter.ScalingFilter.LINEAR)
stage.set_content(image)
# Show a label with the current content gravity
label = 'Content Gravity: Resize Aspect'
text = Clutter.Text(text=label)
text.add_constraint(Clutter.AlignConstraint(source=stage, align_axis=Clutter.AlignAxis.BOTH, factor=0.5))
stage.add_child(text)
# Change the content gravity on tap/click
action = Clutter.TapAction()
action.connect('tap', on_tap, text)
stage.add_action(action)
Clutter.main()
| lgpl-2.1 | 3,622,187,896,764,888,600 | 33.047059 | 109 | 0.696959 | false | 3.318807 | false | false | false |
liulion/mayavi | docs/source/mayavi/auto/wigner.py | 8 | 2866 | """
An example in which 3 functions of x and y are displayed with a surf plot,
while the z scaling is kept constant, to allow comparison between them.
The important aspect of this example is that the 3 functions should not
be displayed on top of each other, but side by side. For this we use the
extent keyword argument.
In addition, the relative scale between the different plots is important.
This is why we also use the `warp_scale` keyword argument, to have the same
scale on all plots.
Finally, we have to adjust the data bounds: as we want the "horizon" of
the wigner function in the middle of our extents, we put this to zero.
We add a set of axes and outlines to the plot. We have to play with extents
and ranges in order to make them fit with the data.
"""
# Author: Gael Varoquaux <[email protected]>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
import numpy
from mayavi import mlab
def cat(x, y, alpha, eta=1, purity=1):
""" Multiphoton shrodinger cat. eta is the fidelity, alpha the number
of photons"""
cos = numpy.cos
exp = numpy.exp
return (1 + eta * (exp(-x ** 2 - (y - alpha) ** 2) + exp(-x ** 2 -
(y + alpha) ** 2) + 2 * purity * exp(-x ** 2 - y ** 2) * cos(2 * \
alpha * x)) / (2 * (1 + exp(- alpha ** 2)))) / 2
x, y = numpy.mgrid[-4:4.15:0.1, -4:4.15:0.1]
mlab.figure(1, size=(500, 250), fgcolor=(1, 1, 1),
bgcolor=(0.5, 0.5, 0.5))
mlab.clf()
cat1 = cat(x, y, 1)
cat2 = cat(x, y, 2)
cat3 = cat(x, y, 3)
# The cats lie in a [0, 1] interval, with .5 being the asymptotic
# value. We want to reposition this value to 0, so as to put it in the
# center of our extents.
cat1 -= 0.5
cat2 -= 0.5
cat3 -= 0.5
cat1_extent = (-14, -6, -4, 4, 0, 5)
surf_cat1 = mlab.surf(x - 10, y, cat1, colormap='Spectral', warp_scale=5,
extent=cat1_extent, vmin=-0.5, vmax=0.5)
mlab.outline(surf_cat1, color=(.7, .7, .7), extent=cat1_extent)
mlab.axes(surf_cat1, color=(.7, .7, .7), extent=cat1_extent,
ranges=(0, 1, 0, 1, 0, 1), xlabel='', ylabel='',
zlabel='Probability',
x_axis_visibility=False, z_axis_visibility=False)
mlab.text(-18, -4, '1 photon', z=-4, width=0.13)
cat2_extent = (-4, 4, -4, 4, 0, 5)
surf_cat2 = mlab.surf(x, y, cat2, colormap='Spectral', warp_scale=5,
extent=cat2_extent, vmin=-0.5, vmax=0.5)
mlab.outline(surf_cat2, color=(0.7, .7, .7), extent=cat2_extent)
mlab.text(-4, -3, '2 photons', z=-4, width=0.14)
cat3_extent = (6, 14, -4, 4, 0, 5)
surf_cat3 = mlab.surf(x + 10, y, cat3, colormap='Spectral', warp_scale=5,
extent=cat3_extent, vmin=-0.5, vmax=0.5)
mlab.outline(surf_cat3, color=(.7, .7, .7), extent=cat3_extent)
mlab.text(6, -2.5, '3 photons', z=-4, width=0.14)
mlab.title('Multi-photons cats Wigner function')
mlab.view(142, -72, 32)
mlab.show()
| bsd-3-clause | 8,597,588,439,283,360,000 | 33.53012 | 75 | 0.630495 | false | 2.636615 | false | false | false |
jordotech/sherri_satchmo | satchmo/projects/skeleton/local_settings.py | 6 | 2241 | # this is an extremely simple Satchmo standalone store.
import logging
import os, os.path
LOCAL_DEV = True
DEBUG = True
TEMPLATE_DEBUG = DEBUG
if LOCAL_DEV:
INTERNAL_IPS = ('127.0.0.1',)
DIRNAME = os.path.dirname(os.path.abspath(__file__))
SATCHMO_DIRNAME = DIRNAME
gettext_noop = lambda s:s
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('en', gettext_noop('English')),
)
#These are used when loading the test data
SITE_NAME = "simple"
DATABASES = {
'default': {
# The last part of ENGINE is 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'ado_mssql'.
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DIRNAME, 'simple.db'), # Or path to database file if using sqlite3
#'USER': '', # Not used with sqlite3.
#'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
SECRET_KEY = 'EXAMPLE SECRET KEY'
##### For Email ########
# If this isn't set in your settings file, you can set these here
#EMAIL_HOST = 'host here'
#EMAIL_PORT = 587
#EMAIL_HOST_USER = 'your user here'
#EMAIL_HOST_PASSWORD = 'your password'
#EMAIL_USE_TLS = True
#These are used when loading the test data
SITE_DOMAIN = "localhost"
SITE_NAME = "Simple Satchmo"
# not suitable for deployment, for testing only, for deployment strongly consider memcached.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'satchmo-cache',
'TIMEOUT': 60
}
}
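# A hedged sketch only (not part of the original settings): for deployment one
# might switch the cache to memcached; the backend path and location below are
# assumptions, adjust them for your setup.
#CACHES = {
#    'default': {
#        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#        'LOCATION': '127.0.0.1:11211',
#        'TIMEOUT': 60
#    }
#}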
ACCOUNT_ACTIVATION_DAYS = 7
#Configure logging
LOGFILE = "satchmo.log"
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=os.path.join(DIRNAME,LOGFILE),
filemode='w')
logging.getLogger('django.db.backends').setLevel(logging.INFO)
logging.getLogger('keyedcache').setLevel(logging.INFO)
logging.getLogger('l10n').setLevel(logging.INFO)
logging.getLogger('suds').setLevel(logging.INFO)
logging.info("Satchmo Started")
| bsd-3-clause | -4,597,215,638,158,019,600 | 28.88 | 96 | 0.636323 | false | 3.410959 | false | false | false |
artreven/pp_api | pp_api/extractor_utils.py | 1 | 3365 | """
Extractor-related utility functions.
"""
from collections import defaultdict
def ppextract2matches(matches, tag=None, overlaps=True):
"""
Convert PP extractor API results to 4-tuples specifying annotations,
usable for edit operations on the input file.
Overlapping tuples may optionally be removed, since it is tricky to
apply overlapping offset-based annotations to a string.
:param matches: An array of dicts as returned by pp_api.PoolParty.get_cpts_from_response().
:param tag: A fixed tag to annotate with. If None, annotate with the
prefLabel of each matched concept.
:param overlaps: Whether to include overlapping annotations in the results.
:return: A list of tuples (start, end, tag, content).
`start` and `end` are the character offsets. `content` is
the text content of this span, e.g. for error checking.
Note: pp_api.PoolParty.get_cpts_from_response() returns this structure:
[
{
"prefLabel": "Something",
"uri": "https:...",
...
"matchings": [
{
"text": "something",
"frequency": n,
"positions": [
[
start1,
end1
],
[ start2,
end2
]
]
},
{
"text": "something_else",
...
"""
use_labels = bool(tag is None)
edits = []
for cpt_dict in matches:
if use_labels:
tag = cpt_dict["prefLabel"]
# We can't annotate shadow concepts:
if "matchings" not in cpt_dict:
continue
for match in cpt_dict["matchings"]:
for start, end in match["positions"]:
edits.append((start, end, tag, match["text"]))
if not overlaps:
edits = remove_overlaps(edits)
return edits
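# --- Usage sketch (not part of the original module) ---------------------------
# The concept label, URI and offsets below are made up for illustration; the
# `matches` structure mirrors the one documented in ppextract2matches() above.
def _example_ppextract2matches():
    matches = [
        {
            "prefLabel": "Data security",
            "uri": "https://example.org/concept/data-security",
            "matchings": [
                {"text": "data security", "frequency": 1, "positions": [[10, 23]]},
            ],
        },
    ]
    # Expected result: [(10, 23, 'Data security', 'data security')]
    return ppextract2matches(matches, overlaps=False)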
def remove_overlaps(matches):
"""
Return a subset of the matches, so that
they are unique, ordered and non-overlapping.
:param matches: a list of 4-tuples (start, end, tag, content)
:return: the cleaned list
"""
# Example that must be handled: Three annotations data, security, "data security"
#
# [ [data] [security] ]
# Remove repetitions (e.g., ambiguous concept labels matching the same text)
matches = set(matches)
# Group edits by start position
groups = defaultdict(list)
for edt in matches:
start, end, tag, match = edt
groups[start].append(edt)
# If several spans start at the same point, we keep the longest one
# (If we still have two prefLabels with the same span, keeps the one that sorts last)
for k, members in groups.items():
if len(members) > 1:
members[:] = sorted(members, key=lambda x: x[1])[-1:]
matches = sorted(v[0] for v in groups.values())
# Now look for concepts that start before the last one ended
offset = -1
clean = []
for edt in matches:
start, end, tag, match = edt
if start <= offset:
continue
clean.append(edt)
offset = end
return clean
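# --- Usage sketch (not part of the original module) ---------------------------
# Illustrates the "data" / "security" / "data security" case from the comment
# above: only the longest non-overlapping spans survive.
def _example_remove_overlaps():
    matches = [
        (0, 4, "Data", "data"),
        (0, 13, "Data security", "data security"),
        (5, 13, "Security", "security"),
    ]
    # Expected result: [(0, 13, 'Data security', 'data security')]
    return remove_overlaps(matches)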
| mit | 2,559,736,327,515,930,600 | 29.315315 | 95 | 0.549777 | false | 4.41601 | false | false | false |
RedHatSatellite/satellite-sanity | satellite_sanity_lib/rules/sat5_taskomatic_running.py | 2 | 1187 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
tags = ['Satellite_5', 'Spacewalk']
name = 'Taskomatic service is running'
from satellite_sanity_lib.util import get_days_uptime
def find_taskomatic_process(data):
"""
Check the ps output to see if taskomatic is running
"""
for line in data['ps_auxcww']:
if line.endswith(' taskomaticd') or ' /usr/bin/taskomaticd ' in line:
return {'TASKOMATIC_PROCESS_LINE': line}
def get_uptime(data):
"""
Return the number of days the machine has been up
"""
return {'UPTIME_DAYS': int(get_days_uptime(data['uptime'][0]))}
def main(data):
if data['ps_auxcww'] is not None or data['uptime'] is not None:
    # We do not want to hit the case when the system has just booted and
    # Satellite is still starting (taskomatic not yet running)
if get_uptime(data)['UPTIME_DAYS'] > 0:
if not find_taskomatic_process(data):
return True
def text(result):
out = ""
out += "Service Taskomatic does't seems to be running.\n"
out += "Use `service taskomatic restart` to restart it.\n"
out += "See https://access.redhat.com/solutions/2116911"
return out
| gpl-3.0 | 9,127,073,422,926,538,000 | 30.236842 | 77 | 0.634372 | false | 3.420749 | false | false | false |
saghul/python-asiri | asiri/i2c.py | 1 | 2033 |
import smbus
__all__ = ['I2C']
class I2C(object):
def __init__(self, address, busnum, debug=False):
self._address = address
self._bus = smbus.SMBus(busnum)
self._debug = debug
@property
def address(self):
return self._address
def _log_debug(self, msg):
print "I2C: %s" % msg
def _log_error(self, msg):
print "I2C: Error accessing 0x%02X: %s" % (self._address, msg)
def write8(self, reg, value):
"""
Writes an 8-bit value to the specified register/address
"""
try:
self._bus.write_byte_data(self._address, reg, value)
if self._debug:
self._log_debug("Wrote 0x%02X to register 0x%02X" % (value, reg))
except IOError as e:
self._log_error(e)
def write16(self, reg, value):
"""
Writes a 16-bit value to the specified register/address pair
"""
try:
self._bus.write_word_data(self._address, reg, value)
if self._debug:
self._log_debug("Wrote 0x%02X to register pair 0x%02X, 0x%02X" % (value, reg, reg+1))
except IOError as e:
self._log_error(e)
def read8(self, reg):
"""
Read an 8-bit value from the I2C device
"""
try:
result = self._bus.read_byte_data(self._address, reg)
if self._debug:
self._log_debug("Device 0x%02X returned 0x%02X from reg 0x%02X" % (self._address, result & 0xFF, reg))
return result
except IOError as e:
self._log_error(e)
def read16(self, reg):
"""
Read a 16-bit value from the I2C device
"""
try:
result = self._bus.read_word_data(self._address, reg)
if self._debug:
self._log_debug("Device 0x%02X returned 0x%02X from reg 0x%02X" % (self._address, result & 0xFF, reg))
return result
except IOError as e:
self._log_error(e)
| mit | 3,518,061,532,595,636,000 | 28.463768 | 118 | 0.531235 | false | 3.535652 | false | false | false |
django-danceschool/django-danceschool | danceschool/financial/migrations/0018_auto_20190412_1528.py | 1 | 2028 | # Generated by Django 2.1.7 on 2019-04-12 19:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file
class Migration(migrations.Migration):
dependencies = [
('financial', '0016_auto_20190409_0033'),
]
operations = [
migrations.AlterField(
model_name='expenseitem',
name='attachment',
field=filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='expense_attachment', to='filer.File', verbose_name='Attach File (optional)'),
),
migrations.AlterField(
model_name='revenueitem',
name='attachment',
field=filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='revenue_attachment', to='filer.File', verbose_name='Attach File (optional)'),
),
migrations.AlterField(
model_name='revenueitem',
name='currentlyHeldBy',
field=models.ForeignKey(blank=True, help_text='If cash has not yet been deposited, this indicates who to contact in order to collect the cash for deposit.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='revenuesheldby', to=settings.AUTH_USER_MODEL, verbose_name='Cash currently in possession of'),
),
migrations.AlterField(
model_name='revenueitem',
name='event',
field=models.ForeignKey(blank=True, help_text='If this item is associated with an Event, enter it here.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Event', verbose_name='Event'),
),
migrations.AlterField(
model_name='revenueitem',
name='invoiceItem',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.InvoiceItem', verbose_name='Associated invoice item'),
),
]
| bsd-3-clause | 6,035,019,398,571,092,000 | 48.463415 | 334 | 0.664694 | false | 3.9 | false | false | false |
SriHarshaGajavalli/SE2017 | home/serializers.py | 2 | 2622 | from rest_framework import serializers
from home.models import *
from django.contrib.auth.models import User
#serializer for User table
class UserSerializer(serializers.ModelSerializer):
class Meta:
model=User
fields=('id','username','first_name','last_name','email')
#Serializer for Personnel table
class PersonnelSerializer(serializers.ModelSerializer):
class Meta:
model=Personnel
fields=('Person_ID','LDAP','Role','Dept')
#Serializer for Department table
class DepartmentSerializer(serializers.ModelSerializer):
class Meta:
model=Department
fields=('Dept_ID','Dept_Name')
class RolesSerializer(serializers.ModelSerializer):
class Meta:
model=Roles
fields=('Role_ID','Role_name','level')
#Serializer for Courses table
class CoursesSerializer(serializers.ModelSerializer):
class Meta:
model=Courses
fields=('Course_ID','Course_Name','Course_description','Course_Credits','Course_Year','Course_Status')
#Serializer for Document table
class DocumentsSerializer(serializers.ModelSerializer):
class Meta:
model=Documents
fields=('Doc_ID','Doc_Name','Document')
#Serializer for Assignment table
class AssignmentSerializer(serializers.ModelSerializer):
class Meta:
model=Assignment
fields=('Assign_ID','Assignment_File','Course_ID','Start_Time','End_Time')
#Serializer for Submission table
class SubmissionsSerializer(serializers.ModelSerializer):
class Meta:
model=Submissions
fields=('Sub_ID','Assign_ID','Student_ID','Sub_Time','Score')
class ICSerializer(serializers.ModelSerializer):
class Meta:
model=Instructors_Courses
fields=('IC_id','Course_ID','Inst_ID','Start_Date','End_Date')
class SCSerializer(serializers.ModelSerializer):
class Meta:
model=Students_Courses
fields=('SC_ID','Student_ID','Course_ID','Reg_Date')
class EventsSerializer(serializers.ModelSerializer):
class Meta:
model=Events
fields=('Event_ID','Event_Date','Event_Name')
class SPSerializer(serializers.ModelSerializer):
class Meta:
model=Student_Period
fields=('Student_ID','Start_Year','End_Year')
#Serializer for Attendance table
class AttendanceSerializer(serializers.ModelSerializer):
class Meta:
model=Attendance
fields=('Student_ID','ASession_ID','Date_time','Marked')
#Serializer for Attendance table
class Attendance_SessionSerializer(serializers.ModelSerializer):
class Meta:
model=Attendance_Session
fields=('Session_ID','Course_Slot','Date_time','Status','Location')
#Serializer for Timetable table
class TimetableSerializer(serializers.ModelSerializer):
class Meta:
model=Timetable
fields=('T_days','Start_time','End_time','Course_ID','Class_ID')
| mit | -3,519,195,429,173,635,600 | 30.590361 | 104 | 0.768879 | false | 3.5625 | false | false | false |
ojab/bnw | bnw/handlers/command_show.py | 1 | 7775 | # coding: utf-8
import time
import pymongo
from base import *
import bnw.core.bnw_objects as objs
def get_user_bl(request, use_bl=False):
"""Return authed user blacklist or simply an empty list
if user not authed.
:param use_bl: default False. Whether we should return actual
blacklist or just empty list.
"""
if use_bl and request.user:
bl = request.user.get('blacklist', [])
bl = [el[1] for el in bl if el[0] == 'user']
return bl
else:
return []
@defer.inlineCallbacks
def set_subscriptions_info(request, messages):
"""Add 'subscribed' param for each message which
indicate do the user subscribed on the message or not.
Return updated list of messages (update in place actually!).
For non-authed users return non-modified list.
:param request: BnW request object.
"""
if not request.user:
defer.returnValue(messages)
user = request.user['name']
ids = [m['id'] for m in messages]
subscriptions = yield objs.Subscription.find({
'user': user, 'type': 'sub_message', 'target': {'$in': ids}})
sub_ids = [s['target'] for s in subscriptions]
for msg in messages:
msg['subscribed'] = True if msg['id'] in sub_ids else False
defer.returnValue(messages)
@defer.inlineCallbacks
def showSearch(parameters, page, request):
# FIXME: THIS COMMAND IS FUCKING SLOW SLOW SLOW AND WAS WRITTEN BY A
# BRAIN-DAMAGED IDIOT
messages = [x.filter_fields() for x in (yield objs.Message.find_sort(
parameters, [('date', pymongo.DESCENDING)], limit=20, skip=page * 20))]
messages = yield set_subscriptions_info(request, messages)
messages.reverse()
defer.returnValue(dict(
ok=True, format="messages", cache=5, cache_public=True,
messages=messages))
@defer.inlineCallbacks
def showComment(commentid):
comment = yield objs.Comment.find_one({'id': commentid})
if comment is None:
defer.returnValue(
dict(ok=False, desc='No such comment',
cache=5, cache_public=True)
)
defer.returnValue(
dict(ok=True, format='comment', cache=5, cache_public=True,
comment=comment.filter_fields(),
))
@defer.inlineCallbacks
def showComments(msgid, request, bl=None, after=''):
message = yield objs.Message.find_one({'id': msgid})
if message is None:
defer.returnValue(dict(
ok=False, desc='No such message', cache=5, cache_public=True))
if request.user:
user = request.user['name']
subscribed = yield objs.Subscription.count({
'user': user, 'type': 'sub_message', 'target': msgid})
message['subscribed'] = bool(subscribed)
qdict = {'message': msgid.upper()}
if bl:
qdict['user'] = {'$nin': bl}
if after:
after_comment = yield objs.Comment.find_one({'id':msgid+'/'+after.split('/')[-1]})
if after_comment:
qdict['date'] = {'$gte': after_comment['date']}
comments = yield objs.Comment.find_sort(
qdict, [('date', pymongo.ASCENDING)], limit=10000)
defer.returnValue(dict(
ok=True, format='message_with_replies', cache=5, cache_public=True,
msgid=msgid, message=message.filter_fields(),
replies=[comment.filter_fields() for comment in comments]))
@check_arg(message=MESSAGE_COMMENT_RE, page='[0-9]+')
@defer.inlineCallbacks
def cmd_show(request, message='', user='', tag='', club='', page='0',
show='messages', replies=None, use_bl=False, after='', before=''):
"""Show messages by specified parameters."""
message = canonic_message_comment(message).upper()
bl = get_user_bl(request, use_bl)
if '/' in message:
defer.returnValue((yield showComment(message)))
if replies:
if not message:
defer.returnValue(dict(
ok=False,
desc="Error: 'replies' is allowed only with 'message'.",
cache=3600))
defer.returnValue((yield showComments(message, request, bl, after)))
else:
if show not in ['messages', 'recommendations', 'all']:
defer.returnValue(dict(
ok=False, desc="Bad 'show' parameter value."))
parameters = [('tags', tag), ('clubs', club), ('id', message.upper())]
parameters = dict(p for p in parameters if p[1])
if user:
user = canonic_user(user).lower()
if show == 'messages':
user_spec = dict(user=user)
elif show == 'recommendations':
user_spec = dict(recommendations=user)
else:
user_spec = {'$or': [{'user': user}, {
'recommendations': user}]}
parameters.update(user_spec)
elif bl:
parameters['user'] = {'$nin': bl}
if before:
befmsg = yield objs.Message.find_one({'id': before})
if befmsg:
parameters['date'] = {'$lt': befmsg['date']}
else:
defer.returnValue(dict(ok=False, desc="Message to search before doesn't exist."))
if after:
afmsg = yield objs.Message.find_one({'id': after})
if afmsg:
parameters['date'] = {'$gt': afmsg['date']}
else:
defer.returnValue(dict(ok=False, desc="Message to search after doesn't exist."))
defer.returnValue((yield showSearch(parameters, int(page), request)))
@require_auth
@defer.inlineCallbacks
def cmd_feed(request, page="0"):
""" Показать ленту """
page = int(page) if page else 0
feed = yield objs.FeedElement.find_sort({'user': request.user['name']},
[('_id', pymongo.DESCENDING)], limit=20, skip=page * 20)
messages = [x.filter_fields() for x in (yield objs.Message.find_sort({'id': {'$in':
[f['message']
for f in feed]
}}, [('date', pymongo.ASCENDING)]))]
defer.returnValue(
dict(ok=True, format="messages",
messages=messages,
desc='Your feed',
cache=5)
)
@defer.inlineCallbacks
def cmd_today(request, use_bl=False):
""" Показать обсуждаемое за последние 24 часа """
bl = get_user_bl(request, use_bl)
for x in range(10):
postids = [x['_id'] for x in (yield objs.Today.find({}, limit=20))]
if len(postids)>0: break
qdict = {'id': {'$in': postids}}
if bl: qdict['user'] = {'$nin': bl}
dbposts = dict(
(x['id'], x.filter_fields())
for x in (yield objs.Message.find(qdict)))
messages = [dbposts[x] for x in postids if (x in dbposts)]
messages = yield set_subscriptions_info(request, messages)
messages.reverse()
defer.returnValue(
dict(ok=True, format="messages",
messages=messages,
             desc="Today's most discussed",
cache=300)
)
@defer.inlineCallbacks
def cmd_today2(request):
""" Показать обсуждаемое за последние 24 часа """
start = time.time() - 86400
messages = [x.filter_fields() for x in (yield objs.Message.find_sort({'date': {'$gte': start}}, [('replycount', pymongo.DESCENDING)], limit=20))]
messages.reverse()
defer.returnValue(
dict(ok=True, format="messages",
messages=messages,
             desc="Today's most discussed",
cache=300)
)
| bsd-2-clause | 6,002,972,927,831,207,000 | 36.349515 | 149 | 0.568365 | false | 3.839321 | false | false | false |
keithito/tacotron | util/audio.py | 1 | 4673 | import librosa
import librosa.filters
import math
import numpy as np
import tensorflow as tf
import scipy
from hparams import hparams
def load_wav(path):
return librosa.core.load(path, sr=hparams.sample_rate)[0]
def save_wav(wav, path):
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
scipy.io.wavfile.write(path, hparams.sample_rate, wav.astype(np.int16))
def preemphasis(x):
return scipy.signal.lfilter([1, -hparams.preemphasis], [1], x)
def inv_preemphasis(x):
return scipy.signal.lfilter([1], [1, -hparams.preemphasis], x)
def spectrogram(y):
D = _stft(preemphasis(y))
S = _amp_to_db(np.abs(D)) - hparams.ref_level_db
return _normalize(S)
def inv_spectrogram(spectrogram):
'''Converts spectrogram to waveform using librosa'''
S = _db_to_amp(_denormalize(spectrogram) + hparams.ref_level_db) # Convert back to linear
return inv_preemphasis(_griffin_lim(S ** hparams.power)) # Reconstruct phase
def inv_spectrogram_tensorflow(spectrogram):
'''Builds computational graph to convert spectrogram to waveform using TensorFlow.
Unlike inv_spectrogram, this does NOT invert the preemphasis. The caller should call
inv_preemphasis on the output after running the graph.
'''
S = _db_to_amp_tensorflow(_denormalize_tensorflow(spectrogram) + hparams.ref_level_db)
return _griffin_lim_tensorflow(tf.pow(S, hparams.power))
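def _example_inv_spectrogram_tensorflow(spec):
  '''Usage sketch (not part of the original module, assumes the TF 1.x session
  API): build the graph once, run it, then apply inv_preemphasis on the result
  as the docstring above notes. `spec` is a normalized linear spectrogram.
  '''
  spec_ph = tf.placeholder(tf.float32, spec.shape)
  wav_op = inv_spectrogram_tensorflow(spec_ph)
  with tf.Session() as sess:
    wav = sess.run(wav_op, feed_dict={spec_ph: spec})
  return inv_preemphasis(wav)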
def melspectrogram(y):
D = _stft(preemphasis(y))
S = _amp_to_db(_linear_to_mel(np.abs(D))) - hparams.ref_level_db
return _normalize(S)
def find_endpoint(wav, threshold_db=-40, min_silence_sec=0.8):
window_length = int(hparams.sample_rate * min_silence_sec)
hop_length = int(window_length / 4)
threshold = _db_to_amp(threshold_db)
for x in range(hop_length, len(wav) - window_length, hop_length):
if np.max(wav[x:x+window_length]) < threshold:
return x + hop_length
return len(wav)
def _griffin_lim(S):
'''librosa implementation of Griffin-Lim
Based on https://github.com/librosa/librosa/issues/434
'''
angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
S_complex = np.abs(S).astype(np.complex)
y = _istft(S_complex * angles)
for i in range(hparams.griffin_lim_iters):
angles = np.exp(1j * np.angle(_stft(y)))
y = _istft(S_complex * angles)
return y
def _griffin_lim_tensorflow(S):
'''TensorFlow implementation of Griffin-Lim
Based on https://github.com/Kyubyong/tensorflow-exercises/blob/master/Audio_Processing.ipynb
'''
with tf.variable_scope('griffinlim'):
# TensorFlow's stft and istft operate on a batch of spectrograms; create batch of size 1
S = tf.expand_dims(S, 0)
S_complex = tf.identity(tf.cast(S, dtype=tf.complex64))
y = _istft_tensorflow(S_complex)
for i in range(hparams.griffin_lim_iters):
est = _stft_tensorflow(y)
angles = est / tf.cast(tf.maximum(1e-8, tf.abs(est)), tf.complex64)
y = _istft_tensorflow(S_complex * angles)
return tf.squeeze(y, 0)
def _stft(y):
n_fft, hop_length, win_length = _stft_parameters()
return librosa.stft(y=y, n_fft=n_fft, hop_length=hop_length, win_length=win_length)
def _istft(y):
_, hop_length, win_length = _stft_parameters()
return librosa.istft(y, hop_length=hop_length, win_length=win_length)
def _stft_tensorflow(signals):
n_fft, hop_length, win_length = _stft_parameters()
return tf.contrib.signal.stft(signals, win_length, hop_length, n_fft, pad_end=False)
def _istft_tensorflow(stfts):
n_fft, hop_length, win_length = _stft_parameters()
return tf.contrib.signal.inverse_stft(stfts, win_length, hop_length, n_fft)
def _stft_parameters():
n_fft = (hparams.num_freq - 1) * 2
hop_length = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)
win_length = int(hparams.frame_length_ms / 1000 * hparams.sample_rate)
return n_fft, hop_length, win_length
# Conversions:
_mel_basis = None
def _linear_to_mel(spectrogram):
global _mel_basis
if _mel_basis is None:
_mel_basis = _build_mel_basis()
return np.dot(_mel_basis, spectrogram)
def _build_mel_basis():
n_fft = (hparams.num_freq - 1) * 2
return librosa.filters.mel(hparams.sample_rate, n_fft, n_mels=hparams.num_mels)
def _amp_to_db(x):
return 20 * np.log10(np.maximum(1e-5, x))
def _db_to_amp(x):
return np.power(10.0, x * 0.05)
def _db_to_amp_tensorflow(x):
return tf.pow(tf.ones(tf.shape(x)) * 10.0, x * 0.05)
def _normalize(S):
return np.clip((S - hparams.min_level_db) / -hparams.min_level_db, 0, 1)
def _denormalize(S):
return (np.clip(S, 0, 1) * -hparams.min_level_db) + hparams.min_level_db
def _denormalize_tensorflow(S):
return (tf.clip_by_value(S, 0, 1) * -hparams.min_level_db) + hparams.min_level_db
| mit | 4,423,930,429,883,598,300 | 29.94702 | 94 | 0.693345 | false | 2.720023 | false | false | false |
Zir0ne/HolyCup | Lirael/subscriber.py | 1 | 2764 | """
    Lirael subscribes to every tick message published by BadDog.
    This module is built on zeromq: by creating a socket to the given port, we can receive every tick message BadDog broadcasts.
    Users can also set keywords to filter out tick messages we do not need (not implemented yet).
"""
import zmq
import threading
class Subscriber:
""" 订阅者 """
def __init__(self, context, address, tick_filter=""):
""" 构造函数
@param context 通信上下文, 进程唯一
@param address 发送tick信息的服务器的地址
@param tick_filter 过滤器, 通过设置过滤器来滤掉不需要的tick信息
"""
self.filter = tick_filter
self.context = context
self.address = address
self.socket = None
self.handler = None
self.quit_event = threading.Event()
self.quit_event.clear()
def start(self, callback=None):
""" 开始接收tick信息
@param callback 设置一个回调函数, 每次接收有用的tick信息后, 都会调用此函数, 如果不提供, 仅仅打印tick信息
"""
if callback and not hasattr(callback, "__call__"):
print("%s cannot be invoked" % str(callback))
return
        # If the worker thread already exists, shut it down first, then create a new one
        if self.handler and not self.quit_event.is_set():
self.quit_event.set()
self.handler.join()
        # Start the worker thread
self.quit_event.clear()
self.handler = threading.Thread(target=Subscriber.work_thread,
args=(None, self.context, self.address, self.quit_event, callback))
self.handler.start()
def stop(self):
""" 停止接收tick信息 """
if self.handler:
self.quit_event.set()
self.handler.join()
self.handler = None
def work_thread(self, *args):
""" 工作线程 """
# 准备socket
socket = args[0].socket(zmq.SUB)
socket.connect(args[1])
socket.setsockopt_string(zmq.SUBSCRIBE, '')
print("Subscriber is collecting tick information......")
        # Work loop
quit_event = args[2]
callback = args[3]
while not quit_event.is_set():
tick_info = socket.recv_string()
if callback:
callback(tick_info)
else:
print(tick_info)
        # Exit and clean up resources
socket.close()
quit_event.clear()
print("Subscriber has stopped, no more tick information will be collected.")
if __name__ == "__main__":
sub = Subscriber(zmq.Context(), "tcp://192.168.61.8:16888", tick_filter="")
sub.start()
| gpl-2.0 | 2,270,383,124,890,988,300 | 29.815789 | 107 | 0.553373 | false | 2.676571 | false | false | false |
dilawar/moose-core | tests/rdesigneur/test_72_CICR.py | 2 | 3935 | # This example demonstrates insertion of endo-compartments into the
# dendrite. Here endo_compartments are used for the endoplasmic reticulum
# (ER) in a model of Calcium Induced Calcium Release through the
# IP3 receptor. It generates a series of propagating waves of calcium.
# Note that units of permeability in the ConcChan are 1/(millimolar.sec)
#
# Copyright (C) Upinder S. Bhalla NCBS 2018
# Released under the terms of the GNU Public License V3.
# Converted to a test by Dilawar Singh, 2020
import matplotlib as mpl
mpl.use('Agg')
import os
import moose
print("[INFO ] Using moose from %s (%s)" % (moose.__file__, moose.version()))
import numpy as np
import rdesigneur as rd
np.set_printoptions(precision=3)
sdir_ = os.path.dirname(os.path.realpath(__file__))
E = (np.array([1.09014453e-07, 7.28082797e-13, 2.75389935e-08, 4.09373273e-01,
5.13839676e-04, 5.04392239e-04, 5.18535951e-04, 5.20332653e-04,
5.20319412e-04, 5.20315927e-04, 5.20315785e-04, 5.20315780e-04,
5.20315780e-04, 5.20315780e-04, 5.13839676e-04, 5.04392239e-04,
5.18535951e-04, 5.20332653e-04, 5.20319412e-04, 5.20315927e-04,
5.20315785e-04, 5.20315780e-04, 5.20315780e-04, 5.20315780e-04,
4.03334121e-01, 4.04616316e-01, 4.03839819e-01, 4.03873596e-01,
4.03877574e-01, 4.03877276e-01, 4.03877250e-01, 4.03877249e-01,
4.03877249e-01, 4.03877249e-01, 1.08136177e-06, 1.03726538e-06,
1.04624969e-06, 1.04989891e-06, 1.05005782e-06, 1.05006129e-06,
1.05006147e-06, 1.05006148e-06, 1.05006148e-06, 1.05006148e-06]),
np.array([2.64763531e-06, 3.53901405e-12, 1.06297817e-07, 2.59647692e-05,
1.50771752e-03, 1.44372345e-03, 1.46452771e-03, 1.46445738e-03,
1.46426743e-03, 1.46425938e-03, 1.46425914e-03, 1.46425913e-03,
1.46425913e-03, 1.46425913e-03, 1.50771752e-03, 1.44372345e-03,
1.46452771e-03, 1.46445738e-03, 1.46426743e-03, 1.46425938e-03,
1.46425914e-03, 1.46425913e-03, 1.46425913e-03, 1.46425913e-03,
1.26799318e-02, 1.15981501e-02, 1.19280784e-02, 1.20059244e-02,
1.20092971e-02, 1.20092807e-02, 1.20092772e-02, 1.20092772e-02,
1.20092772e-02, 1.20092772e-02, 2.11602709e-06, 2.06303080e-06,
2.08117025e-06, 2.08584557e-06, 2.08603181e-06, 2.08603541e-06,
2.08603560e-06, 2.08603562e-06, 2.08603562e-06, 2.08603562e-06])
)
def test_CICR():
"""Test CICR
"""
rdes = rd.rdesigneur(
turnOffElec=True,
chemDt=0.005,
chemPlotDt=0.02,
numWaveFrames=200,
diffusionLength=1e-6,
useGssa=False,
addSomaChemCompt=False,
addEndoChemCompt=True,
# cellProto syntax: ['somaProto', 'name', dia, length]
cellProto=[['somaProto', 'soma', 2e-6, 10e-6]],
chemProto=[[os.path.join(sdir_, 'chem', 'CICRwithConcChan.g'),
'chem']],
chemDistrib=[['chem', 'soma', 'install', '1']],
plotList=[
['soma', '1', 'dend/CaCyt', 'conc', 'Dendritic Ca'],
['soma', '1', 'dend/CaCyt', 'conc', 'Dendritic Ca', 'wave'],
['soma', '1', 'dend_endo/CaER', 'conc', 'ER Ca'],
['soma', '1', 'dend/ActIP3R', 'conc', 'active IP3R'],
],
)
rdes.buildModel()
IP3 = moose.element('/model/chem/dend/IP3')
IP3.vec.concInit = 0.004
IP3.vec[0].concInit = 0.02
moose.reinit()
moose.start(20)
data = [t.vector for t in moose.wildcardFind('/##[TYPE=Table2]')]
m, s = np.mean(data, axis=1), np.std(data, axis=1)
# print(np.array_repr(m))
# print(np.array_repr(s))
    # In multithreaded mode, the numbers are not exactly the same as expected.
assert np.allclose(m, E[0], rtol=1e-2, atol=1e-4), (m - E[0])
# standard deviation could be very low in some cases.
print(np.sum(abs(s-E[1])) )
assert np.sum(abs(s-E[1])) < 1e-2, "Got %s" % np.sum(abs(s-E[1]))
print('done')
if __name__ == '__main__':
test_CICR()
| gpl-3.0 | 5,079,973,801,917,832,000 | 40.861702 | 78 | 0.637611 | false | 2.318798 | false | false | false |
toabctl/pymod2pkg | pymod2pkg.py | 1 | 2628 | import re
__version__ = '0.2.1'
class TranslationRule(object):
pass
class SingleRule(TranslationRule):
def __init__(self, mod, pkg, distmap=None):
self.mod = mod
self.pkg = pkg
self.distmap = distmap
def __call__(self, mod, dist):
if mod != self.mod:
return None
if self.distmap and dist:
for distrex in self.distmap:
if re.match(distrex, dist):
return self.distmap[distrex]
return self.pkg
class MultiRule(TranslationRule):
def __init__(self, mods, pkgfun):
self.mods = mods
self.pkgfun = pkgfun
def __call__(self, mod, dist):
if mod in self.mods:
return self.pkgfun(mod)
return None
def default_tr(mod):
pkg = mod.rsplit('-python')[0]
pkg = pkg.replace('_', '-').replace('.', '-').lower()
if not pkg.startswith('python-'):
pkg = 'python-' + pkg
return pkg
def exact_tr(mod):
return mod
def openstack_prefix_tr(mod):
return 'openstack-' + mod
RDO_PKG_MAP = [
# This demonstrates per-dist filter
#SingleRule('sphinx', 'python-sphinx',
# distmap={'epel-6': 'python-sphinx10'}),
SingleRule('distribute', 'python-setuptools'),
SingleRule('pyopenssl', 'pyOpenSSL'),
SingleRule('IPy', 'python-IPy'),
SingleRule('pycrypto', 'python-crypto'),
SingleRule('pyzmq', 'python-zmq'),
SingleRule('mysql-python', 'MySQL-python'),
SingleRule('PasteDeploy', 'python-paste-deploy'),
SingleRule('sqlalchemy-migrate', 'python-migrate'),
SingleRule('qpid-python', 'python-qpid'),
SingleRule('posix_ipc', 'python-posix_ipc'),
SingleRule('oslosphinx', 'python-oslo-sphinx'),
MultiRule(
mods=['PyYAML', 'm2crypto', 'numpy', 'pyflakes', 'pylint', 'pyparsing',
'pytz', 'pysendfile', 'libvirt-python'],
pkgfun=lambda x: x),
MultiRule(
mods=['nova', 'keystone', 'glance', 'swift', 'neutron'],
pkgfun=openstack_prefix_tr),
]
SUSE_PKG_MAP = [
# Do what you gotta do ;)
]
def get_pkg_map(dist):
if dist.lower().find('suse') != -1:
return SUSE_PKG_MAP
return RDO_PKG_MAP
def module2package(mod, dist, pkg_map=None):
"""Return a corresponding package name for a python module.
mod -- python module name
dist -- a linux distribution as returned by
`platform.linux_distribution()[0]`
"""
if not pkg_map:
pkg_map = get_pkg_map(dist)
for rule in pkg_map:
pkg = rule(mod, dist)
if pkg:
return pkg
return default_tr(mod)
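# --- Usage sketch (not part of the original module) ---------------------------
# 'pyopenssl' is remapped by RDO_PKG_MAP above; unknown modules fall back to
# the default 'python-<name>' translation.
if __name__ == '__main__':
    print(module2package('pyopenssl', 'Fedora'))  # -> pyOpenSSL
    print(module2package('requests', 'Fedora'))   # -> python-requests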
| apache-2.0 | 4,438,163,807,946,193,000 | 24.514563 | 79 | 0.584855 | false | 3.541779 | false | false | false |
DougBurke/astropy | astropy/io/votable/exceptions.py | 2 | 46192 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
.. _warnings:
Warnings
--------
.. note::
Most of the following warnings indicate violations of the VOTable
specification. They should be reported to the authors of the
tools that produced the VOTable file.
To control the warnings emitted, use the standard Python
:mod:`warnings` module. Most of these are of the type
`VOTableSpecWarning`.
{warnings}
.. _exceptions:
Exceptions
----------
.. note::
This is a list of many of the fatal exceptions emitted by vo.table
when the file does not conform to spec. Other exceptions may be
raised due to unforeseen cases or bugs in vo.table itself.
{exceptions}
"""
# STDLIB
import io
import re
from textwrap import dedent
from warnings import warn
from ...utils.exceptions import AstropyWarning
__all__ = [
'warn_or_raise', 'vo_raise', 'vo_reraise', 'vo_warn',
'warn_unknown_attrs', 'parse_vowarning', 'VOWarning',
'VOTableChangeWarning', 'VOTableSpecWarning',
'UnimplementedWarning', 'IOWarning', 'VOTableSpecError']
MAX_WARNINGS = 10
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ('?', '?')
filename = config.get('filename', '?')
return '{}:{}:{}: {}: {}'.format(filename, pos[0], pos[1], name, message)
def _suppressed_warning(warning, config, stacklevel=2):
warning_class = type(warning)
config.setdefault('_warning_counts', dict()).setdefault(warning_class, 0)
config['_warning_counts'][warning_class] += 1
message_count = config['_warning_counts'][warning_class]
if message_count <= MAX_WARNINGS:
if message_count == MAX_WARNINGS:
warning.formatted_message += \
' (suppressing further warnings of this type...)'
warn(warning, stacklevel=stacklevel+1)
def warn_or_raise(warning_class, exception_class=None, args=(), config=None,
pos=None, stacklevel=1):
"""
Warn or raise an exception, depending on the pedantic setting.
"""
if config is None:
config = {}
if config.get('pedantic'):
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
else:
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel+1)
def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos)
def vo_reraise(exc, config=None, pos=None, additional=''):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += ' ' + additional
exc.args = (message,)
raise exc
def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel+1)
def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1):
for attr in attrs:
if attr not in good_attr:
vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel+1)
_warning_pat = re.compile(
(r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): " +
r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$"))
def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
"""
result = {}
match = _warning_pat.search(line)
if match:
result['warning'] = warning = match.group('warning')
if warning is not None:
result['is_warning'] = (warning[0].upper() == 'W')
result['is_exception'] = not result['is_warning']
result['number'] = int(match.group('warning')[1:])
result['doc_url'] = "io/votable/api_exceptions.html#{0}".format(
warning.lower())
else:
result['is_warning'] = False
result['is_exception'] = False
result['is_other'] = True
result['number'] = None
result['doc_url'] = None
try:
result['nline'] = int(match.group('nline'))
except ValueError:
result['nline'] = 0
try:
result['nchar'] = int(match.group('nchar'))
except ValueError:
result['nchar'] = 0
result['message'] = match.group('rest')
result['is_something'] = True
else:
result['warning'] = None
result['is_warning'] = False
result['is_exception'] = False
result['is_other'] = False
result['is_something'] = False
if not isinstance(line, str):
line = line.decode('utf-8')
result['message'] = line
return result
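def _example_parse_vowarning():
    """
    Minimal usage sketch (not part of the original module); the input line
    mimics the format produced by `_format_message` above.
    """
    result = parse_vowarning("example.xml:10:5: W02: Some message")
    # result['warning'] == 'W02', result['number'] == 2,
    # result['nline'] == 10, result['nchar'] == 5,
    # result['message'] == 'Some message'
    return result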
class VOWarning(AstropyWarning):
"""
The base class of all VO warnings and exceptions.
Handles the formatting of the message with a warning or exception
code, filename, line and column number.
"""
default_args = ()
message_template = ''
def __init__(self, args, config=None, pos=None):
if config is None:
config = {}
if not isinstance(args, tuple):
args = (args, )
msg = self.message_template.format(*args)
self.formatted_message = _format_message(
msg, self.__class__.__name__, config, pos)
Warning.__init__(self, self.formatted_message)
def __str__(self):
return self.formatted_message
@classmethod
def get_short_name(cls):
if len(cls.default_args):
return cls.message_template.format(*cls.default_args)
return cls.message_template
class VOTableChangeWarning(VOWarning, SyntaxWarning):
"""
A change has been made to the input XML file.
"""
class VOTableSpecWarning(VOWarning, SyntaxWarning):
"""
The input XML file violates the spec, but there is an obvious workaround.
"""
class UnimplementedWarning(VOWarning, SyntaxWarning):
"""
A feature of the VOTABLE_ spec is not implemented.
"""
class IOWarning(VOWarning, RuntimeWarning):
"""
A network or IO error occurred, but was recovered using the cache.
"""
class VOTableSpecError(VOWarning, ValueError):
"""
The input XML file violates the spec and there is no good workaround.
"""
class W01(VOTableSpecWarning):
"""
The VOTable spec states:
If a cell contains an array or complex number, it should be
encoded as multiple numbers separated by whitespace.
Many VOTable files in the wild use commas as a separator instead,
and ``vo.table`` supports this convention when not in
:ref:`pedantic-mode`.
``vo.table`` always outputs files using only spaces, regardless of
how they were input.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__
"""
message_template = "Array uses commas rather than whitespace"
class W02(VOTableSpecWarning):
r"""
XML ids must match the following regular expression::
^[A-Za-z_][A-Za-z0-9_\.\-]*$
The VOTable 1.1 says the following:
According to the XML standard, the attribute ``ID`` is a
string beginning with a letter or underscore (``_``), followed
by a sequence of letters, digits, or any of the punctuation
characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or
``:`` (colon).
However, this is in conflict with the XML standard, which says
colons may not be used. VOTable 1.1's own schema does not allow a
colon here. Therefore, ``vo.table`` disallows the colon.
VOTable 1.2 corrects this error in the specification.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`XML Names <http://www.w3.org/TR/REC-xml/#NT-Name>`__
"""
message_template = "{} attribute '{}' is invalid. Must be a standard XML id"
default_args = ('x', 'y')
class W03(VOTableChangeWarning):
"""
The VOTable 1.1 spec says the following about ``name`` vs. ``ID``
on ``FIELD`` and ``VALUE`` elements:
``ID`` and ``name`` attributes have a different role in
VOTable: the ``ID`` is meant as a *unique identifier* of an
element seen as a VOTable component, while the ``name`` is
meant for presentation purposes, and need not to be unique
throughout the VOTable document. The ``ID`` attribute is
therefore required in the elements which have to be
referenced, but in principle any element may have an ``ID``
attribute. ... In summary, the ``ID`` is different from the
``name`` attribute in that (a) the ``ID`` attribute is made
from a restricted character set, and must be unique throughout
a VOTable document whereas names are standard XML attributes
and need not be unique; and (b) there should be support in the
parsing software to look up references and extract the
relevant element with matching ``ID``.
It is further recommended in the VOTable 1.2 spec:
While the ``ID`` attribute has to be unique in a VOTable
document, the ``name`` attribute need not. It is however
recommended, as a good practice, to assign unique names within
a ``TABLE`` element. This recommendation means that, between a
``TABLE`` and its corresponding closing ``TABLE`` tag,
``name`` attributes of ``FIELD``, ``PARAM`` and optional
``GROUP`` elements should be all different.
Since ``vo.table`` requires a unique identifier for each of its
columns, ``ID`` is used for the column name when present.
However, when ``ID`` is not present, (since it is not required by
the specification) ``name`` is used instead. However, ``name``
must be cleansed by replacing invalid characters (such as
whitespace) with underscores.
.. note::
This warning does not indicate that the input file is invalid
with respect to the VOTable specification, only that the
column names in the record array may not match exactly the
``name`` attributes specified in the file.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Implicitly generating an ID from a name '{}' -> '{}'"
default_args = ('x', 'y')
class W04(VOTableSpecWarning):
"""
The ``content-type`` attribute must use MIME content-type syntax as
defined in `RFC 2046 <https://tools.ietf.org/html/rfc2046>`__.
The current check for validity is somewhat over-permissive.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "content-type '{}' must be a valid MIME content type"
default_args = ('x',)
class W05(VOTableSpecWarning):
"""
The attribute must be a valid URI as defined in `RFC 2396
<http://www.ietf.org/rfc/rfc2396.txt>`_.
"""
message_template = "'{}' is not a valid URI"
default_args = ('x',)
class W06(VOTableSpecWarning):
"""
This warning is emitted when a ``ucd`` attribute does not match
the syntax of a `unified content descriptor
<http://vizier.u-strasbg.fr/doc/UCD.htx>`__.
If the VOTable version is 1.2 or later, the UCD will also be
checked to ensure it conforms to the controlled vocabulary defined
by UCD1+.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:ucd>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:ucd>`__
"""
message_template = "Invalid UCD '{}': {}"
default_args = ('x', 'explanation')
class W07(VOTableSpecWarning):
"""
As astro year field is a Besselian or Julian year matching the
regular expression::
^[JB]?[0-9]+([.][0-9]*)?$
Defined in this XML Schema snippet::
<xs:simpleType name="astroYear">
<xs:restriction base="xs:token">
<xs:pattern value="[JB]?[0-9]+([.][0-9]*)?"/>
</xs:restriction>
</xs:simpleType>
"""
message_template = "Invalid astroYear in {}: '{}'"
default_args = ('x', 'y')
class W08(VOTableSpecWarning):
"""
    To avoid locale-dependent number parsing differences, ``vo.table``
may require a string or unicode string where a numeric type may
make more sense.
"""
message_template = "'{}' must be a str or bytes object"
default_args = ('x',)
class W09(VOTableSpecWarning):
"""
The VOTable specification uses the attribute name ``ID`` (with
uppercase letters) to specify unique identifiers. Some
VOTable-producing tools use the more standard lowercase ``id``
instead. ``vo.table`` accepts ``id`` and emits this warning when
not in ``pedantic`` mode.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "ID attribute not capitalized"
class W10(VOTableSpecWarning):
"""
The parser has encountered an element that does not exist in the
specification, or appears in an invalid context. Check the file
against the VOTable schema (with a tool such as `xmllint
<http://xmlsoft.org/xmllint.html>`__. If the file validates
against the schema, and you still receive this warning, this may
indicate a bug in ``vo.table``.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Unknown tag '{}'. Ignoring"
default_args = ('x',)
class W11(VOTableSpecWarning):
"""
Earlier versions of the VOTable specification used a ``gref``
attribute on the ``LINK`` element to specify a `GLU reference
<http://aladin.u-strasbg.fr/glu/>`__. New files should
specify a ``glu:`` protocol using the ``href`` attribute.
Since ``vo.table`` does not currently support GLU references, it
likewise does not automatically convert the ``gref`` attribute to
the new form.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "The gref attribute on LINK is deprecated in VOTable 1.1"
class W12(VOTableChangeWarning):
"""
In order to name the columns of the Numpy record array, each
``FIELD`` element must have either an ``ID`` or ``name`` attribute
to derive a name from. Strictly speaking, according to the
VOTable schema, the ``name`` attribute is required. However, if
    ``name`` is not present but ``ID`` is, and *pedantic mode* is off,
``vo.table`` will continue without a ``name`` defined.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = (
"'{}' element must have at least one of 'ID' or 'name' attributes")
default_args = ('x',)
class W13(VOTableSpecWarning):
"""
Some VOTable files in the wild use non-standard datatype names. These
are mapped to standard ones using the following mapping::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' is not a valid VOTable datatype, should be '{}'"
default_args = ('x', 'y')
# W14: Deprecated
class W15(VOTableSpecWarning):
"""
The ``name`` attribute is required on every ``FIELD`` element.
However, many VOTable files in the wild omit it and provide only
an ``ID`` instead. In this case, when *pedantic mode* is off,
    ``vo.table`` will copy the ``ID`` attribute to a new ``name``
attribute.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "{} element missing required 'name' attribute"
default_args = ('x',)
# W16: Deprecated
class W17(VOTableSpecWarning):
"""
A ``DESCRIPTION`` element can only appear once within its parent
element.
According to the schema, it may only occur once (`1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__)
However, it is a `proposed extension
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:addesc>`__
to VOTable 1.2.
"""
message_template = "{} element contains more than one DESCRIPTION element"
default_args = ('x',)
class W18(VOTableSpecWarning):
"""
The number of rows explicitly specified in the ``nrows`` attribute
does not match the actual number of rows (``TR`` elements) present
in the ``TABLE``. This may indicate truncation of the file, or an
internal error in the tool that produced it. If *pedantic mode*
is off, parsing will proceed, with the loss of some performance.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__
"""
message_template = 'TABLE specified nrows={}, but table contains {} rows'
default_args = ('x', 'y')
class W19(VOTableSpecWarning):
"""
The column fields as defined using ``FIELD`` elements do not match
those in the headers of the embedded FITS file. If *pedantic
mode* is off, the embedded FITS file will take precedence.
"""
message_template = (
'The fields defined in the VOTable do not match those in the ' +
'embedded FITS file')
class W20(VOTableSpecWarning):
"""
If no version number is explicitly given in the VOTable file, the
parser assumes it is written to the VOTable 1.1 specification.
"""
message_template = 'No version number specified in file. Assuming {}'
default_args = ('1.1',)
class W21(UnimplementedWarning):
"""
Unknown issues may arise using ``vo.table`` with VOTable files
from a version other than 1.1, 1.2 or 1.3.
"""
message_template = (
'vo.table is designed for VOTable version 1.1, 1.2 and 1.3, but ' +
'this file is {}')
default_args = ('x',)
class W22(VOTableSpecWarning):
"""
Version 1.0 of the VOTable specification used the ``DEFINITIONS``
element to define coordinate systems. Version 1.1 now uses
``COOSYS`` elements throughout the document.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:definitions>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:definitions>`__
"""
message_template = 'The DEFINITIONS element is deprecated in VOTable 1.1. Ignoring'
class W23(IOWarning):
"""
Raised when the VO service database can not be updated (possibly
due to a network outage). This is only a warning, since an older
and possible out-of-date VO service database was available
locally.
"""
message_template = "Unable to update service information for '{}'"
default_args = ('x',)
class W24(VOWarning, FutureWarning):
"""
The VO catalog database retrieved from the www is designed for a
newer version of vo.table. This may cause problems or limited
features performing service queries. Consider upgrading vo.table
to the latest version.
"""
message_template = "The VO catalog database is for a later version of vo.table"
class W25(IOWarning):
"""
A VO service query failed due to a network error or malformed
arguments. Another alternative service may be attempted. If all
services fail, an exception will be raised.
"""
message_template = "'{}' failed with: {}"
default_args = ('service', '...')
class W26(VOTableSpecWarning):
"""
The given element was not supported inside of the given element
until the specified VOTable version, however the version declared
in the file is for an earlier version. These attributes may not
be written out to the file.
"""
message_template = "'{}' inside '{}' added in VOTable {}"
default_args = ('child', 'parent', 'X.X')
class W27(VOTableSpecWarning):
"""
The ``COOSYS`` element was deprecated in VOTABLE version 1.2 in
favor of a reference to the Space-Time Coordinate (STC) data
model (see `utype
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__
and the IVOA note `referencing STC in VOTable
<http://ivoa.net/Documents/latest/VOTableSTC.html>`__.
"""
message_template = "COOSYS deprecated in VOTable 1.2"
class W28(VOTableSpecWarning):
"""
The given attribute was not supported on the given element until the
specified VOTable version, however the version declared in the file is
for an earlier version. These attributes may not be written out to
the file.
"""
message_template = "'{}' on '{}' added in VOTable {}"
default_args = ('attribute', 'element', 'X.X')
class W29(VOTableSpecWarning):
"""
Some VOTable files specify their version number in the form "v1.0",
when the only supported forms in the spec are "1.0".
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Version specified in non-standard form '{}'"
default_args = ('v1.0',)
class W30(VOTableSpecWarning):
"""
Some VOTable files write missing floating-point values in non-standard
ways, such as "null" and "-". In non-pedantic mode, any non-standard
floating-point literals are treated as missing values.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid literal for float '{}'. Treating as empty."
default_args = ('x',)
class W31(VOTableSpecWarning):
"""
Since NaN's can not be represented in integer fields directly, a null
value must be specified in the FIELD descriptor to support reading
NaN's from the tabledata.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "NaN given in an integral field without a specified null value"
class W32(VOTableSpecWarning):
"""
Each field in a table must have a unique ID. If two or more fields
have the same ID, some will be renamed to ensure that all IDs are
unique.
From the VOTable 1.2 spec:
The ``ID`` and ``ref`` attributes are defined as XML types
``ID`` and ``IDREF`` respectively. This means that the
contents of ``ID`` is an identifier which must be unique
throughout a VOTable document, and that the contents of the
``ref`` attribute represents a reference to an identifier
which must exist in the VOTable document.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Duplicate ID '{}' renamed to '{}' to ensure uniqueness"
default_args = ('x', 'x_2')
class W33(VOTableChangeWarning):
"""
Each field in a table must have a unique name. If two or more
fields have the same name, some will be renamed to ensure that all
names are unique.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Column name '{}' renamed to '{}' to ensure uniqueness"
default_args = ('x', 'x_2')
class W34(VOTableSpecWarning):
"""
The attribute requires the value to be a valid XML token, as
defined by `XML 1.0
<http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__.
"""
message_template = "'{}' is an invalid token for attribute '{}'"
default_args = ('x', 'y')
class W35(VOTableSpecWarning):
"""
The ``name`` and ``value`` attributes are required on all ``INFO``
elements.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__
"""
message_template = "'{}' attribute required for INFO elements"
default_args = ('x',)
class W36(VOTableSpecWarning):
"""
If the field specifies a ``null`` value, that value must conform
to the given ``datatype``.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "null value '{}' does not match field datatype, setting to 0"
default_args = ('x',)
class W37(UnimplementedWarning):
"""
The 3 datatypes defined in the VOTable specification and supported by
vo.table are ``TABLEDATA``, ``BINARY`` and ``FITS``.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__
"""
message_template = "Unsupported data format '{}'"
default_args = ('x',)
class W38(VOTableSpecWarning):
"""
The only encoding for local binary data supported by the VOTable
specification is base64.
"""
message_template = "Inline binary data must be base64 encoded, got '{}'"
default_args = ('x',)
class W39(VOTableSpecWarning):
"""
Bit values do not support masking. This warning is raised upon
setting masked data in a bit column.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Bit values can not be masked"
class W40(VOTableSpecWarning):
"""
This is a terrible hack to support Simple Image Access Protocol
results from `archive.noao.edu <http://archive.noao.edu>`__. It
creates a field for the coordinate projection type of type "double",
which actually contains character data. We have to hack the field
to store character data, or we can't read it in. A warning will be
raised when this happens.
"""
message_template = "'cprojection' datatype repaired"
class W41(VOTableSpecWarning):
"""
An XML namespace was specified on the ``VOTABLE`` element, but the
namespace does not match what is expected for a ``VOTABLE`` file.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
Some files in the wild set the namespace to the location of the
VOTable schema, which is not correct and will not pass some
validating parsers.
"""
message_template = (
"An XML namespace is specified, but is incorrect. Expected " +
"'{}', got '{}'")
default_args = ('x', 'y')
class W42(VOTableSpecWarning):
"""
The root element should specify a namespace.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
"""
message_template = "No XML namespace specified"
class W43(VOTableSpecWarning):
"""
Referenced elements should be defined before referees. From the
VOTable 1.2 spec:
In VOTable1.2, it is further recommended to place the ID
attribute prior to referencing it whenever possible.
"""
message_template = "{} ref='{}' which has not already been defined"
default_args = ('element', 'x',)
class W44(VOTableSpecWarning):
"""
``VALUES`` elements that reference another element should not have
their own content.
From the VOTable 1.2 spec:
The ``ref`` attribute of a ``VALUES`` element can be used to
avoid a repetition of the domain definition, by referring to a
previously defined ``VALUES`` element having the referenced
``ID`` attribute. When specified, the ``ref`` attribute
defines completely the domain without any other element or
attribute, as e.g. ``<VALUES ref="RAdomain"/>``
"""
message_template = "VALUES element with ref attribute has content ('{}')"
default_args = ('element',)
class W45(VOWarning, ValueError):
"""
The ``content-role`` attribute on the ``LINK`` element must be one of
the following::
query, hints, doc, location
And in VOTable 1.3, additionally::
type
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
`1.3
<http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#sec:link>`__
"""
message_template = "content-role attribute '{}' invalid"
default_args = ('x',)
class W46(VOTableSpecWarning):
"""
The given char or unicode string is too long for the specified
field length.
"""
message_template = "{} value is too long for specified length of {}"
default_args = ('char or unicode', 'x')
class W47(VOTableSpecWarning):
"""
If no arraysize is specified on a char field, the default of '1'
is implied, but this is rarely what is intended.
"""
message_template = "Missing arraysize indicates length 1"
class W48(VOTableSpecWarning):
"""
The attribute is not defined in the specification.
"""
message_template = "Unknown attribute '{}' on {}"
default_args = ('attribute', 'element')
class W49(VOTableSpecWarning):
"""
Prior to VOTable 1.3, the empty cell was illegal for integer
fields.
If a \"null\" value was specified for the cell, it will be used
for the value, otherwise, 0 will be used.
"""
message_template = "Empty cell illegal for integer fields."
class W50(VOTableSpecWarning):
"""
Invalid unit string as defined in the `Standards for Astronomical
Catalogues, Version 2.0
<http://cdsarc.u-strasbg.fr/doc/catstd-3.2.htx>`_.
Consider passing an explicit ``unit_format`` parameter if the units
in this file conform to another specification.
"""
message_template = "Invalid unit string '{}'"
default_args = ('x',)
class W51(VOTableSpecWarning):
"""
The integer value is out of range for the size of the field.
"""
message_template = "Value '{}' is out of range for a {} integer field"
default_args = ('x', 'n-bit')
class W52(VOTableSpecWarning):
"""
The BINARY2 format was introduced in VOTable 1.3. It should
not be present in files marked as an earlier version.
"""
message_template = ("The BINARY2 format was introduced in VOTable 1.3, but "
"this file is declared as version '{}'")
default_args = ('1.2',)
class W53(VOTableSpecWarning):
"""
The VOTABLE element must contain at least one RESOURCE element.
"""
message_template = ("VOTABLE element must contain at least one RESOURCE element.")
default_args = ()
class E01(VOWarning, ValueError):
"""
The size specifier for a ``char`` or ``unicode`` field must be
only a number followed, optionally, by an asterisk.
Multi-dimensional size specifiers are not supported for these
datatypes.
Strings, which are defined as a set of characters, can be
represented in VOTable as a fixed- or variable-length array of
characters::
<FIELD name="unboundedString" datatype="char" arraysize="*"/>
A 1D array of strings can be represented as a 2D array of
characters, but given the logic above, it is possible to define a
variable-length array of fixed-length strings, but not a
fixed-length array of variable-length strings.
"""
message_template = "Invalid size specifier '{}' for a {} field (in field '{}')"
default_args = ('x', 'char/unicode', 'y')
class E02(VOWarning, ValueError):
"""
The number of array elements in the data does not match that specified
in the FIELD specifier.
"""
message_template = (
"Incorrect number of elements in array. " +
"Expected multiple of {}, got {}")
default_args = ('x', 'y')
class E03(VOWarning, ValueError):
"""
Complex numbers should be two values separated by whitespace.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' does not parse as a complex number"
default_args = ('x',)
class E04(VOWarning, ValueError):
"""
A ``bit`` array should be a string of '0's and '1's.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid bit value '{}'"
default_args = ('x',)
class E05(VOWarning, ValueError):
r"""
A ``boolean`` value should be one of the following strings (case
insensitive) in the ``TABLEDATA`` format::
'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?'
and in ``BINARY`` format::
'T', 'F', '1', '0', '\0', ' ', '?'
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid boolean value '{}'"
default_args = ('x',)
class E06(VOWarning, ValueError):
"""
The supported datatypes are::
double, float, bit, boolean, unsignedByte, short, int, long,
floatComplex, doubleComplex, char, unicodeChar
The following non-standard aliases are also supported, but in
    these cases :ref:`W13 <W13>` will be raised::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Unknown datatype '{}' on field '{}'"
default_args = ('x', 'y')
# E07: Deprecated
class E08(VOWarning, ValueError):
"""
The ``type`` attribute on the ``VALUES`` element must be either
``legal`` or ``actual``.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "type must be 'legal' or 'actual', but is '{}'"
default_args = ('x',)
class E09(VOWarning, ValueError):
"""
The ``MIN``, ``MAX`` and ``OPTION`` elements must always have a
``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "'{}' must have a value attribute"
default_args = ('x',)
class E10(VOWarning, ValueError):
"""
From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have
a ``datatype`` field.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "'datatype' attribute required on all '{}' elements"
default_args = ('FIELD',)
class E11(VOWarning, ValueError):
"""
The precision attribute is meant to express the number of significant
digits, either as a number of decimal places (e.g. ``precision="F2"`` or
equivalently ``precision="2"`` to express 2 significant figures
after the decimal point), or as a number of significant figures
    (e.g. ``precision="E5"`` indicates a relative precision of 10⁻⁵).
It is validated using the following regular expression::
[EF]?[1-9][0-9]*
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "precision '{}' is invalid"
default_args = ('x',)
class E12(VOWarning, ValueError):
"""
The width attribute is meant to indicate to the application the
number of characters to be used for input or output of the
quantity.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "width must be a positive integer, got '{}'"
default_args = ('x',)
class E13(VOWarning, ValueError):
r"""
From the VOTable 1.2 spec:
A table cell can contain an array of a given primitive type,
with a fixed or variable number of elements; the array may
even be multidimensional. For instance, the position of a
point in a 3D space can be defined by the following::
<FIELD ID="point_3D" datatype="double" arraysize="3"/>
and each cell corresponding to that definition must contain
exactly 3 numbers. An asterisk (\*) may be appended to
indicate a variable number of elements in the array, as in::
<FIELD ID="values" datatype="int" arraysize="100*"/>
where it is specified that each cell corresponding to that
definition contains 0 to 100 integer numbers. The number may
be omitted to specify an unbounded array (in practice up to
=~2×10⁹ elements).
A table cell can also contain a multidimensional array of a
given primitive type. This is specified by a sequence of
dimensions separated by the ``x`` character, with the first
dimension changing fastest; as in the case of a simple array,
the last dimension may be variable in length. As an example,
the following definition declares a table cell which may
contain a set of up to 10 images, each of 64×64 bytes::
<FIELD ID="thumbs" datatype="unsignedByte" arraysize="64×64×10*"/>
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__
"""
message_template = "Invalid arraysize attribute '{}'"
default_args = ('x',)
class E14(VOWarning, ValueError):
"""
All ``PARAM`` elements must have a ``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "value attribute is required for all PARAM elements"
class E15(VOWarning, ValueError):
"""
All ``COOSYS`` elements must have an ``ID`` attribute.
Note that the VOTable 1.1 specification says this attribute is
optional, but its corresponding schema indicates it is required.
In VOTable 1.2, the ``COOSYS`` element is deprecated.
"""
message_template = "ID attribute is required for all COOSYS elements"
class E16(VOTableSpecWarning):
"""
The ``system`` attribute on the ``COOSYS`` element must be one of the
following::
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', 'geo_app'
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:COOSYS>`__
"""
message_template = "Invalid system attribute '{}'"
default_args = ('x',)
class E17(VOWarning, ValueError):
"""
``extnum`` attribute must be a positive integer.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "extnum must be a positive integer"
class E18(VOWarning, ValueError):
"""
The ``type`` attribute of the ``RESOURCE`` element must be one of
"results" or "meta".
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "type must be 'results' or 'meta', not '{}'"
default_args = ('x',)
class E19(VOWarning, ValueError):
"""
Raised either when the file doesn't appear to be XML, or the root
element is not VOTABLE.
"""
message_template = "File does not appear to be a VOTABLE"
class E20(VOTableSpecError):
"""
The table had only *x* fields defined, but the data itself has more
columns than that.
"""
message_template = "Data has more columns than are defined in the header ({})"
default_args = ('x',)
class E21(VOWarning, ValueError):
"""
The table had *x* fields defined, but the data itself has only *y*
columns.
"""
message_template = "Data has fewer columns ({}) than are defined in the header ({})"
default_args = ('x', 'y')
def _get_warning_and_exception_classes(prefix):
classes = []
for key, val in globals().items():
if re.match(prefix + "[0-9]{2}", key):
classes.append((key, val))
classes.sort()
return classes
def _build_doc_string():
def generate_set(prefix):
classes = _get_warning_and_exception_classes(prefix)
out = io.StringIO()
for name, cls in classes:
out.write(".. _{}:\n\n".format(name))
msg = "{}: {}".format(cls.__name__, cls.get_short_name())
if not isinstance(msg, str):
msg = msg.decode('utf-8')
out.write(msg)
out.write('\n')
out.write('~' * len(msg))
out.write('\n\n')
doc = cls.__doc__
if not isinstance(doc, str):
doc = doc.decode('utf-8')
out.write(dedent(doc))
out.write('\n\n')
return out.getvalue()
warnings = generate_set('W')
exceptions = generate_set('E')
return {'warnings': warnings,
'exceptions': exceptions}
if __doc__ is not None:
__doc__ = __doc__.format(**_build_doc_string())
__all__.extend([x[0] for x in _get_warning_and_exception_classes('W')])
__all__.extend([x[0] for x in _get_warning_and_exception_classes('E')])
| bsd-3-clause | -7,840,791,556,070,667,000 | 30.80854 | 102 | 0.641731 | false | 3.50292 | true | false | false |
ZombieNinjaPirate/pypkg | FileProcessing/Read.py | 2 | 1963 | """
Copyright (c) 2014, Are Hansen - Honeypot Development.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'Are Hansen'
__date__ = '2014, July 25'
__version__ = '0.0.1'
def filelines(file_obj):
"""Expects that the file_obj is a list containing the name of the files that sould be read,
including the path to the directory in which they are located. Each line of these files are
appended to the file_lines and returned from the function. """
file_lines = []
file_dict = {}
for obj in file_obj:
with open(obj, 'r') as lines:
for line in lines.readlines():
file_lines.append(line)
file_dict[obj] = file_lines
file_lines = []
return file_dict
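# Illustrative usage sketch (added for clarity; not part of the original
# module). The log file paths below are hypothetical:
#
#     logs = filelines(['/var/log/honeypot/auth.log', '/var/log/honeypot/cmd.log'])
#     for file_name, lines in logs.items():
#         print(file_name, len(lines))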
| gpl-3.0 | 5,917,760,030,639,987,000 | 39.895833 | 100 | 0.741722 | false | 4.362222 | false | false | false |
kennethreitz/dynamo | dynamo.py | 1 | 3285 | # -*- coding: utf-8 -*-
import boto
from numbers import Number
from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError
class Table(object):
def __init__(self, table=None, eager=False):
self.table = table
self.is_eager = eager
def __repr__(self):
return '<table \'{0}\'>'.format(self.name)
@property
def name(self):
return self.table.__dict__['_dict']['TableName']
def item(self, item):
return Item(item, self)
def delete(self):
return self.table.delete()
def scale(self, read=None, write=None):
read = read or self.table.read_units
        write = write or self.table.write_units
return self.table.update_throughput(read_units=read, write_units=write)
def __getitem__(self, key):
try:
if isinstance(key, (basestring, Number)):
key = [key]
i = self.table.get_item(*key)
i = self.item(i)
except DynamoDBKeyNotFoundError:
return self.__magic_get(key)
return i
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __setitem__(self, key, values):
if isinstance(key, (basestring, Number)):
key = [key]
i = self.table.new_item(*key, attrs=values)
i = self.item(i)
i.put()
return i
def __delitem__(self, key):
return self[key].delete()
    def __magic_get(self, key):
        if not self.is_eager:
            # Without eager mode there is nothing to auto-create; raise
            # KeyError so that ``get()`` can fall back to its default.
            raise KeyError(key)
        self[key] = {}
        return self[key]
def __contains__(self, key):
return not self.get(key) is None
def new(self, name):
table = self.table.layer2.create_table(
name=name,
schema=self.table._schema,
read_units=self.table.read_units,
write_units=self.table.write_units
)
return Table(table=table, eager=self.is_eager)
class Item(object):
def __init__(self, item, table):
self.item = item
self.table = table
@property
def is_eager(self):
return self.table.is_eager
def __getattr__(self, key):
if not key in ['item']:
try:
return getattr(object, key)
except AttributeError:
return getattr(self.item, key)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(self.item)
def __getitem__(self, key):
return self.item[key]
def __setitem__(self, key, value):
self.item[key] = value
if self.is_eager:
self.item.save()
def __contains__(self, key):
return key in self.item
def table(name, auth=None, eager=True):
"""Returns a given table for the given user."""
auth = auth or []
dynamodb = boto.connect_dynamodb(*auth)
table = dynamodb.get_table(name)
return Table(table=table, eager=eager)
def tables(auth=None, eager=True):
"""Returns a list of tables for the given user."""
auth = auth or []
dynamodb = boto.connect_dynamodb(*auth)
return [table(t, auth, eager=eager) for t in dynamodb.list_tables()]
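# Illustrative usage sketch (added for clarity; not part of the original
# module). The table name and credentials below are hypothetical:
#
#     users = table('users', auth=[ACCESS_KEY, SECRET_KEY])
#     users['kenneth'] = {'fullname': 'Kenneth Reitz'}
#     print(users['kenneth']['fullname'])
#     users.scale(read=10, write=5)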
| isc | 425,078,682,580,047,700 | 24.866142 | 79 | 0.56347 | false | 3.860165 | false | false | false |
Teagan42/home-assistant | homeassistant/components/homekit_controller/alarm_control_panel.py | 2 | 3958 | """Support for Homekit Alarm Control Panel."""
import logging
from homekit.model.characteristics import CharacteristicsTypes
from homeassistant.components.alarm_control_panel import AlarmControlPanel
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from . import KNOWN_DEVICES, HomeKitEntity
ICON = "mdi:security"
_LOGGER = logging.getLogger(__name__)
CURRENT_STATE_MAP = {
0: STATE_ALARM_ARMED_HOME,
1: STATE_ALARM_ARMED_AWAY,
2: STATE_ALARM_ARMED_NIGHT,
3: STATE_ALARM_DISARMED,
4: STATE_ALARM_TRIGGERED,
}
TARGET_STATE_MAP = {
STATE_ALARM_ARMED_HOME: 0,
STATE_ALARM_ARMED_AWAY: 1,
STATE_ALARM_ARMED_NIGHT: 2,
STATE_ALARM_DISARMED: 3,
}
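# Note: these integer codes follow the HomeKit security-system characteristics
# (target state: 0 = stay/home arm, 1 = away arm, 2 = night arm, 3 = disarm;
# the current-state map above additionally carries 4 = alarm triggered).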
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Homekit alarm control panel."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_service(aid, service):
if service["stype"] != "security-system":
return False
info = {"aid": aid, "iid": service["iid"]}
async_add_entities([HomeKitAlarmControlPanel(conn, info)], True)
return True
conn.add_listener(async_add_service)
class HomeKitAlarmControlPanel(HomeKitEntity, AlarmControlPanel):
"""Representation of a Homekit Alarm Control Panel."""
def __init__(self, *args):
"""Initialise the Alarm Control Panel."""
super().__init__(*args)
self._state = None
self._battery_level = None
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
return [
CharacteristicsTypes.SECURITY_SYSTEM_STATE_CURRENT,
CharacteristicsTypes.SECURITY_SYSTEM_STATE_TARGET,
CharacteristicsTypes.BATTERY_LEVEL,
]
def _update_security_system_state_current(self, value):
self._state = CURRENT_STATE_MAP[value]
def _update_battery_level(self, value):
self._battery_level = value
@property
def icon(self):
"""Return icon."""
return ICON
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
await self.set_alarm_state(STATE_ALARM_DISARMED, code)
async def async_alarm_arm_away(self, code=None):
"""Send arm command."""
await self.set_alarm_state(STATE_ALARM_ARMED_AWAY, code)
async def async_alarm_arm_home(self, code=None):
"""Send stay command."""
await self.set_alarm_state(STATE_ALARM_ARMED_HOME, code)
async def async_alarm_arm_night(self, code=None):
"""Send night command."""
await self.set_alarm_state(STATE_ALARM_ARMED_NIGHT, code)
async def set_alarm_state(self, state, code=None):
"""Send state command."""
characteristics = [
{
"aid": self._aid,
"iid": self._chars["security-system-state.target"],
"value": TARGET_STATE_MAP[state],
}
]
await self._accessory.put_characteristics(characteristics)
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
if self._battery_level is None:
return None
return {ATTR_BATTERY_LEVEL: self._battery_level}
| apache-2.0 | -334,830,814,066,145,860 | 29.21374 | 88 | 0.645528 | false | 3.598182 | false | false | false |
eljost/pysisyphus | pysisyphus/intcoords/Rotation.py | 1 | 5573 | # [1] http://dx.doi.org/10.1063/1.4952956
# Lee-Ping Wang, 2016
import numpy as np
from pysisyphus.intcoords.Primitive import Primitive
from pysisyphus.linalg import eigvec_grad
def compare_to_geometric(c3d, ref_c3d, dR, dF, dqdx, dvdx):
from geometric.rotate import get_R_der, get_F_der, get_q_der, get_expmap_der
dR_ref = get_R_der(c3d, ref_c3d)
np.testing.assert_allclose(dR, dR_ref)
dF_ref = get_F_der(c3d, ref_c3d)
np.testing.assert_allclose(dF.reshape(-1, 3, 4, 4), dF_ref)
dq_ref = get_q_der(c3d, ref_c3d)
np.testing.assert_allclose(dqdx.reshape(-1, 3, 4), dq_ref)
dvdx_ref = get_expmap_der(c3d, ref_c3d)
np.testing.assert_allclose(dvdx, dvdx_ref.reshape(-1, 3).T)
class Rotation(Primitive):
"""See (II. Theory) in [1], Eq. (3) - (14)"""
index = None
def __init__(self, *args, ref_coords3d, **kwargs):
super().__init__(*args, **kwargs)
self.calc_kwargs = ("index", "ref_coords3d")
self.ref_coords3d = ref_coords3d.reshape(-1, 3).copy()
@staticmethod
def _weight(atoms, coords3d, indices, f_damping):
return 1
@staticmethod
def to_origin(coords3d, indices):
return coords3d[indices] - coords3d[indices].mean(axis=0)
@staticmethod
def _calculate(coords3d, indices, gradient=False, index=0, ref_coords3d=None):
# Translate to origin by removing centroid
c3d = Rotation.to_origin(coords3d, indices)
ref_c3d = Rotation.to_origin(ref_coords3d, indices)
# Setup correlation matrix
R = c3d.T.dot(ref_c3d)
# Setup F matrix, Eq. (6) in [1]
F = np.zeros((4, 4))
R11, R12, R13, R21, R22, R23, R31, R32, R33 = R.flatten()
# Fill only upper triangular part.
F[0, 0] = R11 + R22 + R33
F[0, 1] = R23 - R32
F[0, 2] = R31 - R13
F[0, 3] = R12 - R21
#
F[1, 1] = R11 - R22 - R33
F[1, 2] = R12 + R21
F[1, 3] = R13 + R31
#
F[2, 2] = -R11 + R22 - R33
F[2, 3] = R23 + R32
#
F[3, 3] = -R11 - R22 + R33
# Eigenvalues, eigenvectors of upper triangular part.
w, v_ = np.linalg.eigh(F, UPLO="U")
# Quaternion corresponds to biggest (last) eigenvalue.
# np.linalg.eigh already returns sorted eigenvalues.
quat = v_[:, -1]
# Eigenvector sign is ambigous. Force first item to be positive,
# similar to geomeTRIC code.
if quat[0] < 0.0:
quat *= -1
# Eq. (8) in [1].
        # v = 2 * q_i * cos⁻¹(q_0) / sqrt(1 - q_0 ** 2)
#
# As q_0 approaches 1, the denominator becomes very small, and dividing
# by this small number results in numerical instability.
#
# According to wolframalpha v(q_0) limit approaches 2 for q_0 = 1.
#
# input: limit of (2 * arccos(x) / sqrt(1-x**2))
# output: lim v(x) for x -> 1 becomes 2.
q0 = quat[0]
if abs(q0 - 1.0) <= 1e-8:
prefac = 2
dvdq0 = 0.0
else:
arccos_q0 = np.arccos(q0)
diff = 1 - q0 ** 2
prefac = 2 * arccos_q0 / np.sqrt(diff)
dvdq0 = quat[1:] * (2 * q0 * arccos_q0 / diff ** 1.5 - 2 / diff)
# Exponential map
v = prefac * quat[1:]
if gradient:
# Gradient of correlation matrix
y1, y2, y3 = ref_c3d.T
dR = np.zeros((*c3d.shape, 3, 3))
dR[:, 0, 0, 0] = y1
dR[:, 0, 0, 1] = y2
dR[:, 0, 0, 2] = y3
#
dR[:, 1, 1, 0] = y1
dR[:, 1, 1, 1] = y2
dR[:, 1, 1, 2] = y3
#
dR[:, 2, 2, 0] = y1
dR[:, 2, 2, 1] = y2
dR[:, 2, 2, 2] = y3
dR11, dR12, dR13, dR21, dR22, dR23, dR31, dR32, dR33 = dR.reshape(-1, 9).T
# Gradient of F matrix. Construct full matrix, as we have to do a dot
# product later on.
dF = np.zeros((ref_c3d.size, 4, 4))
dF[:, 0, 0] = dR11 + dR22 + dR33
dF[:, 0, 1] = dR23 - dR32
dF[:, 0, 2] = dR31 - dR13
dF[:, 0, 3] = dR12 - dR21
#
dF[:, 1, 0] = dF[:, 0, 1]
dF[:, 1, 1] = dR11 - dR22 - dR33
dF[:, 1, 2] = dR12 + dR21
dF[:, 1, 3] = dR13 + dR31
#
dF[:, 2, 0] = dF[:, 0, 2]
dF[:, 2, 1] = dF[:, 1, 2]
dF[:, 2, 2] = -dR11 + dR22 - dR33
dF[:, 2, 3] = dR23 + dR32
#
dF[:, 3, 0] = dF[:, 0, 3]
dF[:, 3, 1] = dF[:, 1, 3]
dF[:, 3, 2] = dF[:, 2, 3]
dF[:, 3, 3] = -dR11 - dR22 + dR33
# Quaternion gradient
dqdx = eigvec_grad(w, v_, ind=-1, mat_grad=dF)
dvdq = np.zeros((4, 3))
dvdq[0] = dvdq0
dvdq[1:] = np.diag((prefac, prefac, prefac))
# Gradient of exponential map from chain rule.
# See bottom-left on 214108-3 in [1], after Eq. (11).
dvdx = np.einsum("ij,ki->jk", dvdq, dqdx)
# compare_to_geometric(c3d, ref_c3d, dR, dF, dqdx, dvdx)
row = np.zeros_like(coords3d)
row[indices] = dvdx[index].reshape(-1, 3)
return v[index], row.flatten()
return v[index]
@staticmethod
def _jacobian(coords3d, indices):
raise Exception("Not implemented!")
class RotationA(Rotation):
index = 0
class RotationB(Rotation):
index = 1
class RotationC(Rotation):
index = 2
| gpl-3.0 | -2,486,767,611,384,018,000 | 31.573099 | 86 | 0.49228 | false | 2.786393 | false | false | false |
hugoruscitti/quickdiagrams | quickdiagrams/pac_parser.py | 1 | 2076 | # -*- coding: utf-8 -*-
import re
import StringIO
# License: GPLv3
# Author: Pablo Codeiro
# Changes by hugoruscitti: make it simulate creating a .sc file, and add the
# 'get_fakefile' function that is called from quickdiagrams.
#
# of course, the author, the date and the version would go here,
# plus the fact that it is GPL v3.0 and all that nonsense....
def esUnMensaje (unaLinea):
elementos = re.match("!(\w+) categoriesFor: #(\w+)! public! !",unaLinea)
if elementos:
return (elementos.group(1), elementos.group(2))
def esUnaClase (unaLinea):
elementos = re.match("\s+add: #(\w+);", unaLinea)
if elementos:
return (elementos.group(1))
def esUnaRelacion (unaLinea):
#elementos = re.match("\s+\w+\s*:=\s*(\w+)\s*new\s* \.",unaLinea)
elementos = re.match(".+:=\s+(\w+)\s*new\s*\..*",unaLinea)
if elementos:
return (elementos.group(1))
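# Hypothetical examples of the Smalltalk source lines each helper above matches:
#   esUnMensaje:   "!Account categoriesFor: #deposit! public! !"  -> ('Account', 'deposit')
#   esUnaClase:    "    add: #Account;"                           -> 'Account'
#   esUnaRelacion: "    balance := Amount new ."                  -> 'Amount'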
def get_fakefile(input_filename):
"Simula un archivo de disco, para realizar la conversion .pac -> .sc"
new_file = StringIO.StringIO()
last_file = open(input_filename, 'r')
dict = {}
claseActual = ""
for linea in last_file.readlines():
tmp_clase_actual = (re.match("!(\w+) methodsFor!", linea))
if tmp_clase_actual:
claseActual = tmp_clase_actual.group(1)
clase = esUnaClase(linea)
mens = esUnMensaje(linea)
relacion = esUnaRelacion(linea)
if clase:
if len(dict) == 1:
dict[clase] = clase
elif not dict.has_key(clase):
dict[clase] = clase
claseActual = clase
if mens:
(clase2,mensaje) = mens
dict[clase2] = dict[clase2] + "\n\t" + mensaje + "()"
claseActual = clase
if relacion:
new_file.write("%s <>- %s\n" %(claseActual, relacion))
for elemento in dict.values():
new_file.write(elemento + "\n");
last_file.close()
new_file.flush()
new_file.seek(0)
    import pprint
    pprint.pprint(new_file.readlines())
    new_file.seek(0)  # rewind again: readlines() above left the position at EOF
    return new_file
| gpl-3.0 | -8,969,478,843,600,902,000 | 24 | 77 | 0.575904 | false | 3.002894 | false | false | false |
pmoleri/memorize-accesible | accessibilitytoolbar.py | 1 | 2689 | # Copyright (C) 2010 ceibalJAM! ceibaljam.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import gtk
from os.path import join, dirname
from gettext import gettext as _
from sugar import profile
import logging
from gobject import SIGNAL_RUN_FIRST, TYPE_PYOBJECT
_logger = logging.getLogger('memorize-activity')
class AccessibilityToolbar(gtk.Toolbar):
__gtype_name__ = 'AccessibilityToolbar'
__gsignals__ = {
'accessibility_changed': (SIGNAL_RUN_FIRST, None, 2 * [TYPE_PYOBJECT]),
}
def __init__(self, activity):
gtk.Toolbar.__init__(self)
self.activity = activity
self._lock = True
self.jobject = None
# Accessible mode checkbox
self._accessible = gtk.CheckButton(_('Accessible'))
self._accessible.connect('toggled', self._accessibility_changed)
self._add_widget(self._accessible)
# Scanning speed scale
min = 1
max = 5
step = 1
default = 2.5
self._speed_adj = gtk.Adjustment(default, min, max, step)
self._speed_bar = gtk.HScale(self._speed_adj)
self._speed_bar.set_draw_value(True)
self._speed_bar.set_update_policy(gtk.UPDATE_DISCONTINUOUS)
self._speed_bar.set_size_request(240,15)
self._speed_adj.connect("value_changed", self._accessibility_changed)
# Add it to the toolbar
self._add_widget(self._speed_bar)
def _add_widget(self, widget, expand=False):
tool_item = gtk.ToolItem()
tool_item.set_expand(expand)
tool_item.add(widget)
widget.show()
self.insert(tool_item, -1)
tool_item.show()
def _game_reset_cb(self, widget):
self.emit('game_changed', None, None, 'reset', None, None)
def _load_game(self, button):
pass
def _accessibility_changed(self, widget):
self.emit("accessibility_changed", self._accessible.get_active(), self._speed_bar.get_value())
| gpl-2.0 | 7,435,342,468,607,826,000 | 33.474359 | 102 | 0.643734 | false | 3.760839 | false | false | false |
Inboxen/infrastructure | inboxen/tests/test_models.py | 1 | 2962 | ##
# Copyright (C) 2014 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
import datetime
from django import test
from django.contrib.auth import get_user_model
from inboxen import models
class ModelTestCase(test.TestCase):
"""Test our custom methods"""
fixtures = ['inboxen_testdata.json']
def setUp(self):
super(ModelTestCase, self).setUp()
self.user = get_user_model().objects.get(id=1)
def test_inbox_create(self):
with self.assertRaises(models.Domain.DoesNotExist):
models.Inbox.objects.create()
domain = models.Domain.objects.get(id=1)
inbox = models.Inbox.objects.create(domain=domain, user=self.user)
self.assertIsInstance(inbox.created, datetime.datetime)
self.assertEqual(inbox.user, self.user)
def test_inbox_from_string(self):
inbox = models.Inbox.objects.select_related("domain").get(id=1)
email = "%s@%s" % (inbox.inbox, inbox.domain.domain)
inbox2 = inbox.user.inbox_set.from_string(email=email)
self.assertEqual(inbox, inbox2)
def test_inbox_from_string_and_user(self):
user = get_user_model().objects.create(username="bizz")
domain = models.Domain.objects.get(id=1)
inbox = models.Inbox.objects.create(domain=domain, user=user)
with self.assertRaises(models.Inbox.DoesNotExist):
self.user.inbox_set.from_string(email="%s@%s" % (inbox.inbox, domain.domain))
def test_header_create(self):
name = "X-Hello"
data = "Hewwo"
part = models.PartList.objects.get(id=1)
header1 = part.header_set.create(name=name, data=data, ordinal=0)
header2 = part.header_set.create(name=name, data=data, ordinal=1)
self.assertEqual(header1[0].name_id, header2[0].name_id)
self.assertEqual(header1[0].data_id, header2[0].data_id)
self.assertTrue(header1[1])
self.assertFalse(header2[1])
def test_body_get_or_create(self):
body_data = "Hello"
body1 = models.Body.objects.get_or_create(data=body_data)
body2 = models.Body.objects.get_or_create(data=body_data)
self.assertEqual(body1[0].id, body2[0].id)
self.assertTrue(body1[1])
self.assertFalse(body2[1])
| agpl-3.0 | 5,405,453,312,505,657,000 | 35.121951 | 89 | 0.671168 | false | 3.452214 | true | false | false |
dmpetrov/dataversioncontrol | dvc/command/params.py | 1 | 3018 | import argparse
import logging
from collections import OrderedDict
from dvc.command.base import CmdBase, append_doc_link, fix_subparsers
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
def _show_diff(diff, markdown=False):
from dvc.utils.diff import table
rows = []
for fname, pdiff in diff.items():
sorted_pdiff = OrderedDict(sorted(pdiff.items()))
for param, change in sorted_pdiff.items():
rows.append([fname, param, change["old"], change["new"]])
return table(["Path", "Param", "Old", "New"], rows, markdown)
class CmdParamsDiff(CmdBase):
def run(self):
try:
diff = self.repo.params.diff(
a_rev=self.args.a_rev,
b_rev=self.args.b_rev,
all=self.args.all,
)
if self.args.show_json:
import json
logger.info(json.dumps(diff))
else:
table = _show_diff(diff, self.args.show_md)
if table:
logger.info(table)
except DvcException:
logger.exception("failed to show params diff")
return 1
return 0
def add_parser(subparsers, parent_parser):
PARAMS_HELP = "Commands to display params."
params_parser = subparsers.add_parser(
"params",
parents=[parent_parser],
description=append_doc_link(PARAMS_HELP, "params"),
help=PARAMS_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
params_subparsers = params_parser.add_subparsers(
dest="cmd",
help="Use `dvc params CMD --help` to display command-specific help.",
)
fix_subparsers(params_subparsers)
PARAMS_DIFF_HELP = (
"Show changes in params between commits in the DVC repository, or "
"between a commit and the workspace."
)
params_diff_parser = params_subparsers.add_parser(
"diff",
parents=[parent_parser],
description=append_doc_link(PARAMS_DIFF_HELP, "params/diff"),
help=PARAMS_DIFF_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
params_diff_parser.add_argument(
"a_rev", nargs="?", help="Old Git commit to compare (defaults to HEAD)"
)
params_diff_parser.add_argument(
"b_rev",
nargs="?",
help=("New Git commit to compare (defaults to the current workspace)"),
)
params_diff_parser.add_argument(
"--all",
action="store_true",
default=False,
help="Show unchanged params as well.",
)
params_diff_parser.add_argument(
"--show-json",
action="store_true",
default=False,
help="Show output in JSON format.",
)
params_diff_parser.add_argument(
"--show-md",
action="store_true",
default=False,
help="Show tabulated output in the Markdown format (GFM).",
)
params_diff_parser.set_defaults(func=CmdParamsDiff)
| apache-2.0 | -343,513,829,253,882,200 | 28.300971 | 79 | 0.59841 | false | 3.976285 | false | false | false |
justanr/Flask-Transfer | examples/JPEGr/JPEGr/app.py | 1 | 1036 | from .config import Config
from .form import UploadForm
from .transfer import PDFTransfer, pdf_saver
from . import utils
from flask import (Flask, render_template, redirect, abort,
url_for, send_from_directory)
from flask_bootstrap import Bootstrap
import os
app = Flask(__name__)
app.config.from_object(Config)
Bootstrap(app)
Config.init_app(app)
@app.route('/')
def index():
return render_template('index.html', links=utils.build_image_links())
@app.route('/upload', methods=['GET', 'POST'])
def upload():
form = UploadForm()
if form.validate_on_submit():
meta = {'width': form.width.data or 1080}
PDFTransfer.save(form.pdf.data, destination=pdf_saver, metadata=meta)
return redirect(url_for('index'))
else:
return render_template('upload.html', form=form)
@app.route('/pdf/<pdf>')
def display_pdf(pdf):
path = utils.get_save_path(pdf)
if not os.path.exists(path):
abort(404)
else:
return send_from_directory(*os.path.split(path))
| mit | 8,498,478,475,064,519,000 | 25.564103 | 77 | 0.668919 | false | 3.407895 | false | false | false |
bszcz/python | lorentz_system.py | 1 | 1115 | # Copyright (c) 2009, 2014 Bartosz Szczesny <[email protected]>
# This program is free software under the MIT license.
import time
import visual
class Lorentz(object):
def __init__(self, beta, rho, sigma, x, y, z, dt):
# parameters
self.beta = beta
self.rho = rho
self.sigma = sigma
# coordinates
self.x = x
self.y = y
self.z = z
# time step
self.dt = dt
def get_xyz(self):
return self.x, self.y, self.z
def advance(self):
x_ = self.x
y_ = self.y
z_ = self.z
self.x = x_ + self.dt * self.sigma * (y_ - x_)
self.y = y_ + self.dt * (x_ * (self.rho - z_) - y_)
self.z = z_ + self.dt * (x_ * y_ - self.beta * z_)
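	# The three updates above advance the system by one forward-Euler step of:
	#   dx/dt = sigma * (y - x)
	#   dy/dt = x * (rho - z) - y
	#   dz/dt = x * y - beta * z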
def main():
visual.rate(100)
beta, rho, sigma = 8.0/3.0, 28.0, 10.0
x, y, z = 5.0, 5.0, 5.0
dt = 0.002
system = Lorentz(beta, rho, sigma, x, y, z, dt)
num_curves = 30
for i in range(num_curves):
curve = visual.curve(color = visual.color.white)
curve_max_points = 1000 # "skipping points" otherwise
for j in range(curve_max_points):
system.advance()
x, y, z = system.get_xyz()
curve.append(pos = (x, y, z))
if __name__ == "__main__":
main()
| mit | 879,304,051,776,253,300 | 21.755102 | 61 | 0.593722 | false | 2.397849 | false | false | false |
njsmith/h11 | h11/tests/test_events.py | 1 | 3711 | import pytest
from .._util import LocalProtocolError
from .. import _events
from .._events import *
def test_event_bundle():
class T(_events._EventBundle):
_fields = ["a", "b"]
_defaults = {"b": 1}
def _validate(self):
if self.a == 0:
raise ValueError
# basic construction and methods
t = T(a=1, b=0)
assert repr(t) == "T(a=1, b=0)"
assert t == T(a=1, b=0)
assert not (t == T(a=2, b=0))
assert not (t != T(a=1, b=0))
assert (t != T(a=2, b=0))
with pytest.raises(TypeError):
hash(t)
# check defaults
t = T(a=10)
assert t.a == 10
assert t.b == 1
# no positional args
with pytest.raises(TypeError):
T(1)
with pytest.raises(TypeError):
T(1, a=1, b=0)
# unknown field
with pytest.raises(TypeError):
T(a=1, b=0, c=10)
# missing required field
with pytest.raises(TypeError) as exc:
T(b=0)
# make sure we error on the right missing kwarg
assert 'kwarg a' in str(exc)
# _validate is called
with pytest.raises(ValueError):
T(a=0, b=0)
def test_events():
with pytest.raises(LocalProtocolError):
# Missing Host:
req = Request(method="GET", target="/", headers=[("a", "b")],
http_version="1.1")
# But this is okay (HTTP/1.0)
req = Request(method="GET", target="/", headers=[("a", "b")],
http_version="1.0")
# fields are normalized
assert req.method == b"GET"
assert req.target == b"/"
assert req.headers == [(b"a", b"b")]
assert req.http_version == b"1.0"
# This is also okay -- has a Host (with weird capitalization, which is ok)
req = Request(method="GET", target="/",
headers=[("a", "b"), ("hOSt", "example.com")],
http_version="1.1")
# we normalize header capitalization
assert req.headers == [(b"a", b"b"), (b"host", b"example.com")]
# Multiple host is bad too
with pytest.raises(LocalProtocolError):
req = Request(method="GET", target="/",
headers=[("Host", "a"), ("Host", "a")],
http_version="1.1")
# Even for HTTP/1.0
with pytest.raises(LocalProtocolError):
req = Request(method="GET", target="/",
headers=[("Host", "a"), ("Host", "a")],
http_version="1.0")
# Header values are validated
with pytest.raises(LocalProtocolError):
req = Request(method="GET", target="/",
headers=[("Host", "a"), ("Foo", " asd\x00")],
http_version="1.0")
ir = InformationalResponse(status_code=100, headers=[("Host", "a")])
assert ir.status_code == 100
assert ir.headers == [(b"host", b"a")]
assert ir.http_version == b"1.1"
with pytest.raises(LocalProtocolError):
InformationalResponse(status_code=200, headers=[("Host", "a")])
resp = Response(status_code=204, headers=[], http_version="1.0")
assert resp.status_code == 204
assert resp.headers == []
assert resp.http_version == b"1.0"
with pytest.raises(LocalProtocolError):
resp = Response(status_code=100, headers=[], http_version="1.0")
with pytest.raises(LocalProtocolError):
Response(status_code="100", headers=[], http_version="1.0")
with pytest.raises(LocalProtocolError):
InformationalResponse(status_code=b"100",
headers=[], http_version="1.0")
d = Data(data=b"asdf")
assert d.data == b"asdf"
eom = EndOfMessage()
assert eom.headers == []
cc = ConnectionClosed()
assert repr(cc) == "ConnectionClosed()"
| mit | 5,725,662,533,083,317,000 | 29.925 | 78 | 0.552142 | false | 3.547801 | true | false | false |
opennetworkinglab/spring-open | scripts/perf-scripts/generate_flows.py | 1 | 2622 | #! /usr/bin/env python
# -*- Mode: python; py-indent-offset: 4; tab-width: 8; indent-tabs-mode: t; -*-
#
# A script for generating a number of flows.
#
# The output of the script should be saved to a file, and the flows from
# that file should be added by the following command:
#
# web/add_flow.py -f filename
#
# NOTE: Currently, some of the parameters of the flows are hard-coded,
# and all flows are between same source and destination DPID and ports
# (differentiated by different matchSrcMac and matchDstMac).
#
import copy
import pprint
import os
import sys
import subprocess
import json
import argparse
import io
import time
## Global Var ##
DEBUG=0
pp = pprint.PrettyPrinter(indent=4)
## Worker Functions ##
def log_error(txt):
print '%s' % (txt)
def debug(txt):
if DEBUG:
print '%s' % (txt)
if __name__ == "__main__":
usage_msg = "Generate a number of flows by using a pre-defined template.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "NOTE: This script is work-in-progress. Currently all flows are within same\n"
usage_msg = usage_msg + "pair of switch ports and contain auto-generated MAC-based matching conditions.\n"
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + "Usage: %s <begin-flow-id> <end-flow-id>\n" % (sys.argv[0])
usage_msg = usage_msg + "\n"
usage_msg = usage_msg + " The output should be saved to a file, and the flows should be installed\n"
usage_msg = usage_msg + " by using the command './add_flow.py -f filename'\n"
# app.debug = False;
# Usage info
if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
print(usage_msg)
exit(0)
# Check arguments
if len(sys.argv) < 3:
log_error(usage_msg)
exit(1)
# Extract the arguments
begin_flow_id = int(sys.argv[1], 0)
end_flow_id = int(sys.argv[2], 0)
if begin_flow_id > end_flow_id:
log_error(usage_msg)
exit(1)
#
# Do the work
#
# NOTE: Currently, up to 65536 flows are supported.
# More flows can be supported by iterating by, say, iterating over some of
# the other bytes of the autogenereated source/destination MAC addresses.
#
flow_id = begin_flow_id
idx = 0
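    # Each generated flow gets its own auto-generated MAC pair derived from idx;
    # for example (hypothetically) idx = 300 gives mac3 = 1 and mac4 = 45, so the
    # source MAC becomes 00:00:01:2d:00:00 and the destination MAC 00:01:01:2d:00:00.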
while flow_id <= end_flow_id:
mac3 = idx / 255
mac4 = idx % 255
str_mac3 = "%0.2x" % mac3
str_mac4 = "%0.2x" % mac4
src_mac = "00:00:" + str_mac3 + ":" + str_mac4 + ":00:00";
dst_mac = "00:01:" + str_mac3 + ":" + str_mac4 + ":00:00";
print "%s FOOBAR 00:00:00:00:00:00:00:01 1 00:00:00:00:00:00:00:01 2 matchSrcMac %s matchDstMac %s" % (flow_id, src_mac, dst_mac)
flow_id = flow_id + 1
idx = idx + 1
| apache-2.0 | -6,666,195,969,652,934,000 | 28.133333 | 133 | 0.644546 | false | 2.966063 | false | false | false |
ChawalitK/odoo | addons/purchase/res_config.py | 1 | 3123 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
class purchase_config_settings(osv.osv_memory):
_name = 'purchase.config.settings'
_inherit = 'res.config.settings'
_columns = {
'group_product_variant': fields.selection([
(0, "No variants on products"),
(1, 'Products can have several attributes, defining variants (Example: size, color,...)')
], "Product Variants",
            help='Working with product variants allows you to define several variants of the same product, and eases product management in the eCommerce for example',
implied_group='product.group_product_variant'),
'group_uom':fields.selection([
(0, 'Products have only one unit of measure (easier)'),
            (1, 'Some products may be sold/purchased in different units of measure (advanced)')
], "Units of Measure",
implied_group='product.group_uom',
help="""Allows you to select and maintain different units of measure for products."""),
'group_costing_method':fields.selection([
(0, 'Set a fixed cost price on each product'),
(1, "Use a 'Fixed', 'Real' or 'Average' price costing method")
], "Costing Methods",
implied_group='stock_account.group_inventory_valuation',
help="""Allows you to compute product cost price based on average cost."""),
'module_purchase_requisition': fields.selection([
(0, 'Purchase propositions trigger draft purchase orders to a single supplier'),
(1, 'Allow using call for tenders to get quotes from multiple suppliers (advanced)')
], "Calls for Tenders",
help="Calls for tenders are used when you want to generate requests for quotations to several vendors for a given set of products.\n"
"You can configure per product if you directly do a Request for Quotation "
"to one vendor or if you want a Call for Tenders to compare offers from several vendors."),
'group_warning_purchase': fields.selection([
(0, 'All the products and the customers can be used in purchase orders'),
(1, 'An informative or blocking warning can be set on a product or a customer')
], "Warning", implied_group='purchase.group_warning_purchase'),
'module_stock_dropshipping': fields.selection([
(0, 'Suppliers always deliver to your warehouse(s)'),
(1, "Allow suppliers to deliver directly to your customers")
], "Dropshipping",
help='\nCreates the dropship Route and add more complex tests\n'
'-This installs the module stock_dropshipping.'),
'group_manage_vendor_price': fields.selection([
(0, 'Manage vendor price on the product form'),
(1, 'Allow using and importing vendor pricelists')
], "Vendor Price",
implied_group="purchase.group_manage_vendor_price"),
}
| gpl-3.0 | 341,948,360,128,395,300 | 59.057692 | 161 | 0.634006 | false | 4.480631 | false | false | false |
Cloud-Elasticity-Services/as-libcloud | libcloud/autoscale/types.py | 1 | 2263 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"Provider",
"AutoScaleAdjustmentType"
]
class Provider(object):
"""
Defines for each of the supported providers
:cvar AWS_AUTOSCALE: Amazon AutoScale
:cvar SOFTLAYER: Softlayer
"""
AWS_AUTOSCALE = 'aws_autoscale'
SOFTLAYER = 'softlayer'
OPENSTACK = 'openstack'
class AutoScaleAdjustmentType(object):
"""
The logic to be used to scale the group when its policy is executed.
:cvar CHANGE_IN_CAPACITY: Increases or decreases the existing capacity.
:cvar EXACT_CAPACITY: Changes the current capacity to the specified value.
:cvar PERCENT_CHANGE_IN_CAPACITY: Increases or decreases the capacity by a
percentage.
"""
CHANGE_IN_CAPACITY = 'CHANGE_IN_CAPACITY'
EXACT_CAPACITY = 'EXACT_CAPACITY'
PERCENT_CHANGE_IN_CAPACITY = 'PERCENT_CHANGE_IN_CAPACITY'
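    # For example, with a current capacity of 4 members, an adjustment value of
    # 2 would mean: CHANGE_IN_CAPACITY -> 6 members, EXACT_CAPACITY -> 2 members,
    # and PERCENT_CHANGE_IN_CAPACITY (with a value of 50) -> 6 members.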
class AutoScaleTerminationPolicy(object):
"""
The policy to be used for automatic removal of members from an auto scale
group. Policy determines which members are chosen first for removal.
:cvar OLDEST_INSTANCE: Terminates the oldest instance in the group.
:cvar NEWEST_INSTANCE: Terminates the newest instance in the group.
:cvar CLOSEST_TO_NEXT_CHARGE: Terminates instances that are closest to the
next billing charge.
:cvar DEFAULT: Default termination policy.
"""
OLDEST_INSTANCE = 0
NEWEST_INSTANCE = 1
CLOSEST_TO_NEXT_CHARGE = 2
DEFAULT = 3
| apache-2.0 | 8,319,478,700,418,232,000 | 34.359375 | 78 | 0.719399 | false | 3.970175 | false | false | false |