repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
AustereCuriosity/astropy | astropy/config/paths.py | 1 | 10744 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains functions to determine where configuration and
data/cache files used by Astropy should be placed.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..utils.decorators import wraps
import os
import shutil
import sys
__all__ = ['get_config_dir', 'get_cache_dir', 'set_temp_config',
'set_temp_cache']
def _find_home():
""" Locates and return the home directory (or best approximation) on this
system.
Raises
------
OSError
If the home directory cannot be located - usually means you are running
Astropy on some obscure platform that doesn't have standard home
directories.
"""
# this is used below to fix up encoding issues that sometimes crop up
# in py2.x but not in py3.x
if six.PY2:
decodepath = lambda pth: pth.decode(sys.getfilesystemencoding())
else:
decodepath = lambda pth: pth
# First find the home directory - this is inspired by the scheme ipython
# uses to identify "home"
if os.name == 'posix':
# Linux, Unix, AIX, OS X
if 'HOME' in os.environ:
homedir = decodepath(os.environ['HOME'])
else:
raise OSError('Could not find unix home directory to search for '
'astropy config dir')
elif os.name == 'nt': # This is for all modern Windows (NT or after)
if 'MSYSTEM' in os.environ and os.environ.get('HOME'):
# Likely using an msys shell; use whatever it is using for its
# $HOME directory
homedir = decodepath(os.environ['HOME'])
# Next try for a network home
elif 'HOMESHARE' in os.environ:
homedir = decodepath(os.environ['HOMESHARE'])
# See if there's a local home
elif 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ:
homedir = os.path.join(os.environ['HOMEDRIVE'],
os.environ['HOMEPATH'])
homedir = decodepath(homedir)
# Maybe a user profile?
elif 'USERPROFILE' in os.environ:
homedir = decodepath(os.path.join(os.environ['USERPROFILE']))
else:
try:
from ..extern.six.moves import winreg as wreg
shell_folders = r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders)
homedir = wreg.QueryValueEx(key, 'Personal')[0]
homedir = decodepath(homedir)
key.Close()
except Exception:
# As a final possible resort, see if HOME is present
if 'HOME' in os.environ:
homedir = decodepath(os.environ['HOME'])
else:
raise OSError('Could not find windows home directory to '
'search for astropy config dir')
else:
# for other platforms, try HOME, although it probably isn't there
if 'HOME' in os.environ:
homedir = decodepath(os.environ['HOME'])
else:
raise OSError('Could not find a home directory to search for '
'astropy config dir - are you on an unsupported '
'platform?')
return homedir
def get_config_dir(create=True):
"""
Determines the Astropy configuration directory name and creates the
directory if it doesn't exist.
This directory is typically ``$HOME/.astropy/config``, but if the
XDG_CONFIG_HOME environment variable is set and the
``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
configdir : str
The absolute path to the configuration directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_config, that overrides all
if set_temp_config._temp_path is not None:
xch = set_temp_config._temp_path
config_path = os.path.join(xch, 'astropy')
if not os.path.exists(config_path):
os.mkdir(config_path)
return os.path.abspath(config_path)
# first look for XDG_CONFIG_HOME
xch = os.environ.get('XDG_CONFIG_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('config', linkto))
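# Illustrative sketch (not part of the original module) of how the lookup above
# resolves in practice; the paths shown are assumptions for a typical POSIX setup:
#
#   get_config_dir()                              -> ~/.astropy/config   (default)
#   get_config_dir() with $XDG_CONFIG_HOME/astropy present
#                                                 -> $XDG_CONFIG_HOME/astropy
#   get_config_dir() inside set_temp_config('/tmp/cfg')
#                                                 -> /tmp/cfg/astropy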
def get_cache_dir():
"""
Determines the Astropy cache directory name and creates the directory if it
doesn't exist.
This directory is typically ``$HOME/.astropy/cache``, but if the
XDG_CACHE_HOME environment variable is set and the
``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
cachedir : str
The absolute path to the cache directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_cache, that overrides all
if set_temp_cache._temp_path is not None:
xch = set_temp_cache._temp_path
cache_path = os.path.join(xch, 'astropy')
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return os.path.abspath(cache_path)
# first look for XDG_CACHE_HOME
xch = os.environ.get('XDG_CACHE_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('cache', linkto))
class _SetTempPath(object):
_temp_path = None
_default_path_getter = None
def __init__(self, path=None, delete=False):
if path is not None:
path = os.path.abspath(path)
self._path = path
self._delete = delete
self._prev_path = self.__class__._temp_path
def __enter__(self):
self.__class__._temp_path = self._path
return self._default_path_getter()
def __exit__(self, *args):
self.__class__._temp_path = self._prev_path
if self._delete and self._path is not None:
shutil.rmtree(self._path)
def __call__(self, func):
"""Implements use as a decorator."""
@wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
class set_temp_config(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy config, primarily
for use with testing.
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the config path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy config
files, or create them if they do not already exist. If None, this
restores the config path to the user's default config path as returned
by `get_config_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_config_dir)
def __enter__(self):
# Special case for the config case, where we need to reset all the
# cached config objects
from .configuration import _cfgobjs
path = super(set_temp_config, self).__enter__()
_cfgobjs.clear()
return path
def __exit__(self, *args):
from .configuration import _cfgobjs
super(set_temp_config, self).__exit__(*args)
_cfgobjs.clear()
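# Illustrative usage sketch (not part of the original module), assuming the
# temporary directory already exists; set_temp_config works both as a context
# manager and as a decorator:
#
#   >>> with set_temp_config('/tmp/astropy-test', delete=True) as cfg_dir:
#   ...     assert cfg_dir == get_config_dir()
#
#   >>> @set_temp_config('/tmp/astropy-test')
#   ... def test_uses_temp_config():
#   ...     assert get_config_dir().startswith('/tmp/astropy-test')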
class set_temp_cache(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy download cache,
primarily for use with testing (though there may be other applications
for setting a different cache directory, for example to switch to a cache
dedicated to large files).
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the cache path
just within that function.
Parameters
----------
path : str
The directory (which must exist) in which to find the Astropy cache
files, or create them if they do not already exist. If None, this
restores the cache path to the user's default cache path as returned
by `get_cache_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_cache_dir)
def _find_or_create_astropy_dir(dirnm, linkto):
innerdir = os.path.join(_find_home(), '.astropy')
maindir = os.path.join(_find_home(), '.astropy', dirnm)
if not os.path.exists(maindir):
# first create .astropy dir if needed
if not os.path.exists(innerdir):
try:
os.mkdir(innerdir)
except OSError:
if not os.path.isdir(innerdir):
raise
elif not os.path.isdir(innerdir):
msg = 'Intended Astropy directory {0} is actually a file.'
raise IOError(msg.format(innerdir))
try:
os.mkdir(maindir)
except OSError:
if not os.path.isdir(maindir):
raise
if (not sys.platform.startswith('win') and
linkto is not None and
not os.path.exists(linkto)):
os.symlink(maindir, linkto)
elif not os.path.isdir(maindir):
msg = 'Intended Astropy {0} directory {1} is actually a file.'
raise IOError(msg.format(dirnm, maindir))
return os.path.abspath(maindir)
| bsd-3-clause | 6,146,739,670,736,519,000 | 33.107937 | 99 | 0.612249 | false |
sqall01/alertR | alertClientExecuter/alertRclient.py | 1 | 12421 | #!/usr/bin/env python3
# written by sqall
# twitter: https://twitter.com/sqall01
# blog: https://h4des.org
# github: https://github.com/sqall01
#
# Licensed under the GNU Affero General Public License, version 3.
import sys
import os
import stat
from lib import ServerCommunication, ConnectionWatchdog, Receiver
from lib import SMTPAlert
from lib import ExecuterAlert, AlertEventHandler
from lib import GlobalData
import logging
import time
import random
import xml.etree.ElementTree
# Function creates a path location for the given user input.
def make_path(input_location: str) -> str:
# Do nothing if the given location is an absolute path.
if input_location[0] == "/":
return input_location
# Replace ~ with the home directory.
elif input_location[0] == "~":
pos = -1
for i in range(1, len(input_location)):
if input_location[i] == "/":
continue
pos = i
break
if pos == -1:
return os.environ["HOME"]
return os.path.join(os.environ["HOME"], input_location[pos:])
# Assume we have a given relative path.
return os.path.join(os.path.dirname(os.path.abspath(__file__)), input_location)
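# Illustrative examples (not part of the original source) of how make_path
# resolves locations; the file names shown are assumptions:
#   make_path("/etc/alertr/config.xml")  -> "/etc/alertr/config.xml" (absolute paths are kept)
#   make_path("~/config/config.xml")     -> os.path.join($HOME, "config/config.xml")
#   make_path("config/config.xml")       -> "<directory of this script>/config/config.xml"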
if __name__ == '__main__':
# generate object of the global needed data
globalData = GlobalData()
fileName = os.path.basename(__file__)
# parse config file, get logfile configurations
# and initialize logging
try:
configRoot = xml.etree.ElementTree.parse(globalData.configFile).getroot()
logfile = make_path(str(configRoot.find("general").find("log").attrib["file"]))
# parse chosen log level
tempLoglevel = str(configRoot.find("general").find("log").attrib["level"])
tempLoglevel = tempLoglevel.upper()
if tempLoglevel == "DEBUG":
loglevel = logging.DEBUG
elif tempLoglevel == "INFO":
loglevel = logging.INFO
elif tempLoglevel == "WARNING":
loglevel = logging.WARNING
elif tempLoglevel == "ERROR":
loglevel = logging.ERROR
elif tempLoglevel == "CRITICAL":
loglevel = logging.CRITICAL
else:
raise ValueError("No valid log level in config file.")
# initialize logging
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
filename=logfile,
level=loglevel)
except Exception as e:
print("Config could not be parsed.")
print(e)
sys.exit(1)
# parse the rest of the config with initialized logging
try:
# Check file permission of config file (do not allow it to be accessible by others).
config_stat = os.stat(globalData.configFile)
if (config_stat.st_mode & stat.S_IROTH
or config_stat.st_mode & stat.S_IWOTH
or config_stat.st_mode & stat.S_IXOTH):
raise ValueError("Config file is accessible by others. Please remove file permissions for others.")
# check if config and client version are compatible
version = float(configRoot.attrib["version"])
if version != globalData.version:
raise ValueError("Config version '%.3f' not "
% version
+ "compatible with client version '%.3f'."
% globalData.version)
# parse server configurations
server = str(configRoot.find("general").find("server").attrib["host"])
serverPort = int(configRoot.find("general").find("server").attrib["port"])
# get server certificate file and check if it does exist
serverCAFile = os.path.abspath(make_path(str(configRoot.find("general").find("server").attrib["caFile"])))
if os.path.exists(serverCAFile) is False:
raise ValueError("Server CA does not exist.")
# get client certificate and keyfile (if required)
certificateRequired = (str(configRoot.find("general").find(
"client").attrib["certificateRequired"]).upper() == "TRUE")
if certificateRequired is True:
clientCertFile = os.path.abspath(
make_path(str(configRoot.find("general").find("client").attrib["certFile"])))
clientKeyFile = os.path.abspath(
make_path(str(configRoot.find("general").find("client").attrib["keyFile"])))
if (os.path.exists(clientCertFile) is False
or os.path.exists(clientKeyFile) is False):
raise ValueError("Client certificate or key does not exist.")
key_stat = os.stat(clientKeyFile)
if (key_stat.st_mode & stat.S_IROTH
or key_stat.st_mode & stat.S_IWOTH
or key_stat.st_mode & stat.S_IXOTH):
raise ValueError("Client key is accessible by others. Please remove file permissions for others.")
else:
clientCertFile = None
clientKeyFile = None
# get user credentials
username = str(configRoot.find("general").find("credentials").attrib["username"])
password = str(configRoot.find("general").find("credentials").attrib["password"])
# Get connection settings.
temp = (str(configRoot.find("general").find("connection").attrib["persistent"]).upper() == "TRUE")
if temp:
globalData.persistent = 1
else:
globalData.persistent = 0
# parse smtp options if activated
smtpActivated = (str(configRoot.find("smtp").find("general").attrib["activated"]).upper() == "TRUE")
smtpServer = ""
smtpPort = -1
smtpFromAddr = ""
smtpToAddr = ""
if smtpActivated is True:
smtpServer = str(configRoot.find("smtp").find("server").attrib["host"])
smtpPort = int(configRoot.find("smtp").find("server").attrib["port"])
smtpFromAddr = str(configRoot.find("smtp").find("general").attrib["fromAddr"])
smtpToAddr = str(configRoot.find("smtp").find("general").attrib["toAddr"])
# parse all alerts
for item in configRoot.find("alerts").iterfind("alert"):
alert = ExecuterAlert()
# Get executer specific values.
temp_execute = make_path(str(item.find("executer").attrib["execute"]))
alert.cmd_triggered_list.append(temp_execute)
alert.cmd_normal_list.append(temp_execute)
alert.cmd_profile_change_list.append(temp_execute)
# Parse all arguments that are used for the command when
# a sensor alert with state "triggered" is received.
cmd_triggered_activated = str(item.find("executer").find("triggered").attrib["activated"]).upper() == "TRUE"
alert.cmd_triggered_activated = cmd_triggered_activated
if cmd_triggered_activated:
for argument in item.find("executer").find("triggered").iterfind("argument"):
alert.cmd_triggered_list.append(str(argument.text))
# Parse all arguments that are used for the command when
# a sensor alert with state "normal" is received.
cmd_normal_activated = str(item.find("executer").find("normal").attrib["activated"]).upper() == "TRUE"
alert.cmd_normal_activated = cmd_normal_activated
if cmd_normal_activated:
for argument in item.find("executer").find("normal").iterfind("argument"):
alert.cmd_normal_list.append(str(argument.text))
# Parse all arguments that are used for the command when
# a profile change message is received.
cmd_profile_change_activated = str(
item.find("executer").find("profilechange").attrib["activated"]).upper() == "TRUE"
alert.cmd_profile_change_activated = cmd_profile_change_activated
if cmd_profile_change_activated:
for profile in item.find("executer").find("profilechange").iterfind("profile"):
alert.cmd_profile_change_target_profiles.add(int(profile.text))
for argument in item.find("executer").find("profilechange").iterfind("argument"):
alert.cmd_profile_change_list.append(str(argument.text))
# these options are needed by the server to
# differentiate between the registered alerts
alert.id = int(item.find("general").attrib["id"])
alert.description = str(item.find("general").attrib["description"])
alert.alertLevels = list()
for alertLevelXml in item.iterfind("alertLevel"):
alert.alertLevels.append(int(alertLevelXml.text))
# check if description is empty
if len(alert.description) == 0:
raise ValueError("Description of alert %d is empty."
% alert.id)
# check if the id of the alert is unique
for registeredAlert in globalData.alerts:
if registeredAlert.id == alert.id:
raise ValueError("Id of alert %d is already taken."
% alert.id)
if cmd_profile_change_activated and not alert.cmd_profile_change_target_profiles:
raise ValueError("No profiles set for profilechange of alert %d."
% alert.id)
globalData.alerts.append(alert)
except Exception as e:
logging.exception("[%s]: Could not parse config." % fileName)
sys.exit(1)
random.seed()
# check if smtp is activated => generate object to send eMail alerts
if smtpActivated is True:
globalData.smtpAlert = SMTPAlert(smtpServer, smtpPort, smtpFromAddr, smtpToAddr)
else:
globalData.smtpAlert = None
# generate object for the communication to the server and connect to it
globalData.serverComm = ServerCommunication(server,
serverPort,
serverCAFile,
username,
password,
clientCertFile,
clientKeyFile,
AlertEventHandler(globalData),
globalData)
connectionRetries = 1
logging.info("[%s]: Connecting to server." % fileName)
while True:
# check if 5 unsuccessful attempts are made to connect
# to the server and if smtp alert is activated
# => send eMail alert
if (globalData.smtpAlert is not None
and (connectionRetries % 5) == 0):
globalData.smtpAlert.sendCommunicationAlert(connectionRetries)
if globalData.serverComm.initialize() is True:
# if smtp alert is activated
# => send email that communication problems are solved
if globalData.smtpAlert is not None:
globalData.smtpAlert.sendCommunicationAlertClear()
connectionRetries = 1
break
connectionRetries += 1
logging.critical("[%s]: Connecting to server failed. Try again in 5 seconds." % fileName)
time.sleep(5)
# when connected => generate watchdog object to monitor the
# server connection
logging.info("[%s]: Starting watchdog thread." % fileName)
watchdog = ConnectionWatchdog(globalData.serverComm,
globalData.pingInterval,
globalData.smtpAlert)
# set thread to daemon
# => threads terminates when main thread terminates
watchdog.daemon = True
watchdog.start()
# initialize all alerts
logging.info("[%s] Initializing alerts." % fileName)
for alert in globalData.alerts:
alert.initialize()
logging.info("[%s]: Client started." % fileName)
# generate receiver to handle incoming data (for example status updates)
# (note: we will not return from the receiver unless the client is terminated)
receiver = Receiver(globalData.serverComm)
receiver.run()
| agpl-3.0 | 694,129,595,070,432,500 | 41.979239 | 120 | 0.591901 | false |
kwikteam/phy | tools/api.py | 1 | 7682 | # -*- coding: utf-8 -*-
"""Minimal API documentation generation."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from importlib import import_module
import inspect
import os.path as op
import re
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
def _name(obj):
if hasattr(obj, '__name__'):
return obj.__name__
elif inspect.isdatadescriptor(obj):
return obj.fget.__name__
def _full_name(subpackage, obj):
return '{}.{}'.format(subpackage.__name__, _name(obj))
def _anchor(name):
anchor = name.lower().replace(' ', '-')
anchor = re.sub(r'[^\w\- ]', '', anchor)
return anchor
_docstring_header_pattern = re.compile(r'^([^\n]+)\n[\-\=]{3,}$', flags=re.MULTILINE)
_docstring_parameters_pattern = re.compile(r'^([^ \n]+) \: ([^\n]+)$', flags=re.MULTILINE)
def _replace_docstring_header(paragraph):
"""Process NumPy-like function docstrings."""
# Replace Markdown headers in docstrings with light headers in bold.
paragraph = re.sub(_docstring_header_pattern, r'**\1**', paragraph)
paragraph = re.sub(_docstring_parameters_pattern, r'\n* `\1 : \2` ', paragraph)
return paragraph
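# Illustrative example (not from the original source): a NumPy-style docstring
# section such as
#
#   Parameters
#   ----------
#   path : str
#
# is rewritten by _replace_docstring_header() into Markdown-friendly text:
#
#   **Parameters**
#   * `path : str`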
def _doc(obj):
doc = inspect.getdoc(obj) or ''
doc = doc.strip()
if r'\n\n' in doc:
i = doc.index(r'\n\n')
doc = re.sub(r'\n(?!=\n)', '', doc[:i]) + doc[i:]  # remove standalone newlines (strings are immutable, so rebuild doc)
if doc and '---' in doc:
return _replace_docstring_header(doc)
else:
return doc
#------------------------------------------------------------------------------
# Introspection methods
#------------------------------------------------------------------------------
def _is_public(obj):
name = _name(obj) if not isinstance(obj, str) else obj
if name:
return not name.startswith('_')
else:
return True
def _is_defined_in_package(obj, package):
if isinstance(obj, property):
obj = obj.fget
mod = inspect.getmodule(obj)
if mod and hasattr(mod, '__name__'):
name = mod.__name__
return name.split('.')[0].startswith(package)
return True
def _iter_doc_members(obj, package=None):
for name, member in inspect.getmembers(obj):
if _is_public(name):
if package is None or _is_defined_in_package(member, package):
yield member
def _iter_subpackages(package, subpackages):
"""Iterate through a list of subpackages."""
for subpackage in subpackages:
yield import_module('{}.{}'.format(package, subpackage))
def _iter_vars(mod):
"""Iterate through a list of variables define in a module's public namespace."""
vars = sorted(var for var in dir(mod) if _is_public(var))
for var in vars:
yield getattr(mod, var)
def _iter_functions(subpackage):
return filter(inspect.isfunction, _iter_vars(subpackage))
def _iter_classes(subpackage):
return filter(inspect.isclass, _iter_vars(subpackage))
def _iter_methods(klass, package=None):
for member in _iter_doc_members(klass, package):
if inspect.isfunction(member) or inspect.ismethod(member):
if inspect.isdatadescriptor(member):
continue
yield member
def _iter_properties(klass, package=None):
for member in _iter_doc_members(klass, package):
if isinstance(member, property):
yield member.fget
#------------------------------------------------------------------------------
# API doc generation
#------------------------------------------------------------------------------
def _function_header(subpackage, func):
"""Generate the docstring of a function."""
args = str(inspect.signature(func))
return "{name}{args}".format(name=_full_name(subpackage, func), args=args)
_FUNCTION_PATTERN = '%s\n\n\n**`%s`**\n\n%s\n\n---'
def _doc_function(subpackage, func):
title = _full_name(subpackage, func)
return _FUNCTION_PATTERN % (title, _function_header(subpackage, func), _doc(func))
def _doc_method(klass, func):
"""Generate the docstring of a method."""
args = str(inspect.signature(func))
title = "{klass}.{name}".format(klass=klass.__name__, name=_name(func))
header = "{klass}.{name}{args}".format(klass=klass.__name__, name=_name(func), args=args)
docstring = _doc(func)
return _FUNCTION_PATTERN % (title, header, docstring)
def _doc_property(klass, prop):
"""Generate the docstring of a property."""
header = "{klass}.{name}".format(klass=klass.__name__, name=_name(prop))
docstring = _doc(prop)
return _FUNCTION_PATTERN % (header, header, docstring)
def _link(name, anchor=None):
return "[{name}](#{anchor})".format(name=name, anchor=anchor or _anchor(name))
def _generate_preamble(package, subpackages):
yield "# API documentation of {}".format(package)
yield _doc(import_module(package))
yield "## Table of contents"
# Table of contents: list of modules.
for subpackage in _iter_subpackages(package, subpackages):
subpackage_name = subpackage.__name__
yield "### " + _link(subpackage_name)
# List of top-level functions in the subpackage.
for func in _iter_functions(subpackage):
yield '* ' + _link(
_full_name(subpackage, func), _anchor(_full_name(subpackage, func)))
# All public classes.
for klass in _iter_classes(subpackage):
# Class documentation.
yield "* " + _link(_full_name(subpackage, klass))
yield ""
yield ""
def _generate_paragraphs(package, subpackages):
"""Generate the paragraphs of the API documentation."""
# API doc of each module.
for subpackage in _iter_subpackages(package, subpackages):
subpackage_name = subpackage.__name__
yield "## {}".format(subpackage_name)
# Subpackage documentation.
yield _doc(import_module(subpackage_name))
yield "---"
# List of top-level functions in the subpackage.
for func in _iter_functions(subpackage):
yield '#### ' + _doc_function(subpackage, func)
# All public classes.
for klass in _iter_classes(subpackage):
# Class documentation.
yield "### {}".format(_full_name(subpackage, klass))
yield _doc(klass)
yield "---"
for method in _iter_methods(klass, package):
yield '#### ' + _doc_method(klass, method)
for prop in _iter_properties(klass, package):
yield '#### ' + _doc_property(klass, prop)
def _print_paragraph(paragraph):
out = ''
out += paragraph + '\n'
if not paragraph.startswith('* '):
out += '\n'
return out
def generate_api_doc(package, subpackages, path=None):
out = ''
for paragraph in _generate_preamble(package, subpackages):
out += _print_paragraph(paragraph)
for paragraph in _generate_paragraphs(package, subpackages):
out += _print_paragraph(paragraph)
if path is None:
return out
else:
with open(path, 'w') as f:
f.write('\n'.join([_.rstrip() for _ in out.splitlines()]))
if __name__ == '__main__':
package = 'phy'
subpackages = ['utils', 'gui', 'plot', 'cluster', 'apps', 'apps.template', 'apps.kwik']
curdir = op.dirname(op.realpath(__file__))
path = op.join(curdir, '../docs/api.md')
generate_api_doc(package, subpackages, path=path)
| bsd-3-clause | -4,705,370,727,807,438,000 | 29.355731 | 93 | 0.564193 | false |
FabriceSalvaire/PySpice | examples/ngspice-shared/ngspice-interpreter.py | 1 | 1806 | ####################################################################################################
#r#
#r# =====================
#r# NgSpice Interpreter
#r# =====================
#r#
#r# This example explains how to use the NgSpice binding.
#r#
####################################################################################################
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
####################################################################################################
from PySpice.Spice.NgSpice.Shared import NgSpiceShared
####################################################################################################
ngspice = NgSpiceShared.new_instance()
print(ngspice.exec_command('version -f'))
print(ngspice.exec_command('print all'))
print(ngspice.exec_command('devhelp'))
print(ngspice.exec_command('devhelp resistor'))
circuit = '''
.title Voltage Multiplier
.SUBCKT 1N4148 1 2
*
R1 1 2 5.827E+9
D1 1 2 1N4148
*
.MODEL 1N4148 D
+ IS = 4.352E-9
+ N = 1.906
+ BV = 110
+ IBV = 0.0001
+ RS = 0.6458
+ CJO = 7.048E-13
+ VJ = 0.869
+ M = 0.03
+ FC = 0.5
+ TT = 3.48E-9
.ENDS
Vinput in 0 DC 0V AC 1V SIN(0V 10V 50Hz 0s 0Hz)
C0 in 1 1mF
X0 1 0 1N4148
C1 0 2 1mF
X1 2 1 1N4148
C2 1 3 1mF
X2 3 2 1N4148
C3 2 4 1mF
X3 4 3 1N4148
C4 3 5 1mF
X4 5 4 1N4148
R1 5 6 1MegOhm
.options TEMP = 25°C
.options TNOM = 25°C
.options filetype = binary
.options NOINIT
.ic
.tran 0.0001s 0.4s 0s
.end
'''
ngspice.load_circuit(circuit)
print('Loaded circuit:')
print(ngspice.listing())
print(ngspice.show('c3'))
print(ngspice.showmod('c3'))
ngspice.run()
print('Plots:', ngspice.plot_names)
print(ngspice.ressource_usage())
print(ngspice.status())
plot = ngspice.plot(simulation=None, plot_name=ngspice.last_plot)
print(plot)
# ngspice.quit()
| gpl-3.0 | -2,954,109,373,574,108,700 | 19.735632 | 100 | 0.525499 | false |
thinkle/gourmet | gourmet/check_encodings.py | 1 | 10430 | from typing import Dict
from gi.repository import Gtk
from .gdebug import debug
from .gtk_extras import dialog_extras as de
from gettext import gettext as _
from .prefs import Prefs
class CheckEncoding:
"""A class to read a file and guess the correct text encoding."""
encodings = ['iso8859', 'ascii', 'latin_1', 'cp850', 'cp1252', 'utf-8']
all_encodings = ['ascii', 'cp037', 'cp424', 'cp437', 'cp500', 'cp737',
'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857',
'cp860', 'cp861', 'cp862', 'cp863', 'cp864', 'cp865',
'cp869', 'cp874', 'cp875', 'cp1006', 'cp1026', 'cp1140',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254',
'cp1255', 'cp1256', 'cp1258', 'latin_1', 'iso8859_2',
'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
'iso8859_7', 'iso8859_8', 'iso8859_9', 'iso8859_10',
'iso8859_13', 'iso8859_14', 'iso8859_15', 'koi8_r',
'koi8_u', 'mac_cyrillic', 'mac_greek', 'mac_iceland',
'mac_latin2', 'mac_roman', 'mac_turkish', 'utf_16',
'utf_16_be', 'utf_16_le', 'utf_7', 'utf_8']
def __init__(self, file, encodings=None):
if Prefs.instance().get('utf-16', False):
self.encodings.extend(['utf_16', 'utf_16_le', 'utf_16_be'])
if encodings is not None:
self.encodings = encodings
if isinstance(file, str):
file = open(file, 'rb')
self.txt = file.read()
file.close()
def test_encodings(self):
"""Move through self.encodings one at a time and return the first
encoding that decodes our text cleanly. We return a tuple (encoding,decoded_text)"""
for e in self.encodings:
try:
t=self.txt.decode(e)
return (e,t)
except UnicodeDecodeError:
pass
def get_encodings (self):
encs = self.test_all_encodings(self.encodings)
if encs:
return encs
else:
return self.test_all_encodings(self.all_encodings)
def test_all_encodings (self,encodings=None):
"""Test all encodings and return a dictionary of possible encodings."""
if not encodings:
encodings=self.all_encodings
self.possible_encodings = {}
for e in encodings:
try:
d=self.txt.decode(e)
if d and (d not in self.possible_encodings.values()):
# if we don't already have this possibility, add
self.possible_encodings[e] = d
except UnicodeDecodeError:
pass
return self.possible_encodings
class GetFile(CheckEncoding):
"""Handed a filename, return a list of lines."""
def __init__(self, file: str, encodings=None):
super().__init__(file, encodings)
encs: Dict[str, str] = self.get_encodings()
if encs:
if len(list(encs.keys())) > 1:
encoding = getEncoding(encodings=encs)
else:
encoding = list(encs.keys())[0]
self.enc = encoding
self.lines = encs[self.enc].splitlines()
debug('reading file %s as encoding %s'%(file, self.enc))
else:
raise Exception("Cannot decode file %s" % file)
def get_file(file: str, encodings=None):
gf = GetFile(file, encodings)
return gf.lines
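# Illustrative usage sketch (not part of the original module); the file name is
# an assumption:
#   lines = get_file('/path/to/recipe.txt')
# reads the raw bytes, tries the candidate encodings, pops up EncodingDialog if
# several encodings decode the text differently, and returns the decoded lines.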
class EncodingDialog(de.OptionDialog):
"""Create a dialog to allow user to select correct encoding for an input file."""
context_lines = 2
def __init__(self, default=None, label=_("Select encoding"),
sublabel=_("Cannot determine proper encoding. Please select the correct encoding from the following list."),
expander_label=_("See _file with encoding"),
encodings=None):
self.diff_lines = {}
self.cursor_already_set = False
self.expander_label = expander_label
self.encodings = encodings if encodings is not None else {}
self.current_error = 0
self.diff_texts()
self.options = self.create_options()
expander = self.create_expander()
self.setup_buffers()
super().__init__(default=default, label=label, sublabel=sublabel,
options=self.options, expander=expander)
self.set_default_size(700, 500)
self.combobox.connect('changed', self.change_encoding)
self.change_encoding()
self.created = False
self.expander.set_expanded(True)
def setup_motion_buttons (self):
self.hbb = Gtk.HButtonBox()
self.fb = Gtk.Button('Next Difference')
self.pb = Gtk.Button('Previous Difference')
self.pb.connect('clicked',lambda *args: self.move_to_difference(forward=False))
self.fb.connect('clicked',lambda *args: self.move_to_difference(forward=True))
self.hbb.add(self.pb)
self.hbb.add(self.fb)
self.evb.add(self.hbb)
self.hbb.show_all()
def get_option(self, widget):
super().get_option(widget)
self.change_encoding()
def create_options (self):
options = list(self.encodings.keys())
masterlist = CheckEncoding.encodings + CheckEncoding.all_encodings
options.sort(key=lambda x: masterlist.index(x))
return options
def create_expander(self):
self.evb = Gtk.VBox(vexpand=True)
self.tv = Gtk.TextView()
self.tv.set_editable(False)
self.buffer = self.tv.get_buffer()
self.evb.pack_start(self.tv, expand=True, fill=True, padding=0)
self.evb.show_all()
return self.expander_label, self.evb
def setup_buffers (self):
self.encoding_buffers={}
for k,t in list(self.encodings.items()):
self.encoding_buffers[k]=Gtk.TextBuffer()
self.highlight_tags = [self.encoding_buffers[k].create_tag(background='yellow')]
self.line_highlight_tags = [self.encoding_buffers[k].create_tag(background='green')]
self.set_buffer_text(self.encoding_buffers[k],t)
def change_encoding (self, _widget=None):
if self.cursor_already_set:
im=self.buffer.get_insert()
ti=self.buffer.get_iter_at_mark(im)
offset=ti.get_offset()
self.tv.set_buffer(self.encoding_buffers[self.ret])
self.buffer = self.encoding_buffers[self.ret]
debug('changed text to encoding %s'%self.ret,0)
def move_to_difference (self, forward=True):
dkeys = list(self.diff_lines.keys())
dkeys.sort()
if forward:
self.current_error += 1
else:
self.current_error = self.current_error - 1
if self.current_error >= len(dkeys): self.current_error = 0
if self.current_error < 0: self.current_error = len(dkeys)-1
mark=self.buffer.create_mark(
None,
self.buffer.get_iter_at_line_index(dkeys[self.current_error],0),
False,
)
self.tv.scroll_to_mark(mark,0)
def set_buffer_text (self, buffer, text):
"""Set buffer text to show encoding differences."""
lines = text.splitlines()
totl = len(lines)
shown = []
for line,diffs in list(self.diff_lines.items()):
if line in shown: continue
start_at = line - self.context_lines
if start_at < 0: start_at = 0
end_at = line + self.context_lines
if end_at >= totl: end_at = totl-1
if start_at != 0:
buffer.insert_with_tags(buffer.get_end_iter(),
'\n...\n',
)
for n in range(start_at,end_at):
if n in shown:
continue
shown.append(n)
l = lines[n]
if n==line:
start = 0
for sdiff,ediff in diffs:
buffer.insert_with_tags(buffer.get_end_iter(),
l[start:sdiff],
*self.line_highlight_tags)
buffer.insert_with_tags(buffer.get_end_iter(),
l[sdiff:ediff],
*self.highlight_tags)
start = ediff
buffer.insert_with_tags(buffer.get_end_iter(),
l[start:],
*self.line_highlight_tags)
else:
buffer.insert_with_tags(buffer.get_end_iter(),l)
def diff_texts(self):
"""Compare different encoding for characters where they differ."""
encoded_buffers = list(self.encodings.values())
# Sort by number of newlines (most first)
encoded_buffers.sort(key=lambda x: len(x.splitlines()), reverse=True)
enc1 = encoded_buffers[0]
enc_rest = [e.splitlines() for e in encoded_buffers[1:]]
for linenum, line in enumerate(enc1.splitlines()):
other_lines = [len(e) > linenum and e[linenum] for e in enc_rest]
# Remove any Falses returned by above
other_lines = [x for x in other_lines if not isinstance(x, bool)]
# If not all lines are the same, create a diff marking where they
# differ.
if not all(line == ol for ol in other_lines):
ranges = []
for chnum, ch in enumerate(line):
# Check that the lines are the same. If not, mark where
if not all([len(line) > chnum and ch == line[chnum]
for line in other_lines]):
if ranges and ranges[-1][1] == chnum:
ranges[-1][1] = chnum+1
else:
ranges.append([chnum, chnum+1])
self.diff_lines[linenum] = ranges
def getEncoding(*args, **kwargs):
dialog = EncodingDialog(*args, **kwargs)
result = dialog.run()
if (not result) and dialog.encodings:
return dialog.options[0]
elif not result:
return 'ascii'
else:
return result
| gpl-2.0 | -2,510,780,388,387,652,600 | 40.553785 | 125 | 0.541611 | false |
jakenjarvis/pyOss | MasterlistChecker.py | 1 | 12321 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file's encoding is UTF-8 without a BOM.
################################################################################
__appname__ = "MasterlistChecker.py"
__author__ = "Jaken<[email protected]>"
__copyright__ = "Copyright 2010, Jaken"
__license__ = """
GNU General Public License v3
This file is part of pyOss.
Copyright (C) 2010 Jaken.([email protected])
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__version__ = "1.0.0"
__credits__ = [
'"Jaken" <[email protected]>',
]
################################################################################
# Import
################################################################################
import sys
import os
import codecs
import re
reload(sys)
sys.setdefaultencoding('utf-8')
import uuid
from optparse import OptionParser
from pyOssLib.v1_0.MasterlistLib import *
from pyOssLib.v1_0.UserlistLib import *
################################################################################
# Global variable
################################################################################
# [masterlist] Regex that detects ESM/ESP file lines (">" and "<" prefixes are still recognized as files)
regexMods = re.compile(ur"^([><]?)([^><\\%?*:\"$^]{1}[^\\><:\"/|?*]*[.](esm|esp))\s*.*$", re.IGNORECASE)
# [masterlist] Regex that detects comment or command lines
regexCommand = re.compile(ur"^([><]?)([\\%?*:\"$^]{1})\s*(.*)$")
# [masterlist] Regex that detects the start of a group, e.g. \BeginGroup\: Post BSA
regexBeginGroup = re.compile(ur"^\\BeginGroup\\:(.*)", re.IGNORECASE)
# [masterlist] Regex that detects the end of a group, e.g. \EndGroup\\
regexEndGroup = re.compile(ur"^\\EndGroup\\\\", re.IGNORECASE)
# [masterlist correction] Regex that detects lines that look like BASH tag definitions
regexExBash = re.compile(ur"^([{]{2}BASH[:]\S+[}]{2}.*)$", re.IGNORECASE)
# [masterlist correction] Regex that detects lines that look like mistyped comments
regexExComment = re.compile(ur"^/\s*(.*)")
# [masterlist correction] Regex that detects lines that look like ESM/ESP entries
regexExMods1 = re.compile(ur"^(\w+(\w|[ ]|[$%'_@!()~-])+)\s*$")
regexWarnings = re.compile(ur"""
^([><]?)
([^><\\%?*:\"$^]{1})
([a-zA-Z0-9_() .\[\]#!+,%&'-])+
(?!
(
\s{2,}
|[_() .\[\]#!+,%&'-]{2,}
)
)[.](esm|esp)$
""", re.IGNORECASE | re.VERBOSE)
################################################################################
# Function
################################################################################
def CreateUuid():
return unicode(uuid.uuid4())
################################################################################
# Main
################################################################################
if __name__ == "__main__":
print u"%s Version: %s %s" % (__appname__, __version__, __copyright__)
print u""
usage = u"%prog [Options] MASTERLISTFILE"
version = u"%s %s" % (u"%prog", __version__)
parser = OptionParser(usage = usage, version = version)
parser.add_option("-o", "--output",
action="store",
type="string",
dest="outfilename",
default="MasterlistChecker.txt",
metavar="FILE",
help="specify an output file")
parser.add_option("-d", "--debug",
action="store_true",
dest="debug",
default=False,
help="debug output")
# Parse the command-line options
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error(u"incorrect number of arguments")
args0 = unicode(args[0], "shift-jis")
outfilename = unicode(options.outfilename, "shift-jis")
# Get the absolute paths
MasterlistFile = u"%s" % (os.path.abspath(args0))
OutputFile = u"%s" % (os.path.abspath(outfilename))
# Check that the input file exists
if not os.path.exists(MasterlistFile):
parser.error(u"file not exists. \'%s\'" % MasterlistFile)
# If the output file already exists, delete it
if os.path.exists(OutputFile):
os.remove(OutputFile)
# Start writing the output
fileoutput = codecs.open(OutputFile, "wU", "utf-8-sig")
try:
# Create simple helper functions for writing output
def WriteLine(debug = False, screen = True, file = True, line = u""):
if debug:
if options.debug:
# write the line
if screen:
print u"%s" % (line)
if file:
fileoutput.write(u"%s\r\n" % (line))
else:
# do not write anything
pass
else:
if screen:
print u"%s" % (line)
if file:
fileoutput.write(u"%s\r\n" % (line))
return
def PrintWriteLine(line):
WriteLine(False, False, True, line)
return
def DebugWriteLine(line):
WriteLine(True, True, True, line)
return
PrintWriteLine(u"--------------------------------------------------")
PrintWriteLine(u"Output pyOss - MasterlistChecker.py")
PrintWriteLine(u"Input files:")
PrintWriteLine(u" Masterlist : %s" % (MasterlistFile))
PrintWriteLine(u"Output files:")
PrintWriteLine(u" OutputFile : %s" % (OutputFile))
PrintWriteLine(u"--------------------------------------------------")
SkipLines = []
StackErrors = {}
def AddStackErrors(count, error, message):
if not error in StackErrors:
StackErrors[error] = []
StackErrors[error] += [[count, message]]
# --------------------------------------------------
#
# --------------------------------------------------
masterfile = Masterlist()
def _onEncodingError(linecount, linestring, encoding):
message = u"Can not be displayed : %s" % (encoding)
AddStackErrors(linecount, u"A01 UNICODE encoding error!", message)
return
masterfile.OnEncodingErrorFromSave = _onEncodingError
masterfile.OnDecodingErrorFromLoad = _onEncodingError
def _onCreateLineObject(linecount, linestring):
lineobject = Line(linestring)
lineobject.LineCount = linecount
linestring = lineobject.LineString
if lineobject.IsType(EnumLineType.OTHER):
matchEx = [regexExBash.search(linestring)
,regexExComment.search(linestring)
,regexExMods1.search(linestring)]
if matchEx[0] is not None:
# A line that looks like a Bash tag definition where the leading % was forgotten.
# Prepend a % to it.
linecorrectionstring = u"%"
AddStackErrors(linecount, u"A02 Typographical errors!", u"%s => %s" % (linecorrectionstring, linestring))
elif matchEx[1] is not None:
# A line that looks like a comment where "\" (backslash) was mistyped as "/" (slash).
# Rewrite it with a backslash. (In English locales the yen sign is displayed as a backslash.)
linecorrectionstring = u"\\"
AddStackErrors(linecount, u"A02 Typographical errors!", u"%s => %s" % (linecorrectionstring, linestring))
elif matchEx[2] is not None:
# A line that looks like a mod entry whose file extension was forgotten.
# Treat it as an esp. (A line without a period is most likely an ESP file.)
# So far no such mistakes seem to occur with esm files.
linecorrectionstring = u".esp"
AddStackErrors(linecount, u"A02 Typographical errors!", u"%s => %s" % (linecorrectionstring, linestring))
else:
if len(linestring) != 0:
AddStackErrors(linecount, u"A03 Format unknown error!", u"%s" % (linestring))
if lineobject.IsType(EnumLineType.MODS):
match2 = regexWarnings.search(linestring)
if match2 == None:
pass
#AddStackErrors(linecount, u"A04 Warning! Please check if this is correct.", u"%s" % (linestring))
return lineobject
masterfile.OnCreateLineObject = _onCreateLineObject
loadingflg = False
try:
masterfile.Load(MasterlistFile)
loadingflg = True
except BaseException as ex:
AddStackErrors(0, u"A05 Load error!", unicode(ex))
PrintWriteLine(u"--------------------------------------------------")
PrintWriteLine(u"Could not run some checks!!!!")
PrintWriteLine(u"--------------------------------------------------")
if loadingflg:
AddStackErrors(0, u"A00 Encoding Information", masterfile.Encoding)
GroupLists = {}
ModsLists = {}
for object in masterfile.EachRecursion():
if isinstance(object, Line):
if object.IsType(EnumLineType.MODS):
if object.LineString in ModsLists:
ModsLists[object.LineString] += [object]
else:
ModsLists[object.LineString] = [object]
if object.GetParentGroup().GroupName == None:
AddStackErrors(object.LineCount, u"B01 Warning! There are lines that do not belong to the group.", u"%s" % (object.LineString))
elif isinstance(object, Block):
pass
elif isinstance(object, Group):
if object.GroupName in GroupLists:
GroupLists[object.GroupName] += [object]
else:
GroupLists[object.GroupName] = [object]
for key, value in GroupLists.iteritems():
if len(value) >= 2:
for group in value:
linecount = group.GetTopChild().GetTopChild().LineCount
AddStackErrors(linecount, u"B02 Duplicate groups error!", u"%s" % (group.GroupName))
for key, value in ModsLists.iteritems():
if len(value) >= 2:
for mods in value:
AddStackErrors(mods.LineCount, u"B03 Duplicate mods error!", u"%s" % (mods.LineString))
# --------------------------------------------------
# Output the collected errors
# --------------------------------------------------
for errorkey in sorted(StackErrors):
errorvalue = StackErrors[errorkey]
PrintWriteLine(u"--------------------------------------------------")
PrintWriteLine(errorkey)
PrintWriteLine(u"--------------------------------------------------")
for error in errorvalue:
PrintWriteLine(u"%8d: %s" % (error[0], error[1]))
finally:
fileoutput.close()
print u"Completed!"
print u" Output File : %s" % (OutputFile)
| gpl-3.0 | -4,486,569,772,765,041,000 | 37.088136 | 155 | 0.479403 | false |
yk5/incubator-airflow | airflow/hooks/S3_hook.py | 1 | 12717 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.contrib.hooks.aws_hook import AwsHook
from six import BytesIO
from urllib.parse import urlparse
import re
import fnmatch
class S3Hook(AwsHook):
"""
Interact with AWS S3, using the boto3 library.
"""
def get_conn(self):
return self.get_client_type('s3')
@staticmethod
def parse_s3_url(s3url):
parsed_url = urlparse(s3url)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket_name instead of "%s"' % s3url)
else:
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return (bucket_name, key)
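# Illustrative example (not from the original source):
#   S3Hook.parse_s3_url('s3://my-bucket/path/to/file.csv')
#   returns ('my-bucket', 'path/to/file.csv')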
def check_for_bucket(self, bucket_name):
"""
Check if bucket_name exists.
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
try:
self.get_conn().head_bucket(Bucket=bucket_name)
return True
except:
return False
def get_bucket(self, bucket_name):
"""
Returns a boto3.S3.Bucket object
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
s3 = self.get_resource_type('s3')
return s3.Bucket(bucket_name)
def check_for_prefix(self, bucket_name, prefix, delimiter):
"""
Checks that a prefix exists in a bucket
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return False if plist is None else prefix in plist
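# Illustrative example (not from the original source); the bucket and prefix
# names are assumptions:
#   hook.check_for_prefix('my-bucket', 'data/2018/', delimiter='/')
#   returns True only if 'data/2018/' shows up among the prefixes listed
#   directly under 'data/'.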
def list_prefixes(self, bucket_name, prefix='', delimiter=''):
"""
Lists prefixes in a bucket under prefix
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
"""
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter)
has_results = False
prefixes = []
for page in response:
if 'CommonPrefixes' in page:
has_results = True
for p in page['CommonPrefixes']:
prefixes.append(p['Prefix'])
if has_results:
return prefixes
def list_keys(self, bucket_name, prefix='', delimiter=''):
"""
Lists keys in a bucket under prefix and not containing delimiter
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
"""
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter)
has_results = False
keys = []
for page in response:
if 'Contents' in page:
has_results = True
for k in page['Contents']:
keys.append(k['Key'])
if has_results:
return keys
def check_for_key(self, key, bucket_name=None):
"""
Checks if a key exists in a bucket
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
try:
self.get_conn().head_object(Bucket=bucket_name, Key=key)
return True
except:
return False
def get_key(self, key, bucket_name=None):
"""
Returns a boto3.s3.Object
:param key: the path to the key
:type key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
obj = self.get_resource_type('s3').Object(bucket_name, key)
obj.load()
return obj
def read_key(self, key, bucket_name=None):
"""
Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
"""
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8')
def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization={'CSV': {}},
output_serialization={'CSV': {}}):
"""
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload']
for event in response['Payload']
if 'Records' in event)
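# Illustrative example (not from the original source); the bucket, key and
# query are assumptions:
#   hook.select_key('s3://my-bucket/data.csv',
#                   expression='SELECT s._1 FROM S3Object s')
#   returns only the first CSV column of the object, as a single string.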
def check_for_wildcard_key(self,
wildcard_key, bucket_name=None, delimiter=''):
"""
Checks that a key matching a wildcard expression exists in a bucket
"""
return self.get_wildcard_key(wildcard_key=wildcard_key,
bucket_name=bucket_name,
delimiter=delimiter) is not None
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
"""
Returns a boto3.s3.Object object matching the wildcard expression
:param wildcard_key: the path to the key
:type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)
prefix = re.split(r'[*]', wildcard_key, 1)[0]
klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
if klist:
key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
if key_matches:
return self.get_key(key_matches[0], bucket_name)
def load_file(self,
filename,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
client = self.get_conn()
client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args)
def load_string(self,
string_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
encoding='utf-8'):
"""
Loads a string to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param string_data: string to set as content for the key.
:type string_data: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
self.load_bytes(string_data.encode(encoding),
key=key,
bucket_name=bucket_name,
replace=replace,
encrypt=encrypt)
def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads bytes to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args)
| apache-2.0 | -2,631,231,125,961,501,700 | 34.621849 | 108 | 0.57663 | false |
walty8/trac | trac/db/tests/sqlite_test.py | 1 | 3385 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import tempfile
import unittest
from cStringIO import StringIO
from trac.config import ConfigurationError
from trac.env import Environment
from trac.test import MockRequest
from trac.tests.compat import rmtree
from trac.util import translation
class DatabaseFileTestCase(unittest.TestCase):
def setUp(self):
self.env_path = tempfile.mkdtemp(prefix='trac-tempenv-')
self.db_path = os.path.join(self.env_path, 'db', 'trac.db')
def tearDown(self):
rmtree(self.env_path)
def _create_env(self):
env = Environment(self.env_path, create=True)
env.shutdown()
def _db_query(self, env):
env.db_query("SELECT name FROM system")
def _make_environ(self, scheme='http', server_name='example.org',
server_port=80, method='GET', script_name='/trac',
cookie=None, **kwargs):
environ = {'wsgi.url_scheme': scheme, 'wsgi.input': StringIO(''),
'REQUEST_METHOD': method, 'SERVER_NAME': server_name,
'SERVER_PORT': server_port, 'SCRIPT_NAME': script_name}
if cookie:
environ['HTTP_COOKIE'] = cookie
environ.update(kwargs)
return environ
def test_missing_tracdb(self):
self._create_env()
os.remove(self.db_path)
env = Environment(self.env_path)
try:
self._db_query(env)
self.fail('ConfigurationError not raised')
except ConfigurationError as e:
self.assertIn('Database "', unicode(e))
self.assertIn('" not found.', unicode(e))
def test_no_permissions(self):
self._create_env()
os.chmod(self.db_path, 0444)
env = Environment(self.env_path)
try:
self._db_query(env)
self.fail('ConfigurationError not raised')
except ConfigurationError as e:
self.assertIn('requires read _and_ write permissions', unicode(e))
if os.name == 'posix' and os.getuid() == 0:
del test_no_permissions # For root, os.access() always returns True
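        # (With the 0444 chmod ineffective for root, the expected
        # ConfigurationError would never be raised, so the test is dropped.)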
def test_error_with_lazy_translation(self):
self._create_env()
os.remove(self.db_path)
env = Environment(self.env_path)
req = MockRequest(env, authname='trac_auth=1234567890')
translation.make_activable(lambda: req.locale, env.path)
try:
self._db_query(env)
self.fail('ConfigurationError not raised')
except ConfigurationError as e:
message = unicode(e)
self.assertIn('Database "', message)
self.assertIn('" not found.', message)
finally:
translation.deactivate()
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DatabaseFileTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| bsd-3-clause | 1,011,288,823,298,077,700 | 32.514851 | 78 | 0.631905 | false |
mluke93/osf.io | website/routes.py | 1 | 51178 | # -*- coding: utf-8 -*-
import os
import httplib as http
from flask import request
from flask import send_from_directory
from framework import status
from framework import sentry
from framework.auth import cas
from framework.routing import Rule
from framework.flask import redirect
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.auth import get_display_name
from framework.routing import xml_renderer
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from modularodm import Q
from modularodm.exceptions import QueryException, NoResultsFound
from website import util
from website import prereg
from website import settings
from website import language
from website.util import metrics
from website.util import paths
from website.util import sanitize
from website import maintenance
from website.models import Institution
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile import views as profile_views
from website.project import views as project_views
from website.addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.preprints import views as preprint_views
from website.institutions import views as institution_views
from website.notifications import views as notification_views
def get_globals():
"""Context variables that are available for every template rendered by
OSFWebRenderer.
"""
user = _get_current_user()
user_institutions = [{'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path} for inst in user.affiliated_institutions] if user else []
all_institutions = [{'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path} for inst in Institution.find().sort('name')]
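    # When the request arrives on a non-canonical domain, try to resolve it to
    # an institution landing page; failing that, rewrite the login URL onto the
    # canonical OSF domain.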
if request.host_url != settings.DOMAIN:
try:
inst_id = (Institution.find_one(Q('domains', 'eq', request.host.lower())))._id
request_login_url = '{}institutions/{}'.format(settings.DOMAIN, inst_id)
except NoResultsFound:
request_login_url = request.url.replace(request.host_url, settings.DOMAIN)
else:
request_login_url = request.url
return {
'private_link_anonymous': is_private_link_anonymous_view(),
'user_name': user.username if user else '',
'user_full_name': user.fullname if user else '',
'user_id': user._primary_key if user else '',
'user_locale': user.locale if user and user.locale else '',
'user_timezone': user.timezone if user and user.timezone else '',
'user_url': user.url if user else '',
'user_gravatar': profile_views.current_user_gravatar(size=25)['gravatar_url'] if user else '',
'user_email_verifications': user.unconfirmed_email_info if user else [],
'user_api_url': user.api_url if user else '',
'user_entry_point': metrics.get_entry_point(user) if user else '',
'user_institutions': user_institutions if user else None,
'all_institutions': all_institutions,
'display_name': get_display_name(user.fullname) if user else '',
'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
'piwik_host': settings.PIWIK_HOST,
'piwik_site_id': settings.PIWIK_SITE_ID,
'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
'dev_mode': settings.DEV_MODE,
'allow_login': settings.ALLOW_LOGIN,
'cookie_name': settings.COOKIE_NAME,
'status': status.pop_status_messages(),
'domain': settings.DOMAIN,
'api_domain': settings.API_DOMAIN,
'disk_saving_mode': settings.DISK_SAVING_MODE,
'language': language,
'noteworthy_links_node': settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
'popular_links_node': settings.POPULAR_LINKS_NODE,
'web_url_for': util.web_url_for,
'api_url_for': util.api_url_for,
'api_v2_url': util.api_v2_url, # URL function for templates
'api_v2_base': util.api_v2_url(''), # Base url used by JS api helper
'sanitize': sanitize,
'sjson': lambda s: sanitize.safe_json(s),
'webpack_asset': paths.webpack_asset,
'waterbutler_url': settings.WATERBUTLER_URL,
'login_url': cas.get_login_url(request_login_url),
'reauth_url': util.web_url_for('auth_logout', redirect_url=request.url, reauth=True),
'profile_url': cas.get_profile_url(),
'enable_institutions': settings.ENABLE_INSTITUTIONS,
'keen_project_id': settings.KEEN_PROJECT_ID,
'keen_write_key': settings.KEEN_WRITE_KEY,
'maintenance': maintenance.get_maintenance(),
}
def is_private_link_anonymous_view():
try:
# Avoid circular import
from website.project.model import PrivateLink
return PrivateLink.find_one(
Q('key', 'eq', request.args.get('view_only'))
).anonymous
except QueryException:
return False
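# Note: the flag computed by is_private_link_anonymous_view() is surfaced to
# templates as 'private_link_anonymous' via get_globals() above.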
class OsfWebRenderer(WebRenderer):
"""Render a Mako template with OSF context vars.
:param trust: Optional. If ``False``, markup-safe escaping will be enabled
"""
def __init__(self, *args, **kwargs):
kwargs['data'] = get_globals
super(OsfWebRenderer, self).__init__(*args, **kwargs)
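# Typical usage, as seen in the route tables below, e.g.:
#   OsfWebRenderer('profile.mako', trust=False)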
#: Use if a view only redirects or raises error
notemplate = OsfWebRenderer('', renderer=render_mako_string, trust=False)
# Static files (robots.txt, etc.)
def favicon():
return send_from_directory(
settings.STATIC_FOLDER,
'favicon.ico',
mimetype='image/vnd.microsoft.icon'
)
def robots():
"""Serves the robots.txt file."""
# Allow local robots.txt
if os.path.exists(os.path.join(settings.STATIC_FOLDER,
'robots.local.txt')):
robots_file = 'robots.local.txt'
else:
robots_file = 'robots.txt'
return send_from_directory(
settings.STATIC_FOLDER,
robots_file,
mimetype='text/plain'
)
def goodbye():
# Redirect to dashboard if logged in
if _get_current_user():
return redirect(util.web_url_for('index'))
status.push_status_message(language.LOGOUT, kind='success', trust=False)
return {}
def make_url_map(app):
"""Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
"""
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule(
'/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string, trust=False)
),
Rule(
'/api/v1/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
json_renderer
),
])
### GUID ###
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
notemplate,
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
])
### Base ###
process_rules(app, [
Rule(
'/dashboard/',
'get',
website_views.dashboard,
OsfWebRenderer('home.mako', trust=False)
),
Rule(
'/myprojects/',
'get',
website_views.my_projects,
OsfWebRenderer('my_projects.mako', trust=False)
),
Rule(
'/reproducibility/',
'get',
website_views.reproducibility,
notemplate
),
Rule('/about/', 'get', website_views.redirect_about, notemplate),
Rule('/help/', 'get', website_views.redirect_help, notemplate),
Rule('/faq/', 'get', {}, OsfWebRenderer('public/pages/faq.mako', trust=False)),
Rule(['/getting-started/', '/getting-started/email/', '/howosfworks/'], 'get', website_views.redirect_getting_started, notemplate),
Rule('/support/', 'get', {}, OsfWebRenderer('public/pages/support.mako', trust=False)),
Rule(
'/explore/',
'get',
{},
OsfWebRenderer('public/explore.mako', trust=False)
),
Rule(
[
'/messages/',
],
'get',
{},
OsfWebRenderer('public/comingsoon.mako', trust=False)
),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako', trust=False),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako', trust=False),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/meetings/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako', trust=False),
),
Rule(
'/api/v1/meetings/submissions/',
'get',
conference_views.conference_submissions,
json_renderer,
),
Rule(
'/presentations/',
'get',
conference_views.redirect_to_meetings,
json_renderer,
),
Rule(
'/news/',
'get',
website_views.redirect_to_cos_news,
notemplate
),
Rule(
'/prereg/',
'get',
prereg.prereg_landing_page,
OsfWebRenderer('prereg_landing_page.mako', trust=False)
),
Rule(
'/preprints/',
'get',
preprint_views.preprint_landing_page,
OsfWebRenderer('public/pages/preprint_landing.mako', trust=False),
),
Rule(
'/preprint/',
'get',
preprint_views.preprint_redirect,
notemplate,
),
Rule(
'/api/v1/prereg/draft_registrations/',
'get',
prereg.prereg_draft_registrations,
json_renderer,
),
])
# Site-wide API routes
process_rules(app, [
Rule(
'/citations/styles/',
'get',
citation_views.list_citation_styles,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_views.get_addon_user_config,
json_renderer,
),
], prefix='/api/v1')
# OAuth
process_rules(app, [
Rule(
'/oauth/connect/<service_name>/',
'get',
oauth_views.oauth_connect,
json_renderer,
),
Rule(
'/oauth/callback/<service_name>/',
'get',
oauth_views.oauth_callback,
OsfWebRenderer('util/oauth_complete.mako', trust=False),
),
])
process_rules(app, [
Rule(
[
'/oauth/accounts/<external_account_id>/',
],
'delete',
oauth_views.oauth_disconnect,
json_renderer,
)
], prefix='/api/v1')
process_rules(app, [
Rule('/confirmed_emails/', 'put', auth_views.unconfirmed_email_add, json_renderer),
Rule('/confirmed_emails/', 'delete', auth_views.unconfirmed_email_remove, json_renderer)
], prefix='/api/v1')
### Metadata ###
process_rules(app, [
Rule(
[
'/project/<pid>/comments/timestamps/',
'/project/<pid>/node/<nid>/comments/timestamps/',
],
'put',
project_views.comment.update_comments_timestamp,
json_renderer,
),
Rule(
[
'/project/<pid>/citation/',
'/project/<pid>/node/<nid>/citation/',
],
'get',
citation_views.node_citation,
json_renderer,
),
], prefix='/api/v1')
### Forms ###
process_rules(app, [
Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
Rule('/forms/reset_password/', 'get', website_views.reset_password_form, json_renderer),
], prefix='/api/v1')
### Discovery ###
process_rules(app, [
Rule(
'/explore/activity/',
'get',
discovery_views.activity,
OsfWebRenderer('public/pages/active_nodes.mako', trust=False)
),
])
### Auth ###
process_rules(app, [
# confirm email
Rule(
'/confirm/<uid>/<token>/',
'get',
auth_views.confirm_email_get,
notemplate
),
# reset password get
Rule(
'/resetpassword/<verification_key>/',
'get',
auth_views.reset_password_get,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# reset password post
Rule(
'/resetpassword/<verification_key>/',
'post',
auth_views.reset_password_post,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# resend confirmation get
Rule(
'/resend/',
'get',
auth_views.resend_confirmation_get,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# resend confirmation post
Rule(
'/resend/',
'post',
auth_views.resend_confirmation_post,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# user sign up page
Rule(
'/register/',
'get',
auth_views.auth_register,
OsfWebRenderer('public/login.mako', trust=False)
),
# create user account via api
Rule(
'/api/v1/register/',
'post',
auth_views.register_user,
json_renderer
),
# osf login and campaign login
Rule(
[
'/login/',
'/account/'
],
'get',
auth_views.auth_login,
OsfWebRenderer('public/login.mako', trust=False)
),
# osf logout and cas logout
Rule(
'/logout/',
'get',
auth_views.auth_logout,
notemplate
),
# forgot password get
Rule(
'/forgotpassword/',
'get',
auth_views.forgot_password_get,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
# forgot password post
Rule(
'/forgotpassword/',
'post',
auth_views.forgot_password_post,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
Rule(
'/login/connected_tools/',
'get',
landing_page_views.connected_tools,
notemplate
),
Rule(
'/login/enriched_profile/',
'get',
landing_page_views.enriched_profile,
notemplate
),
])
### Profile ###
# Web
process_rules(app, [
Rule(
'/profile/',
'get',
profile_views.profile_view,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
'/profile/<uid>/',
'get',
profile_views.profile_view_id,
OsfWebRenderer('profile.mako', trust=False)
),
# Route for claiming and setting email and password.
# Verification token must be querystring argument
Rule(
['/user/<uid>/<pid>/claim/'],
['get', 'post'],
project_views.contributor.claim_user_form,
OsfWebRenderer('claim_account.mako', trust=False)
),
Rule(
['/user/<uid>/<pid>/claim/verify/<token>/'],
['get', 'post'],
project_views.contributor.claim_user_registered,
OsfWebRenderer('claim_account_registered.mako', trust=False)
),
Rule(
'/settings/',
'get',
profile_views.user_profile,
OsfWebRenderer('profile/settings.mako', trust=False),
),
Rule(
'/settings/account/',
'get',
profile_views.user_account,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/account/password',
'post',
profile_views.user_account_password,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/addons/',
'get',
profile_views.user_addons,
OsfWebRenderer('profile/addons.mako', trust=False),
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
OsfWebRenderer('profile/notifications.mako', trust=False),
),
Rule(
'/settings/applications/',
'get',
profile_views.oauth_application_list,
OsfWebRenderer('profile/oauth_app_list.mako', trust=False)
),
Rule(
'/settings/applications/create/',
'get',
profile_views.oauth_application_register,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/applications/<client_id>/',
'get',
profile_views.oauth_application_detail,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/tokens/',
'get',
profile_views.personal_access_token_list,
OsfWebRenderer('profile/personal_tokens_list.mako', trust=False)
),
Rule(
'/settings/tokens/create/',
'get',
profile_views.personal_access_token_register,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
Rule(
'/settings/tokens/<_id>/',
'get',
profile_views.personal_access_token_detail,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
# TODO: Uncomment once outstanding issues with this feature are addressed
# Rule(
# '/@<twitter_handle>/',
# 'get',
# profile_views.redirect_to_twitter,
# OsfWebRenderer('error.mako', render_mako_string, trust=False)
# ),
])
# API
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view, json_renderer),
Rule('/profile/', 'put', profile_views.update_user, json_renderer),
Rule('/resend/', 'put', profile_views.resend_confirmation, json_renderer),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id, json_renderer),
# Used by profile.html
Rule('/profile/<uid>/edit/', 'post', profile_views.edit_profile, json_renderer),
Rule('/profile/<uid>/public_projects/', 'get',
profile_views.get_public_projects, json_renderer),
Rule('/profile/<uid>/public_components/', 'get',
profile_views.get_public_components, json_renderer),
Rule('/profile/<user_id>/summary/', 'get',
profile_views.get_profile_summary, json_renderer),
Rule('/user/<uid>/<pid>/claim/email/', 'post',
project_views.contributor.claim_user_post, json_renderer),
Rule(
'/profile/export/',
'post',
profile_views.request_export,
json_renderer,
),
Rule(
'/profile/deactivate/',
'post',
profile_views.request_deactivation,
json_renderer,
),
Rule(
[
'/profile/gravatar/',
'/users/gravatar/',
'/profile/gravatar/<size>',
'/users/gravatar/<size>',
],
'get',
profile_views.current_user_gravatar,
json_renderer,
),
Rule(
[
'/profile/<uid>/gravatar/',
'/users/<uid>/gravatar/',
'/profile/<uid>/gravatar/<size>',
'/users/<uid>/gravatar/<size>',
],
'get',
profile_views.get_gravatar,
json_renderer,
),
# Rules for user profile configuration
Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'get',
profile_views.serialize_social,
json_renderer,
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'get',
profile_views.serialize_jobs,
json_renderer,
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'get',
profile_views.serialize_schools,
json_renderer,
),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'put',
profile_views.unserialize_social,
json_renderer
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'put',
profile_views.unserialize_jobs,
json_renderer
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'put',
profile_views.unserialize_schools,
json_renderer
),
], prefix='/api/v1',)
### Search ###
# Web
process_rules(app, [
Rule(
'/search/',
'get',
{},
OsfWebRenderer('search.mako', trust=False)
),
Rule(
'/share/',
'get',
{},
OsfWebRenderer('share_search.mako', trust=False)
),
Rule(
'/share/registration/',
'get',
{'register': settings.SHARE_REGISTRATION_URL},
OsfWebRenderer('share_registration.mako', trust=False)
),
Rule(
'/share/help/',
'get',
{'help': settings.SHARE_API_DOCS_URL},
OsfWebRenderer('share_api_docs.mako', trust=False)
),
Rule( # FIXME: Dead route; possible that template never existed; confirm deletion candidate with ErinB
'/share_dashboard/',
'get',
{},
OsfWebRenderer('share_dashboard.mako', trust=False)
),
Rule(
'/share/atom/',
'get',
search_views.search_share_atom,
xml_renderer
),
Rule('/api/v1/user/search/', 'get', search_views.search_contributor, json_renderer),
Rule(
'/api/v1/search/node/',
'post',
project_views.node.search_node,
json_renderer,
),
])
# API
process_rules(app, [
Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
Rule('/share/search/', ['get', 'post'], search_views.search_share, json_renderer),
Rule('/share/stats/', 'get', search_views.search_share_stats, json_renderer),
Rule('/share/providers/', 'get', search_views.search_share_providers, json_renderer),
], prefix='/api/v1')
# Institution
process_rules(app, [
Rule('/institutions/<inst_id>/', 'get', institution_views.view_institution, OsfWebRenderer('institution.mako', trust=False))
])
# Project
# Web
process_rules(app, [
# '/' route loads home.mako if logged in, otherwise loads landing.mako
Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako', trust=False)),
Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('landing.mako', trust=False)),
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'get',
project_views.node.view_project,
OsfWebRenderer('project/project.mako', trust=False)
),
# Create a new subproject/component
Rule(
'/project/<pid>/newnode/',
'post',
project_views.node.project_new_node,
notemplate
),
# # TODO: Add API endpoint for tags
# Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, OsfWebRenderer('tags.mako', trust=False)),
Rule('/project/new/<pid>/beforeTemplate/', 'get',
project_views.node.project_before_template, json_renderer),
Rule(
[
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
],
'get',
project_views.node.node_contributors,
OsfWebRenderer('project/contributors.mako', trust=False),
),
Rule(
[
'/project/<pid>/settings/',
'/project/<pid>/node/<nid>/settings/',
],
'get',
project_views.node.node_setting,
OsfWebRenderer('project/settings.mako', trust=False)
),
# Permissions
Rule( # TODO: Where, if anywhere, is this route used?
[
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
],
'post',
project_views.node.project_set_privacy,
OsfWebRenderer('project/project.mako', trust=False)
),
### Logs ###
# View forks
Rule(
[
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
],
'get',
project_views.node.node_forks,
OsfWebRenderer('project/forks.mako', trust=False)
),
# Registrations
Rule(
[
'/project/<pid>/register/',
'/project/<pid>/node/<nid>/register/',
],
'get',
project_views.register.node_register_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/register/<metaschema_id>/',
'/project/<pid>/node/<nid>/register/<metaschema_id>/',
],
'get',
project_views.register.node_register_template_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'get',
project_views.node.node_registrations,
OsfWebRenderer('project/registrations.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'post',
project_views.drafts.new_draft_registration,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/',
],
'get',
project_views.drafts.edit_draft_registration_page,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
],
'get',
project_views.drafts.draft_before_register_page,
OsfWebRenderer('project/register_draft.mako', trust=False)),
Rule(
[
'/project/<pid>/retraction/',
'/project/<pid>/node/<nid>/retraction/',
],
'get',
project_views.register.node_registration_retraction_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/',
],
'get',
project_views.register.node_registration_retraction_get,
OsfWebRenderer('project/retract_registration.mako', trust=False)
),
Rule(
'/ids/<category>/<path:value>/',
'get',
project_views.register.get_referent_by_identifier,
notemplate,
),
# Statistics
Rule(
[
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
],
'get',
project_views.node.project_statistics_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/analytics/',
'/project/<pid>/node/<nid>/analytics/',
],
'get',
project_views.node.project_statistics,
OsfWebRenderer('project/statistics.mako', trust=False)
),
### Files ###
# Note: Web endpoint for files view must pass `mode` = `page` to
# include project view data and JS includes
# TODO: Start waterbutler to test
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/',
],
'get',
project_views.file.collect_file_trees,
OsfWebRenderer('project/files.mako', trust=False),
view_kwargs={'mode': 'page'},
),
Rule(
[
'/project/<pid>/files/<provider>/<path:path>/',
'/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
'/project/<pid>/files/deleted/<trashed_id>/',
'/project/<pid>/node/<nid>/files/deleted/<trashed_id>/',
],
'get',
addon_views.addon_deleted_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
# Legacy Addon view file paths
'/project/<pid>/<provider>/files/<path:path>/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/',
'/project/<pid>/<provider>/files/<path:path>/download/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/download/',
# Legacy routes for `download_file`
'/project/<pid>/osffiles/<fid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/download/',
# Legacy routes for `view_file`
'/project/<pid>/osffiles/<fid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/download/<fid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/',
'/project/<pid>/files/<fid>/',
'/project/<pid>/node/<nid>/files/<fid>/',
'/project/<pid>/files/download/<fid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/',
# Legacy routes for `download_file_by_version`
'/project/<pid>/osffiles/<fid>/version/<vid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/download/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/files/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/<fid>/version/<vid>/',
'/project/<pid>/files/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
OsfWebRenderer('project/view_file.mako', trust=False),
),
Rule(
[
# api/v1 Legacy routes for `download_file`
'/api/v1/project/<pid>/osffiles/<fid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/',
'/api/v1/project/<pid>/files/download/<fid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/',
            # api/v1 Legacy routes for `download_file_by_version`
'/api/v1/project/<pid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/files/download/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
json_renderer
),
])
# API
process_rules(app, [
Rule(
'/email/meeting/',
'post',
conference_views.meeting_hook,
json_renderer,
),
Rule('/mailchimp/hooks/', 'get', profile_views.mailchimp_get_endpoint, json_renderer),
Rule('/mailchimp/hooks/', 'post', profile_views.sync_data_from_mailchimp, json_renderer),
# Create project, used by [coming replacement]
Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),
Rule([
'/project/<pid>/contributors_abbrev/',
'/project/<pid>/node/<nid>/contributors_abbrev/',
], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),
Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, json_renderer),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'get',
project_views.node.get_pointed,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'post',
project_views.node.add_pointers,
json_renderer,
),
Rule(
[
'/pointer/',
],
'post',
project_views.node.add_pointer,
json_renderer,
),
Rule(
[
'/pointers/move/',
],
'post',
project_views.node.move_pointers,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
                '/project/<pid>/node/<nid>/pointer/',
],
'delete',
project_views.node.remove_pointer,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointer/<pointer_id>',
],
'delete',
project_views.node.remove_pointer_from_folder,
json_renderer,
),
Rule([
'/project/<pid>/get_summary/',
'/project/<pid>/node/<nid>/get_summary/',
], 'get', project_views.node.get_summary, json_renderer),
# TODO: [#OSF-6557] Route "get_children" is deprecated. Use get_readable_descendants.
Rule([
'/project/<pid>/get_children/',
'/project/<pid>/node/<nid>/get_children/',
'/project/<pid>/get_readable_descendants/',
'/project/<pid>/node/<nid>/get_readable_descendants/',
], 'get', project_views.node.get_readable_descendants, json_renderer),
Rule([
'/project/<pid>/get_forks/',
'/project/<pid>/node/<nid>/get_forks/',
], 'get', project_views.node.get_forks, json_renderer),
Rule([
'/project/<pid>/get_registrations/',
'/project/<pid>/node/<nid>/get_registrations/',
], 'get', project_views.node.get_registrations, json_renderer),
# Draft Registrations
Rule([
'/project/<pid>/drafts/',
], 'get', project_views.drafts.get_draft_registrations, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'get', project_views.drafts.get_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'put', project_views.drafts.update_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'delete', project_views.drafts.delete_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/submit/',
], 'post', project_views.drafts.submit_draft_for_review, json_renderer),
# Meta Schemas
Rule([
'/project/drafts/schemas/',
], 'get', project_views.drafts.get_metaschemas, json_renderer),
Rule([
'/project/<pid>/get_contributors/',
'/project/<pid>/node/<nid>/get_contributors/',
], 'get', project_views.contributor.get_contributors, json_renderer),
Rule([
'/project/<pid>/get_contributors_from_parent/',
'/project/<pid>/node/<nid>/get_contributors_from_parent/',
], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),
# Reorder contributors
Rule(
[
'/project/<pid>/contributors/manage/',
'/project/<pid>/node/<nid>/contributors/manage/',
],
'POST',
project_views.contributor.project_manage_contributors,
json_renderer,
),
Rule(
[
'/project/<pid>/contributor/remove/',
'/project/<pid>/node/<nid>/contributor/remove/',
],
'POST',
project_views.contributor.project_remove_contributor,
json_renderer,
),
Rule([
'/project/<pid>/get_editable_children/',
'/project/<pid>/node/<nid>/get_editable_children/',
], 'get', project_views.node.get_editable_children, json_renderer),
# Private Link
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'post', project_views.node.project_generate_private_link_post, json_renderer),
Rule([
'/project/<pid>/private_link/edit/',
'/project/<pid>/node/<nid>/private_link/edit/',
], 'put', project_views.node.project_private_link_edit, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'delete', project_views.node.remove_private_link, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'get', project_views.node.private_link_table, json_renderer),
# Create, using existing project as a template
Rule([
'/project/new/<nid>/',
], 'post', project_views.node.project_new_from_template, json_renderer),
# Update
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'put',
project_views.node.update_node,
json_renderer,
),
# Remove
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'delete',
project_views.node.component_remove,
json_renderer,
),
# Reorder components
Rule('/project/<pid>/reorder_components/', 'post',
project_views.node.project_reorder_components, json_renderer),
# Edit node
Rule([
'/project/<pid>/edit/',
'/project/<pid>/node/<nid>/edit/',
], 'post', project_views.node.edit_node, json_renderer),
# Add / remove tags
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'post', project_views.tag.project_add_tag, json_renderer),
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'delete', project_views.tag.project_remove_tag, json_renderer),
# Add / remove contributors
Rule([
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
], 'post', project_views.contributor.project_contributors_post, json_renderer),
# Forks
Rule(
[
'/project/<pid>/fork/before/',
'/project/<pid>/node/<nid>/fork/before/',
], 'get', project_views.node.project_before_fork, json_renderer,
),
Rule(
[
'/project/<pid>/fork/',
'/project/<pid>/node/<nid>/fork/',
], 'post', project_views.node.node_fork_page, json_renderer,
),
Rule(
[
'/project/<pid>/pointer/fork/',
'/project/<pid>/node/<nid>/pointer/fork/',
], 'post', project_views.node.fork_pointer, json_renderer,
),
# View forks
Rule([
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
], 'get', project_views.node.node_forks, json_renderer),
# Registrations
Rule([
'/project/<pid>/beforeregister/',
'/project/<pid>/node/<nid>/beforeregister',
], 'get', project_views.register.project_before_register, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
], 'post', project_views.drafts.register_draft_registration, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'get', project_views.register.node_register_template_page, json_renderer),
Rule([
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/'
], 'post', project_views.register.node_registration_retraction_post, json_renderer),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'get',
project_views.register.node_identifiers_get,
json_renderer,
),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'post',
project_views.register.node_identifiers_post,
json_renderer,
),
# Statistics
Rule([
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
], 'get', project_views.node.project_statistics, json_renderer),
# Permissions
Rule([
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
], 'post', project_views.node.project_set_privacy, json_renderer),
Rule([
'/project/<pid>/permissions/beforepublic/',
'/project/<pid>/node/<nid>/permissions/beforepublic/',
], 'get', project_views.node.project_before_set_public, json_renderer),
### Watching ###
Rule([
'/project/<pid>/watch/',
'/project/<pid>/node/<nid>/watch/'
], 'post', project_views.node.watch_post, json_renderer),
Rule([
'/project/<pid>/unwatch/',
'/project/<pid>/node/<nid>/unwatch/'
], 'post', project_views.node.unwatch_post, json_renderer),
Rule([
'/project/<pid>/togglewatch/',
'/project/<pid>/node/<nid>/togglewatch/'
], 'post', project_views.node.togglewatch_post, json_renderer),
# Combined files
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/'
],
'get',
project_views.file.collect_file_trees,
json_renderer,
),
# Endpoint to fetch Rubeus.JS/Hgrid-formatted data
Rule(
[
'/project/<pid>/files/grid/',
'/project/<pid>/node/<nid>/files/grid/'
],
'get',
project_views.file.grid_data,
json_renderer
),
# Settings
Rule(
'/files/auth/',
'get',
addon_views.get_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/waterbutler/logs/',
'/project/<pid>/node/<nid>/waterbutler/logs/',
],
'put',
addon_views.create_waterbutler_log,
json_renderer,
),
Rule(
[
'/registration/<pid>/callbacks/',
],
'put',
project_views.register.registration_callbacks,
json_renderer,
),
Rule(
'/settings/addons/',
'post',
profile_views.user_choose_addons,
json_renderer,
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
json_renderer,
),
Rule(
'/settings/notifications/',
'post',
profile_views.user_choose_mailing_lists,
json_renderer,
),
Rule(
'/subscriptions/',
'get',
notification_views.get_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/subscriptions/',
'/project/<pid>/node/<nid>/subscriptions/'
],
'get',
notification_views.get_node_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/tree/',
'/project/<pid>/node/<nid>/tree/'
],
'get',
project_views.node.get_node_tree,
json_renderer,
),
Rule(
'/subscriptions/',
'post',
notification_views.configure_subscription,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/addons/',
'/project/<pid>/node/<nid>/settings/addons/',
],
'post',
project_views.node.node_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/comments/',
'/project/<pid>/node/<nid>/settings/comments/',
],
'post',
project_views.node.configure_comments,
json_renderer,
),
# Invite Users
Rule(
[
'/project/<pid>/invite_contributor/',
'/project/<pid>/node/<nid>/invite_contributor/'
],
'post',
project_views.contributor.invite_contributor_post,
json_renderer
)
], prefix='/api/v1')
# Set up static routing for addons
# NOTE: We use nginx to serve static addon assets in production
addon_base_path = os.path.abspath('website/addons')
if settings.DEV_MODE:
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
addon_path = os.path.join(addon_base_path, addon, 'static')
return send_from_directory(addon_path, filename)
| apache-2.0 | 8,155,702,705,884,056,000 | 30.263286 | 147 | 0.506819 | false |
red-hood/calendarserver | txdav/caldav/datastore/scheduling/test/test_implicit.py | 1 | 65169 | ##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycalendar.datetime import DateTime
from pycalendar.timezone import Timezone
from txweb2 import responsecode
from txweb2.http import HTTPError
from twisted.internet import reactor
from twisted.internet.defer import succeed, inlineCallbacks, returnValue
from twisted.internet.task import deferLater
from twisted.trial.unittest import TestCase
from twistedcaldav.config import config
from twistedcaldav.ical import Component
from twistedcaldav.timezones import TimezoneCache
from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser
from txdav.caldav.datastore.scheduling.implicit import ImplicitScheduler
from txdav.caldav.datastore.scheduling.scheduler import ScheduleResponseQueue
from txdav.caldav.icalendarstore import AttendeeAllowedError, \
ComponentUpdateState
from txdav.caldav.datastore.sql import CalendarObject
from txdav.common.datastore.test.util import CommonCommonTests, populateCalendarsFrom
from twext.enterprise.jobqueue import JobItem
from twext.python.clsprop import classproperty
import hashlib
import sys
class FakeScheduler(object):
"""
    A fake CalDAVScheduler that does nothing except track the recipients that messages were sent to.
"""
def __init__(self, recipients):
self.recipients = recipients
def doSchedulingViaPUT(self, originator, recipients, calendar, internal_request=False, suppress_refresh=False):
self.recipients.extend(recipients)
return succeed(ScheduleResponseQueue("FAKE", responsecode.OK))
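# Tests can substitute this for the real CalDAV scheduler when only the set of
# recipients matters, not actual iTIP delivery.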
class Implicit(CommonCommonTests, TestCase):
"""
iCalendar support tests
"""
@inlineCallbacks
def setUp(self):
yield super(Implicit, self).setUp()
yield self.buildStoreAndDirectory()
@inlineCallbacks
def test_removed_attendees(self):
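        # Each entry below is (description, original calendar data, updated
        # calendar data, expected set of (attendee, recurrence-id) pairs that
        # the scheduler should report as removed/cancelled).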
data = (
(
"#1.1 Simple component, no change",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
(),
),
(
"#1.2 Simple component, one removal",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
(("mailto:[email protected]", None),),
),
(
"#1.3 Simple component, two removals",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", None),
("mailto:[email protected]", None),
),
),
(
"#2.1 Simple recurring component, two removals",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", None),
("mailto:[email protected]", None),
),
),
(
"#2.2 Simple recurring component, add exdate",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
EXDATE:20080801T120000Z
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
),
),
(
"#2.3 Simple recurring component, add multiple comma exdates",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
EXDATE:20080801T120000Z,20080901T120000Z
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 9, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 9, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 9, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
),
),
(
"#2.3 Simple recurring component, add multiple comma/property exdates",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
EXDATE:20080801T120000Z,20080901T120000Z
EXDATE:20081201T120000Z
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 9, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 9, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 9, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 12, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 12, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 12, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
),
),
(
"#3.1 Complex recurring component with same attendees, no change",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
(),
),
(
"#3.2 Complex recurring component with same attendees, change master/override",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", None),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
),
),
(
"#3.3 Complex recurring component with same attendees, change override",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
),
),
(
"#3.4 Complex recurring component with same attendees, change master",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", None),
),
),
(
"#3.5 Complex recurring component with same attendees, remove override - no exdate",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
END:VCALENDAR
""",
(),
),
(
"#3.6 Complex recurring component with same attendees, remove override - exdate",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
EXDATE:20080801T120000Z
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
),
),
(
"#4.1 Complex recurring component with different attendees, change master/override",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", None),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
),
),
(
"#4.2 Complex recurring component with different attendees, remove override - no exdate",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
),
),
(
"#4.3 Complex recurring component with different attendees, remove override - exdate",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
RECURRENCE-ID:20080801T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=MONTHLY
EXDATE:20080801T120000Z
END:VEVENT
END:VCALENDAR
""",
(
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
("mailto:[email protected]", DateTime(2008, 8, 1, 12, 0, 0, tzid=Timezone.UTCTimezone)),
),
),
)
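# Each case above is (description, original calendar data, updated calendar
# data, expected set of cancelled (attendee, instance) pairs).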
for description, calendar1, calendar2, result in data:
scheduler = ImplicitScheduler()
scheduler.resource = None
scheduler.oldcalendar = Component.fromString(calendar1)
scheduler.oldAttendeesByInstance = scheduler.oldcalendar.getAttendeesByInstance(True, onlyScheduleAgentServer=True)
scheduler.oldInstances = set(scheduler.oldcalendar.getComponentInstances())
scheduler.calendar = Component.fromString(calendar2)
txn = self.transactionUnderTest()
scheduler.txn = txn
scheduler.calendar_home = yield self.homeUnderTest(txn=txn, name=u"user01", create=True)
yield scheduler.extractCalendarData()
scheduler.findRemovedAttendees()
self.assertEqual(scheduler.cancelledAttendees, set(result), msg=description)
yield self.commit()
@inlineCallbacks
def test_process_request_excludes_includes(self):
"""
Test that processRequests correctly excludes or includes the specified attendees.
"""
data = (
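# Each entry: (attendees to exclude, attendees to restrict the refresh to,
# expected number of scheduling requests, expected recipients).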
((), None, 3, ("mailto:[email protected]", "mailto:[email protected]", "mailto:[email protected]",),),
(("mailto:[email protected]",), None, 2, ("mailto:[email protected]", "mailto:[email protected]",),),
((), ("mailto:[email protected]", "mailto:[email protected]",) , 2, ("mailto:[email protected]", "mailto:[email protected]",),),
)
calendar = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
for excludes, includes, result_count, result_set in data:
scheduler = ImplicitScheduler()
scheduler.resource = None
scheduler.calendar = Component.fromString(calendar)
scheduler.state = "organizer"
scheduler.action = "modify"
scheduler.internal_request = True
scheduler.except_attendees = excludes
scheduler.only_refresh_attendees = includes
scheduler.changed_rids = None
scheduler.reinvites = None
txn = self.transactionUnderTest()
scheduler.txn = txn
scheduler.calendar_home = yield self.homeUnderTest(txn=txn, name=u"user01", create=True)
# Get some useful information from the calendar
yield scheduler.extractCalendarData()
record = yield self.directory.recordWithUID(scheduler.calendar_home.uid())
scheduler.organizerAddress = LocalCalendarUser(
"mailto:[email protected]",
record,
)
recipients = []
def makeFakeScheduler():
return FakeScheduler(recipients)
scheduler.makeScheduler = makeFakeScheduler
count = (yield scheduler.processRequests())
self.assertEqual(count, result_count)
self.assertEqual(len(recipients), result_count)
self.assertEqual(set(recipients), set(result_set))
yield self.commit()
class ImplicitRequests(CommonCommonTests, TestCase):
"""
Test txdav.caldav.datastore.scheduling.implicit.
"""
@inlineCallbacks
def setUp(self):
yield super(ImplicitRequests, self).setUp()
yield self.buildStoreAndDirectory()
yield self.populate()
@inlineCallbacks
def populate(self):
yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
self.notifierFactory.reset()
@classproperty(cache=False)
def requirements(cls): #@NoSelf
return {
"user01": {
"calendar_1": {
},
"inbox": {
},
},
"user02": {
"calendar_1": {
},
"inbox": {
},
},
"user03": {
"calendar_1": {
},
"inbox": {
},
},
}
@inlineCallbacks
def _createCalendarObject(self, data, user, name):
calendar_collection = (yield self.calendarUnderTest(home=user))
yield calendar_collection.createCalendarObjectWithName("test.ics", Component.fromString(data))
yield self.commit()
@inlineCallbacks
def _listCalendarObjects(self, user, collection_name="calendar_1"):
collection = (yield self.calendarUnderTest(name=collection_name, home=user))
items = (yield collection.listCalendarObjects())
yield self.commit()
returnValue(items)
@inlineCallbacks
def _getCalendarData(self, user, name=None):
if name is None:
items = (yield self._listCalendarObjects(user))
name = items[0]
calendar_resource = (yield self.calendarObjectUnderTest(name=name, home=user))
calendar = (yield calendar_resource.component())
yield self.commit()
returnValue(str(calendar).replace("\r\n ", ""))
@inlineCallbacks
def _setCalendarData(self, data, user, name=None):
if name is None:
items = (yield self._listCalendarObjects(user))
name = items[0]
calendar_resource = (yield self.calendarObjectUnderTest(name=name, home=user))
yield calendar_resource.setComponent(Component.fromString(data))
yield self.commit()
@inlineCallbacks
def test_testImplicitSchedulingPUT_ScheduleState(self):
"""
Test that checkImplicitState() always returns True for any organizer, valid or not.
"""
data = (
(
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
""",
False,
),
(
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
True,
),
(
"""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""",
True,
),
)
calendar_collection = (yield self.calendarUnderTest(home="user01"))
for calendar, result in data:
calendar = Component.fromString(calendar)
scheduler = ImplicitScheduler()
doAction, isScheduleObject = (yield scheduler.testImplicitSchedulingPUT(calendar_collection, None, calendar, False))
self.assertEqual(doAction, result)
self.assertEqual(isScheduleObject, result)
@inlineCallbacks
def test_testImplicitSchedulingPUT_FixScheduleState(self):
"""
Test that testImplicitSchedulingPUT will fix an old cached schedule object state by
re-evaluating the calendar data.
"""
calendarOld = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""")
calendarNew = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""")
calendar_collection = (yield self.calendarUnderTest(home="user01"))
calresource = (yield calendar_collection.createCalendarObjectWithName(
"1.ics", calendarOld
))
calresource.isScheduleObject = False
scheduler = ImplicitScheduler()
try:
doAction, isScheduleObject = (yield scheduler.testImplicitSchedulingPUT(calendar_collection, calresource, calendarNew, False))
except Exception as e:
print e
self.fail("Exception must not be raised")
self.assertTrue(doAction)
self.assertTrue(isScheduleObject)
@inlineCallbacks
def test_testImplicitSchedulingPUT_NoChangeScheduleState(self):
"""
Test that testImplicitSchedulingPUT will prevent attendees from changing the
schedule object state.
"""
calendarOld = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
""")
calendarNew = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 02":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
""")
calendar_collection = (yield self.calendarUnderTest(home="user01"))
calresource = (yield calendar_collection.createCalendarObjectWithName(
"1.ics", calendarOld
))
calresource.isScheduleObject = False
scheduler = ImplicitScheduler()
try:
yield scheduler.testImplicitSchedulingPUT(calendar_collection, calresource, calendarNew, False)
except HTTPError:
pass
except:
self.fail("HTTPError exception must be raised")
else:
self.fail("Exception must be raised")
@inlineCallbacks
def test_doImplicitScheduling_NewOrganizerEvent(self):
"""
Test that doImplicitScheduling delivers scheduling messages to attendees.
"""
data = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
yield self._createCalendarObject(data, "user01", "test.ics")
list2 = (yield self._listCalendarObjects("user02"))
self.assertEqual(len(list2), 1)
self.assertTrue(list2[0].startswith(hashlib.md5("12345-67890").hexdigest()))
list2 = (yield self._listCalendarObjects("user02", "inbox"))
self.assertEqual(len(list2), 1)
self.assertTrue(list2[0].startswith(hashlib.md5("12345-67890").hexdigest()))
@inlineCallbacks
def test_doImplicitScheduling_UpdateOrganizerEvent(self):
"""
Test that doImplicitScheduling delivers scheduling messages to attendees.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T130000Z
DTEND:20080601T140000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
yield self._createCalendarObject(data1, "user01", "test.ics")
yield self._setCalendarData(data2, "user01", "test.ics")
list2 = (yield self._listCalendarObjects("user02"))
self.assertEqual(len(list2), 1)
self.assertTrue(list2[0].startswith(hashlib.md5("12345-67890").hexdigest()))
list2 = (yield self._listCalendarObjects("user02", "inbox"))
self.assertEqual(len(list2), 2)
self.assertTrue(list2[0].startswith(hashlib.md5("12345-67890").hexdigest()))
self.assertTrue(list2[1].startswith(hashlib.md5("12345-67890").hexdigest()))
@inlineCallbacks
def test_doImplicitScheduling_DeleteOrganizerEvent(self):
"""
Test that doImplicitScheduling delivers scheduling messages to attendees.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
yield self._createCalendarObject(data1, "user01", "test.ics")
calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01"))
yield calendar_resource.remove()
yield self.commit()
list2 = (yield self._listCalendarObjects("user02"))
self.assertEqual(len(list2), 1)
self.assertTrue(list2[0].startswith(hashlib.md5("12345-67890").hexdigest()))
list2 = (yield self._listCalendarObjects("user02", "inbox"))
self.assertEqual(len(list2), 2)
self.assertTrue(list2[0].startswith(hashlib.md5("12345-67890").hexdigest()))
self.assertTrue(list2[1].startswith(hashlib.md5("12345-67890").hexdigest()))
@inlineCallbacks
def test_doImplicitScheduling_UpdateMailtoOrganizerEvent(self):
"""
Test that doImplicitScheduling works when the existing calendar data contains a non-normalized
organizer calendar user address.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01";SCHEDULE-AGENT=NONE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20080601T130000Z
DTEND:20080601T140000Z
ORGANIZER;CN="User 01";SCHEDULE-AGENT=NONE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
self.patch(CalendarObject.CalendarObjectUpgradeWork, "delay", 1)
yield self._createCalendarObject(data1, "user01", "test.ics")
cobj = yield self.calendarObjectUnderTest(home="user01", name="test.ics")
actualVersion = CalendarObject._currentDataVersion
self.patch(CalendarObject, "_currentDataVersion", 0)
yield cobj._setComponentInternal(Component.fromString(data1), internal_state=ComponentUpdateState.RAW)
CalendarObject._currentDataVersion = actualVersion
yield self.commit()
cobj = yield self.calendarObjectUnderTest(home="user01", name="test.ics")
comp = yield cobj.component()
# Because CUA normalization happens in component() now too...
self.assertTrue(comp.getOrganizer().startswith("urn:x-uid:"))
self.assertFalse(comp.getOrganizerScheduleAgent())
yield self.commit()
yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)
cobj = yield self.calendarObjectUnderTest(home="user01", name="test.ics")
comp = yield cobj.component()
# Because CUA normalization happens in component() now too...
self.assertTrue(comp.getOrganizer().startswith("urn:x-uid:"))
self.assertFalse(comp.getOrganizerScheduleAgent())
yield self.commit()
cobj = yield self.calendarObjectUnderTest(home="user01", name="test.ics")
actualVersion = CalendarObject._currentDataVersion
self.patch(CalendarObject, "_currentDataVersion", 0)
yield cobj.setComponent(Component.fromString(data2))
CalendarObject._currentDataVersion = actualVersion
yield self.commit()
cobj = yield self.calendarObjectUnderTest(home="user01", name="test.ics")
comp = yield cobj.component()
self.assertTrue(comp.getOrganizer().startswith("urn:x-uid:"))
self.assertTrue(comp.getOrganizerScheduleAgent())
yield self.commit()
yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)
cobj = yield self.calendarObjectUnderTest(home="user01", name="test.ics")
comp = yield cobj.component()
self.assertTrue(comp.getOrganizer().startswith("urn:x-uid:"))
self.assertTrue(comp.getOrganizerScheduleAgent())
yield self.commit()
@inlineCallbacks
def test_doImplicitScheduling_AttendeeEventNoOrganizerEvent(self):
"""
Test that doImplicitScheduling handles an attendee reply with no organizer event.
"""
data = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-no-organizer
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE;PARTSTAT=ACCEPTED:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
try:
yield self._createCalendarObject(data, "user02", "test.ics")
except AttendeeAllowedError:
pass
except:
self.fail("Wrong exception raised: %s" % (sys.exc_info()[0].__name__,))
else:
self.fail("Exception not raised")
list1 = (yield self._listCalendarObjects("user01", "inbox"))
self.assertEqual(len(list1), 0)
@inlineCallbacks
def test_doImplicitScheduling_AttendeeReply(self):
"""
Test that doImplicitScheduling delivers scheduling messages to attendees who can then reply.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE;PARTSTAT=ACCEPTED:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
yield self._createCalendarObject(data1, "user01", "test.ics")
calendar1 = (yield self._getCalendarData("user01", "test.ics"))
self.assertTrue("SCHEDULE-STATUS=1.2" in calendar1)
list2 = (yield self._listCalendarObjects("user02", "inbox"))
self.assertEqual(len(list2), 1)
yield self._setCalendarData(data2, "user02")
yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)
list1 = (yield self._listCalendarObjects("user01", "inbox"))
self.assertEqual(len(list1), 1)
calendar1 = (yield self._getCalendarData("user01", "test.ics"))
self.assertTrue("SCHEDULE-STATUS=2.0" in calendar1)
self.assertTrue("PARTSTAT=ACCEPTED" in calendar1)
@inlineCallbacks
def test_doImplicitScheduling_refreshAllAttendeesExceptSome(self):
"""
Test that doImplicitScheduling delivers scheduling messages to attendees who can then reply.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE;PARTSTAT=ACCEPTED:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
# Need refreshes to occur immediately, not via reactor.callLater
self.patch(config.Scheduling.Options, "AttendeeRefreshBatch", False)
yield self._createCalendarObject(data1, "user01", "test.ics")
list1 = (yield self._listCalendarObjects("user01", "inbox"))
self.assertEqual(len(list1), 0)
calendar1 = (yield self._getCalendarData("user01", "test.ics"))
self.assertTrue("SCHEDULE-STATUS=1.2" in calendar1)
list2 = (yield self._listCalendarObjects("user02", "inbox"))
self.assertEqual(len(list2), 1)
calendar2 = (yield self._getCalendarData("user02"))
self.assertTrue("PARTSTAT=ACCEPTED" not in calendar2)
list3 = (yield self._listCalendarObjects("user03", "inbox"))
self.assertEqual(len(list3), 1)
calendar3 = (yield self._getCalendarData("user03"))
self.assertTrue("PARTSTAT=ACCEPTED" not in calendar3)
yield self._setCalendarData(data2, "user02")
yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)
list1 = (yield self._listCalendarObjects("user01", "inbox"))
self.assertEqual(len(list1), 1)
calendar1 = (yield self._getCalendarData("user01", "test.ics"))
self.assertTrue("SCHEDULE-STATUS=2.0" in calendar1)
self.assertTrue("PARTSTAT=ACCEPTED" in calendar1)
list2 = (yield self._listCalendarObjects("user02", "inbox"))
self.assertEqual(len(list2), 1)
calendar2 = (yield self._getCalendarData("user02"))
self.assertTrue("PARTSTAT=ACCEPTED" in calendar2)
list3 = (yield self._listCalendarObjects("user03", "inbox"))
self.assertEqual(len(list3), 1)
calendar3 = (yield self._getCalendarData("user03"))
self.assertTrue("PARTSTAT=ACCEPTED" in calendar3)
@inlineCallbacks
def test_doImplicitScheduling_refreshAllAttendeesExceptSome_Batched(self):
"""
Test that doImplicitScheduling delivers scheduling messages to attendees who can then reply.
Verify that batched refreshing is working.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE;PARTSTAT=ACCEPTED:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
# Need refreshes to occur immediately, not via reactor.callLater
self.patch(config.Scheduling.Options, "AttendeeRefreshBatch", 5)
self.patch(config.Scheduling.Options.WorkQueues, "AttendeeRefreshBatchDelaySeconds", 1)
yield self._createCalendarObject(data1, "user01", "test.ics")
list1 = (yield self._listCalendarObjects("user01", "inbox"))
self.assertEqual(len(list1), 0)
calendar1 = (yield self._getCalendarData("user01", "test.ics"))
self.assertTrue("SCHEDULE-STATUS=1.2" in calendar1)
list2 = (yield self._listCalendarObjects("user02", "inbox"))
self.assertEqual(len(list2), 1)
calendar2 = (yield self._getCalendarData("user02"))
self.assertTrue("PARTSTAT=ACCEPTED" not in calendar2)
list3 = (yield self._listCalendarObjects("user03", "inbox"))
self.assertEqual(len(list3), 1)
calendar3 = (yield self._getCalendarData("user03"))
self.assertTrue("PARTSTAT=ACCEPTED" not in calendar3)
yield self._setCalendarData(data2, "user02")
yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)
list1 = (yield self._listCalendarObjects("user01", "inbox"))
self.assertEqual(len(list1), 1)
calendar1 = (yield self._getCalendarData("user01", "test.ics"))
self.assertTrue("SCHEDULE-STATUS=2.0" in calendar1)
self.assertTrue("PARTSTAT=ACCEPTED" in calendar1)
list2 = (yield self._listCalendarObjects("user02", "inbox"))
self.assertEqual(len(list2), 1)
calendar2 = (yield self._getCalendarData("user02"))
self.assertTrue("PARTSTAT=ACCEPTED" in calendar2)
@inlineCallbacks
def _test_user03_refresh():
list3 = (yield self._listCalendarObjects("user03", "inbox"))
self.assertEqual(len(list3), 1)
calendar3 = (yield self._getCalendarData("user03"))
self.assertTrue("PARTSTAT=ACCEPTED" in calendar3)
yield deferLater(reactor, 2.0, _test_user03_refresh)
@inlineCallbacks
def test_doImplicitScheduling_OrganizerEventTimezoneDST(self):
"""
Test that doImplicitScheduling delivers scheduling messages to attendees. This test
creates an exception close to a DST transition to make sure timezone DST handling
is correct.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART;TZID=America/Los_Angeles:20140302T190000
DTEND;TZID=America/Los_Angeles:20140302T193000
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;UNTIL=20140309T075959Z
END:VEVENT
END:VCALENDAR
"""
data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART;TZID=America/Los_Angeles:20140302T190000
DTEND;TZID=America/Los_Angeles:20140302T193000
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;UNTIL=20140309T075959Z
END:VEVENT
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
RECURRENCE-ID;TZID=America/Los_Angeles:20140308T190000
DTSTART;TZID=America/Los_Angeles:20140308T190000
DTEND;TZID=America/Los_Angeles:20140308T193000
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
END:VCALENDAR
"""
TimezoneCache.create()
yield self._createCalendarObject(data1, "user01", "test.ics")
yield self._setCalendarData(data2, "user01", "test.ics")
list2 = (yield self._listCalendarObjects("user02"))
self.assertEqual(len(list2), 1)
self.assertTrue(list2[0].startswith(hashlib.md5("12345-67890").hexdigest()))
list2 = (yield self._listCalendarObjects("user02", "inbox"))
self.assertEqual(len(list2), 2)
self.assertTrue(list2[0].startswith(hashlib.md5("12345-67890").hexdigest()))
self.assertTrue(list2[1].startswith(hashlib.md5("12345-67890").hexdigest()))
@inlineCallbacks
def test_doImplicitScheduling_MissingAttendeeWithInvalidUser(self):
"""
Test that doImplicitMissingAttendee works when the event contains an
invalid attendee.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20140302T190000Z
DURATION:PT1H
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;UNTIL=20140309T075959Z
END:VEVENT
END:VCALENDAR
"""
yield self._createCalendarObject(data1, "user02", "test.ics")
list2 = (yield self._listCalendarObjects("user02"))
self.assertEqual(len(list2), 1)
yield self._setCalendarData(data1, "user02", "test.ics")
list2 = (yield self._listCalendarObjects("user02"))
self.assertEqual(len(list2), 1)
@inlineCallbacks
def test_doImplicitScheduling_MissingAttendeeWithiMIP(self):
"""
Test that doImplicitMissingAttendee works when iMIP is enabled and the event
contains an iMIP attendee.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20140302T190000Z
DURATION:PT1H
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;UNTIL=20140309T075959Z
END:VEVENT
END:VCALENDAR
"""
self.patch(config.Scheduling.iMIP, "Enabled", True)
self.patch(config.Scheduling.iMIP, "AddressPatterns", ["mailto:.*"])
yield self._createCalendarObject(data1, "user02", "test.ics")
list2 = (yield self._listCalendarObjects("user02"))
self.assertEqual(len(list2), 1)
yield self._setCalendarData(data1, "user02", "test.ics")
list2 = (yield self._listCalendarObjects("user02"))
self.assertEqual(len(list2), 1)
@inlineCallbacks
def test_sendAttendeeReply_ScheduleAgentNone(self):
"""
Test that sendAttendeeReply does nothing when the Organizer has
SCHEDULE-AGENT=NONE.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20140302T190000Z
DURATION:PT1H
ORGANIZER;SCHEDULE-AGENT=NONE;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;UNTIL=20140309T075959Z
END:VEVENT
END:VCALENDAR
"""
yield self._createCalendarObject(data1, "user02", "test.ics")
cobj = yield self.calendarObjectUnderTest(home="user02", name="test.ics",)
result = yield ImplicitScheduler().sendAttendeeReply(cobj._txn, cobj)
self.assertFalse(result)
@inlineCallbacks
def test_sendAttendeeReply_ScheduleAgentClient(self):
"""
Test that sendAttendeeReply does nothing when the Organizer has
SCHEDULE-AGENT=CLIENT.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20140302T190000Z
DURATION:PT1H
ORGANIZER;SCHEDULE-AGENT=CLIENT;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;UNTIL=20140309T075959Z
END:VEVENT
END:VCALENDAR
"""
yield self._createCalendarObject(data1, "user02", "test.ics")
cobj = yield self.calendarObjectUnderTest(home="user02", name="test.ics",)
result = yield ImplicitScheduler().sendAttendeeReply(cobj._txn, cobj)
self.assertFalse(result)
@inlineCallbacks
def test_sendAttendeeReply_NoAttendee(self):
"""
Test that sendAttendeeReply does nothing when the Attendee is not
listed in the event. This will not normally ever be possible, but a case
like this was seen due to a processing error elsewhere.
"""
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20080601T120000Z
DTSTART:20140302T190000Z
DURATION:PT1H
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;UNTIL=20140309T075959Z
END:VEVENT
END:VCALENDAR
"""
yield self._createCalendarObject(data1, "user02", "test.ics")
cobj = yield self.calendarObjectUnderTest(home="user02", name="test.ics",)
# Need to remove SCHEDULE-AGENT=NONE on ORGANIZER as that will have been added during the store operation
cal = yield cobj.componentForUser()
cal.removePropertyParameters("ORGANIZER", ("SCHEDULE-AGENT", "SCHEDULE-STATUS",))
result = yield ImplicitScheduler().sendAttendeeReply(cobj._txn, cobj)
self.assertFalse(result)
class ScheduleAgentFixBase(CommonCommonTests, TestCase):
"""
Test txdav.caldav.datastore.scheduling.implicit.
"""
@inlineCallbacks
def setUp(self):
yield super(ScheduleAgentFixBase, self).setUp()
yield self.buildStoreAndDirectory()
yield self.populate()
self.patch(config.Scheduling.Options, "AttendeeRefreshBatch", 0)
@inlineCallbacks
def populate(self):
yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
self.notifierFactory.reset()
metadata = {
"accessMode": "PUBLIC",
"isScheduleObject": True,
"scheduleTag": "abc",
"scheduleEtags": (),
"hasPrivateComment": False,
}
@classproperty(cache=False)
def requirements(cls): #@NoSelf
return {
"user01": {
"calendar_1": {
"organizer.ics": (cls.organizer_data, cls.metadata),
},
"inbox": {
},
},
"user02": {
"calendar_1": {
"attendee2.ics": (cls.attendee2_data, cls.metadata),
},
"inbox": {
},
},
"user03": {
"calendar_1": {
"attendee3.ics": (cls.attendee3_data, cls.metadata),
},
"inbox": {
},
},
}
class ScheduleAgentFix(ScheduleAgentFixBase):
"""
Test that implicit scheduling where an attendee has S-A=CLIENT and S-A=SERVER is
corrected when the attendee updates.
"""
organizer_data = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:20140101T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user03
RRULE:FREQ=DAILY
END:VEVENT
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
RECURRENCE-ID:20140102T100000Z
DTSTART:20140102T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user02
ATTENDEE:urn:x-uid:user03
END:VEVENT
END:VCALENDAR
"""
attendee2_data = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:20140101T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER;SCHEDULE-AGENT=CLIENT:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user03
RRULE:FREQ=DAILY
END:VEVENT
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
RECURRENCE-ID:20140102T100000Z
DTSTART:20140102T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER;SCHEDULE-AGENT=SERVER:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user02
ATTENDEE:urn:x-uid:user03
END:VEVENT
END:VCALENDAR
"""
attendee2_update_data = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:20140101T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER;SCHEDULE-AGENT=CLIENT:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user03
RRULE:FREQ=DAILY
END:VEVENT
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
RECURRENCE-ID:20140102T100000Z
DTSTART:20140102T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER;SCHEDULE-AGENT=SERVER:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE;PARTSTAT=ACCEPTED:urn:x-uid:user02
ATTENDEE:urn:x-uid:user03
END:VEVENT
END:VCALENDAR
"""
attendee3_data = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:20140101T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user03
RRULE:FREQ=DAILY
END:VEVENT
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
RECURRENCE-ID:20140102T100000Z
DTSTART:20140102T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user02
ATTENDEE:urn:x-uid:user03
END:VEVENT
END:VCALENDAR
"""
@inlineCallbacks
def test_doImplicitScheduling(self):
"""
Test that doImplicitScheduling fixes an inconsistent schedule-agent state when an
attendee stores their data.
"""
cobj = yield self.calendarObjectUnderTest(home="user02", name="attendee2.ics")
yield cobj.setComponent(Component.fromString(self.attendee2_update_data))
yield self.commit()
cobj = yield self.calendarObjectUnderTest(home="user02", name="attendee2.ics")
comp = yield cobj.component()
self.assertTrue(comp.masterComponent() is None)
self.assertTrue(comp.getOrganizerScheduleAgent())
inbox = yield self.calendarUnderTest(home="user01", name="inbox")
cobjs = yield inbox.calendarObjects()
self.assertTrue(len(cobjs) == 1)
class MissingOrganizerFix(ScheduleAgentFixBase):
"""
Test that an attendee with a copy of an event without any organizer or attendee
properties is corrected when the organizer updates.
"""
organizer_data = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:20140101T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user03
END:VEVENT
END:VCALENDAR
"""
organizer_update_data = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:20140101T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user02
ATTENDEE:urn:x-uid:user03
END:VEVENT
END:VCALENDAR
"""
attendee2_data = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:20140101T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
END:VEVENT
END:VCALENDAR
"""
attendee3_data = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:20140101T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:urn:x-uid:user01
ATTENDEE:urn:x-uid:user01
ATTENDEE:urn:x-uid:user03
END:VEVENT
END:VCALENDAR
"""
@inlineCallbacks
def test_doImplicitScheduling(self):
"""
Test that doImplicitScheduling correctly delivers the organizer's update when
the attendee already has a copy of an event without any organizer property.
"""
cobj = yield self.calendarObjectUnderTest(home="user02", name="attendee2.ics")
comp = yield cobj.component()
self.assertTrue(comp.getOrganizer() is None)
yield self.commit()
cobj = yield self.calendarObjectUnderTest(home="user01", name="organizer.ics")
yield cobj.setComponent(Component.fromString(self.organizer_update_data))
yield self.commit()
cal = yield self.calendarUnderTest(home="user02")
cobjs = yield cal.calendarObjects()
self.assertTrue(len(cobjs) == 2)
for cobj in cobjs:
comp = yield cobj.component()
if comp.resourceUID() == "[email protected]":
self.assertTrue(comp.getOrganizer() is not None)
else:
self.assertTrue(comp.getOrganizer() is None)
inbox = yield self.calendarUnderTest(home="user02", name="inbox")
cobjs = yield inbox.calendarObjects()
self.assertTrue(len(cobjs) == 1)
| apache-2.0 | -7,479,997,661,628,814,000 | 29.667765 | 144 | 0.707085 | false |
Eric89GXL/vispy | vispy/visuals/graphs/layouts/force_directed.py | 1 | 7502 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Force-Directed Graph Layout
===========================
This module contains implementations for a force-directed layout, where the
graph is modelled like a collection of springs or as a collection of
particles attracting and repelling each other. The whole graph tries to
reach a state which requires the minimum energy.
"""
import numpy as np
try:
from scipy.sparse import issparse
except ImportError:
def issparse(*args, **kwargs):
return False
from ..util import _straight_line_vertices, _rescale_layout
class fruchterman_reingold(object):
"""
Fruchterman-Reingold implementation adapted from NetworkX.
In the Fruchterman-Reingold algorithm, the whole graph is modelled as a
collection of particles; it runs a simplified particle simulation to
find a nice layout for the graph.
Parameters
----------
optimal : number
Optimal distance between nodes. Defaults to :math:`1/\\sqrt{N}` where
N is the number of nodes.
iterations : int
Number of iterations to perform for layout calculation.
pos : array
Initial positions of the nodes
Notes
-----
The algorithm is explained in more detail in the original paper [1]_.
.. [1] Fruchterman, Thomas MJ, and Edward M. Reingold. "Graph drawing by
force-directed placement." Softw., Pract. Exper. 21.11 (1991),
1129-1164.
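Examples
--------
Illustrative sketch only (not from the original docstring); the instance
is called with an adjacency matrix and iterated as a generator::
    layout = fruchterman_reingold(iterations=100)
    for node_pos, line_vertices, arrows in layout(adjacency_mat):
        pass  # feed the vertices to MarkersVisual / ArrowVisual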
"""
def __init__(self, optimal=None, iterations=50, pos=None):
self.dim = 2
self.optimal = optimal
self.iterations = iterations
self.num_nodes = None
self.pos = pos
def __call__(self, adjacency_mat, directed=False):
"""
Starts the calculation of the graph layout.
This is a generator, and after each iteration it yields the new
positions for the nodes, together with the vertices for the edges
and the arrows.
There are two solvers here: one specially adapted for SciPy sparse
matrices, and the other for larger networks.
Parameters
----------
adjacency_mat : array
The graph adjacency matrix.
directed : bool
Whether the graph is directed or not. If this is True,
it will draw arrows for directed edges.
Yields
------
layout : tuple
For each iteration of the layout calculation it yields a tuple
containing (node_vertices, line_vertices, arrow_vertices). These
vertices can be passed to the `MarkersVisual` and `ArrowVisual`.
"""
if adjacency_mat.shape[0] != adjacency_mat.shape[1]:
raise ValueError("Adjacency matrix should be square.")
self.num_nodes = adjacency_mat.shape[0]
if issparse(adjacency_mat):
# Use the sparse solver
solver = self._sparse_fruchterman_reingold
else:
solver = self._fruchterman_reingold
for result in solver(adjacency_mat, directed):
yield result
def _fruchterman_reingold(self, adjacency_mat, directed=False):
if self.optimal is None:
self.optimal = 1 / np.sqrt(self.num_nodes)
if self.pos is None:
# Random initial positions
pos = np.asarray(
np.random.random((self.num_nodes, self.dim)),
dtype=np.float32
)
else:
pos = self.pos.astype(np.float32)
# Yield initial positions
line_vertices, arrows = _straight_line_vertices(adjacency_mat, pos,
directed)
yield pos, line_vertices, arrows
# The initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t = 0.1
# Simple cooling scheme.
# Linearly step down by dt on each iteration so last iteration is
# size dt.
dt = t / float(self.iterations+1)
# The inscrutable (but fast) version
# This is still O(V^2)
# Could use multilevel methods to speed this up significantly
for iteration in range(self.iterations):
delta_pos = _calculate_delta_pos(adjacency_mat, pos, t,
self.optimal)
pos += delta_pos
_rescale_layout(pos)
# cool temperature
t -= dt
# Calculate edge vertices and arrows
line_vertices, arrows = _straight_line_vertices(adjacency_mat,
pos, directed)
yield pos, line_vertices, arrows
def _sparse_fruchterman_reingold(self, adjacency_mat, directed=False):
# Optimal distance between nodes
if self.optimal is None:
self.optimal = 1 / np.sqrt(self.num_nodes)
# Convert the adjacency matrix to a dense array for the force calculation
# Also construct the matrix in COO format for easy edge construction
adjacency_arr = adjacency_mat.toarray()
adjacency_coo = adjacency_mat.tocoo()
if self.pos is None:
# Random initial positions
pos = np.asarray(
np.random.random((self.num_nodes, self.dim)),
dtype=np.float32
)
else:
pos = self.pos.astype(np.float32)
# Yield initial positions
line_vertices, arrows = _straight_line_vertices(adjacency_coo, pos,
directed)
yield pos, line_vertices, arrows
# The initial "temperature" is about .1 of domain area (=1x1)
# This is the largest step allowed in the dynamics.
t = 0.1
# Simple cooling scheme.
# Linearly step down by dt on each iteration so last iteration is
# size dt.
dt = t / float(self.iterations+1)
for iteration in range(self.iterations):
delta_pos = _calculate_delta_pos(adjacency_arr, pos, t,
self.optimal)
pos += delta_pos
_rescale_layout(pos)
# Cool temperature
t -= dt
# Calculate line vertices
line_vertices, arrows = _straight_line_vertices(adjacency_coo,
pos, directed)
yield pos, line_vertices, arrows
def _calculate_delta_pos(adjacency_arr, pos, t, optimal):
"""Helper to calculate the delta position"""
# XXX eventually this should be refactored for the sparse case to only
# do the necessary pairwise distances
delta = pos[:, np.newaxis, :] - pos
# Distance between points
distance2 = (delta*delta).sum(axis=-1)
# Enforce minimum distance of 0.01
distance2 = np.where(distance2 < 0.0001, 0.0001, distance2)
distance = np.sqrt(distance2)
# Displacement "force"
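# Per pair this is delta * (k^2/d^2 - A*d/k): the standard Fruchterman-
# Reingold repulsion (~k^2/d) between every pair plus attraction (~d^2/k)
# along edges, projected onto x/y by the pairwise difference vectors
# (k = optimal, A = adjacency weight).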
displacement = np.zeros((len(delta), 2))
for ii in range(2):
displacement[:, ii] = (
delta[:, :, ii] *
((optimal * optimal) / (distance*distance) -
(adjacency_arr * distance) / optimal)).sum(axis=1)
length = np.sqrt((displacement**2).sum(axis=1))
length = np.where(length < 0.01, 0.1, length)
delta_pos = displacement * t / length[:, np.newaxis]
return delta_pos
| bsd-3-clause | -9,037,603,166,237,468,000 | 34.386792 | 77 | 0.589443 | false |
mrachinskiy/booltron | ops_destructive/destructive_func.py | 1 | 5385 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# Booltron super add-on for super fast booleans.
# Copyright (C) 2014-2021 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from .. import var, lib
from . import mesh_lib
def cursor_state(func):
def wrapper(*args):
bpy.context.window.cursor_set("WAIT")
result = func(*args)
bpy.context.window.cursor_set("DEFAULT")
return result
return wrapper
def prepare_objects(self, context):
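# Collect the selected operand objects (duplicating them first when
# keep_objects is set), make them single-user meshes and apply the optional
# position offset; returns the list of objects to use as boolean operands.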
ob1 = context.object
obs = context.selected_objects
if ob1.select_get():
obs.remove(ob1)
if self.keep_objects:
space_data = context.space_data
use_local_view = bool(space_data.local_view)
obs_copy = []
app = obs_copy.append
for ob in obs:
ob_copy = ob.copy()
ob_copy.data = ob.data.copy()
for coll in ob.users_collection:
coll.objects.link(ob_copy)
if use_local_view:
ob_copy.local_view_set(space_data, True)
ob_copy.select_set(True)
ob.select_set(False)
app(ob_copy)
obs = obs_copy
bpy.ops.object.make_single_user(object=True, obdata=True)
bpy.ops.object.convert(target="MESH")
if self.use_pos_offset:
lib.object_offset(obs, self.pos_offset)
return obs
@cursor_state
def execute(self, context):
Mesh = mesh_lib.Utils(self)
boolean_mod = lib.ModUtils(self).add
ob1 = context.object
obs = prepare_objects(self, context)
ob2 = obs.pop()
if obs:
if self.is_overlap:
Mesh.prepare(ob2, select=True)
for ob3 in obs:
Mesh.prepare(ob3, select=True)
boolean_mod(ob2, ob3, "UNION")
if self.cleanup:
Mesh.cleanup(ob2)
else:
obs.append(ob2)
override = {
"active_object": ob2,
"selected_editable_objects": obs,
}
bpy.ops.object.join(override)
if not self.is_overlap:
Mesh.prepare(ob2, select=True)
Mesh.prepare(ob1, select=False)
boolean_mod(ob1, ob2, self.mode)
if self.cleanup:
Mesh.cleanup(ob1)
Mesh.check(ob1)
return {"FINISHED"}
def invoke(self, context, event):
obs = []
app = obs.append
for ob in context.selected_objects:
if ob.type not in {"MESH", "CURVE", "SURFACE", "META", "FONT"}:
ob.select_set(False)
continue
app(ob)
if len(obs) < 2:
self.report({"ERROR"}, "At least two objects must be selected")
return {"CANCELLED"}
if self.first_run:
self.first_run = False
prefs = context.preferences.addons[var.ADDON_ID].preferences
self.solver = prefs.solver
self.threshold = prefs.threshold
self.use_pos_offset = prefs.use_pos_offset
self.pos_offset = prefs.pos_offset
self.merge_distance = prefs.merge_distance
self.cleanup = prefs.cleanup
self.triangulate = prefs.triangulate
self.keep_objects = event.alt
self.is_overlap = False
if len(obs) > 2 and self.mode is not None:
obs.remove(context.object)
self.is_overlap = mesh_lib.detect_overlap(context, obs, self.merge_distance)
if event.ctrl:
wm = context.window_manager
return wm.invoke_props_dialog(self)
return self.execute(context)
@cursor_state
def execute_slice(self, context):
Mesh = mesh_lib.Utils(self)
boolean_mod = lib.ModUtils(self).add
space_data = context.space_data
use_local_view = bool(space_data.local_view)
ob1 = context.object
obs = prepare_objects(self, context)
Mesh.prepare(ob1, select=False)
for ob2 in obs:
Mesh.prepare(ob2, select=True)
# Create copy of main object
# ---------------------------------
ob1_copy = ob1.copy()
ob1_copy.data = ob1.data.copy()
for coll in ob1.users_collection:
coll.objects.link(ob1_copy)
if use_local_view:
ob1_copy.local_view_set(space_data, True)
ob1_copy.select_set(True)
# Main object difference
# ---------------------------------
boolean_mod(ob1, ob2, "DIFFERENCE", remove_ob2=False)
if Mesh.check(ob1):
return {"FINISHED"}
# Copy object intersect
# ---------------------------------
boolean_mod(ob1_copy, ob2, "INTERSECT")
if Mesh.check(ob1_copy):
return {"FINISHED"}
if self.cleanup:
Mesh.cleanup(ob1)
ob1.select_set(False)
context.view_layer.objects.active = ob1_copy
return {"FINISHED"}
| gpl-3.0 | 4,390,248,468,535,569,000 | 25.268293 | 84 | 0.591272 | false |
googleapis/python-pubsub | google/pubsub_v1/services/schema_service/pagers.py | 1 | 5563 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.pubsub_v1.types import schema
class ListSchemasPager:
"""A pager for iterating through ``list_schemas`` requests.
This class thinly wraps an initial
:class:`google.pubsub_v1.types.ListSchemasResponse` object, and
provides an ``__iter__`` method to iterate through its
``schemas`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListSchemas`` requests and continue to iterate
through the ``schemas`` field on the
corresponding responses.
All the usual :class:`google.pubsub_v1.types.ListSchemasResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
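Example (illustrative; the pager is normally obtained from
``SchemaServiceClient.list_schemas`` rather than constructed directly)::
    for s in client.list_schemas(request={"parent": "projects/my-project"}):
        print(s.name)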
"""
def __init__(
self,
method: Callable[..., schema.ListSchemasResponse],
request: schema.ListSchemasRequest,
response: schema.ListSchemasResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.pubsub_v1.types.ListSchemasRequest):
The initial request object.
response (google.pubsub_v1.types.ListSchemasResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = schema.ListSchemasRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[schema.ListSchemasResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[schema.Schema]:
for page in self.pages:
yield from page.schemas
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListSchemasAsyncPager:
"""A pager for iterating through ``list_schemas`` requests.
This class thinly wraps an initial
:class:`google.pubsub_v1.types.ListSchemasResponse` object, and
provides an ``__aiter__`` method to iterate through its
``schemas`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListSchemas`` requests and continue to iterate
through the ``schemas`` field on the
corresponding responses.
All the usual :class:`google.pubsub_v1.types.ListSchemasResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
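Example (illustrative; the pager is normally obtained by awaiting
``SchemaServiceAsyncClient.list_schemas`` and consumed with ``async for``)::
    pager = await async_client.list_schemas(request={"parent": "projects/my-project"})
    async for s in pager:
        print(s.name)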
"""
def __init__(
self,
method: Callable[..., Awaitable[schema.ListSchemasResponse]],
request: schema.ListSchemasRequest,
response: schema.ListSchemasResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.pubsub_v1.types.ListSchemasRequest):
The initial request object.
response (google.pubsub_v1.types.ListSchemasResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = schema.ListSchemasRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[schema.ListSchemasResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[schema.Schema]:
async def async_generator():
async for page in self.pages:
for response in page.schemas:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| apache-2.0 | 3,531,521,777,943,690,000 | 34.433121 | 87 | 0.6421 | false |
pozetroninc/micropython | tools/codeformat.py | 1 | 5981 | #!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Damien P. George
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import glob
import itertools
import os
import re
import subprocess
# Relative to top-level repo dir.
PATHS = [
# C
"extmod/*.[ch]",
"lib/netutils/*.[ch]",
"lib/timeutils/*.[ch]",
"lib/utils/*.[ch]",
"mpy-cross/*.[ch]",
"ports/*/*.[ch]",
"ports/windows/msvc/**/*.[ch]",
"py/*.[ch]",
# Python
"drivers/**/*.py",
"examples/**/*.py",
"extmod/**/*.py",
"ports/**/*.py",
"py/**/*.py",
"tools/**/*.py",
]
EXCLUSIONS = [
# STM32 build includes generated Python code.
"ports/*/build*",
# gitignore in ports/unix ignores *.py, so also do it here.
"ports/unix/*.py",
]
# Path to repo top-level dir.
TOP = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
UNCRUSTIFY_CFG = os.path.join(TOP, "tools/uncrustify.cfg")
C_EXTS = (
".c",
".h",
)
PY_EXTS = (".py",)
FIXUP_REPLACEMENTS = ((re.compile(r"sizeof\(([a-z_]+)\) \*\(([a-z_]+)\)"), r"sizeof(\1) * (\2)"),)
def list_files(paths, exclusions=None, prefix=""):
files = set()
for pattern in paths:
files.update(glob.glob(os.path.join(prefix, pattern), recursive=True))
for pattern in exclusions or []:
files.difference_update(glob.fnmatch.filter(files, os.path.join(prefix, pattern)))
return sorted(files)
def fixup_c(filename):
# Read file.
with open(filename) as f:
lines = f.readlines()
# Write out file with fixups.
with open(filename, "w", newline="") as f:
dedent_stack = []
while lines:
# Get next line.
l = lines.pop(0)
# Dedent #'s to match indent of following line (not previous line).
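# For example (illustrative), uncrustify may leave
#     "        #if FOO"
#     "    } else {"
#     "        #endif"
# and this pass dedents the directives to line up with the "} else":
#     "    #if FOO"
#     "    } else {"
#     "    #endif"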
m = re.match(r"( +)#(if |ifdef |ifndef |elif |else|endif)", l)
if m:
indent = len(m.group(1))
directive = m.group(2)
if directive in ("if ", "ifdef ", "ifndef "):
l_next = lines[0]
indent_next = len(re.match(r"( *)", l_next).group(1))
if indent - 4 == indent_next and re.match(r" +(} else |case )", l_next):
# This #-line (and all associated ones) needs dedenting by 4 spaces.
l = l[4:]
dedent_stack.append(indent - 4)
else:
# This #-line does not need dedenting.
dedent_stack.append(-1)
else:
if dedent_stack[-1] >= 0:
# This associated #-line needs dedenting to match the #if.
indent_diff = indent - dedent_stack[-1]
assert indent_diff >= 0
l = l[indent_diff:]
if directive == "endif":
dedent_stack.pop()
# Apply general regex-based fixups.
for regex, replacement in FIXUP_REPLACEMENTS:
l = regex.sub(replacement, l)
# Write out line.
f.write(l)
assert not dedent_stack, filename
def main():
cmd_parser = argparse.ArgumentParser(description="Auto-format C and Python files.")
cmd_parser.add_argument("-c", action="store_true", help="Format C code only")
cmd_parser.add_argument("-p", action="store_true", help="Format Python code only")
cmd_parser.add_argument("files", nargs="*", help="Run on specific globs")
args = cmd_parser.parse_args()
# Setting only one of -c or -p disables the other. If both or neither are set, then do both.
format_c = args.c or not args.p
format_py = args.p or not args.c
# Expand the globs passed on the command line, or use the default globs above.
files = []
if args.files:
files = list_files(args.files)
else:
files = list_files(PATHS, EXCLUSIONS, TOP)
# Extract files matching a specific language.
def lang_files(exts):
for file in files:
if os.path.splitext(file)[1].lower() in exts:
yield file
# Run tool on N files at a time (to avoid making the command line too long).
def batch(cmd, files, N=200):
while True:
file_args = list(itertools.islice(files, N))
if not file_args:
break
subprocess.check_call(cmd + file_args)
# Format C files with uncrustify.
if format_c:
batch(["uncrustify", "-c", UNCRUSTIFY_CFG, "-lC", "--no-backup"], lang_files(C_EXTS))
for file in lang_files(C_EXTS):
fixup_c(file)
# Format Python files with black.
if format_py:
batch(["black", "-q", "--fast", "--line-length=99"], lang_files(PY_EXTS))
if __name__ == "__main__":
main()
| mit | -5,834,360,555,829,730,000 | 32.982955 | 97 | 0.584852 | false |
Archman/beamline | contrib/demo1.py | 1 | 9941 | #!/usr/bin/python
# coding: utf-8
# ## Code demonstration for using *beamline* python package to do online modeling
#
# Tong Zhang, March, 2016 (draft)
#
# For example, define lattice configuration for a 4-dipole chicane with quads:
#
# |-|---|-|
# / \
# ---||---|-| |-|---||---
#
# i.e. drift + quad + drift
# + dipole + drift + dipole + drift
# + dipole + drift + dipole
# + drift + quad + drift
#
# Below is the typical workflow and interleaved comments.
import beamline
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import copy
# ### Section 1: Magnetic Elements Modeling
# The very first step is to correctly model the physical elements (one by one). In the *beamline* package, the magnet component classes can be found in the *element* module, e.g. a quadrupole is abstracted in the *ElementQuad* class, a charge in *ElementCharge*, etc.; they all inherit from *MagBlock*.
#
# The common or shared information/configuration for all these elements can be predefined in the *MagBlock* class, e.g. we can put information like facility name, time stamp, author, etc. Common information is presumed not to change, so please define it in the first step (see STEP 1).
#
# To set an element's configuration, the method *setConf(config, type)* can be used, where 'config' is either a configuration string with a format like "k1=10.0,l=0.1" or a python dictionary like "{'k1': 10.0, 'l': 0.1}", and 'type' is the configuration type to be configured, which can be 'comm' [common configuration], 'ctrl' [control configuration], 'simu' [simulation configuration], 'misc' [miscellaneous configuration] or 'all' [all configurations].
#
# Unit conversions between EPICS PV values and real physical variables are usually required, so at the design stage the method *unitTrans(inval, direction='+', transfun=None)* was created to handle this issue. One could define the conversion function at the class level, but that approach would force all elements of the same type to share the same conversion function, which is not appropriate in real situations. Thus *transfun* is provided as an input parameter of the *unitTrans* method: a user-defined conversion function for each element object.
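# A minimal sketch of the two equivalent *setConf* call styles described above
# (illustrative only -- the element name 'Qx' and the values are made up):
#
#     qx = beamline.ElementQuad(name = 'Qx', config = "k1=10.0, l=0.1")
#     qx.setConf("k1=10.0, l=0.1", type = 'simu')          # string form
#     qx.setConf({'k1': 10.0, 'l': 0.1}, type = 'simu')    # dict form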
# #### STEP 1: define common information
#commdinfo = {'DATE': '2016-03-22', 'AUTHOR': 'Tong Zhang'}
comminfo = 'DATE = 2016-03-24, AUTHOR = Tong Zhang'
beamline.MagBlock.setCommInfo(comminfo)
# set visualization style
beamline.MagBlock.setStyleConfig(
config={'quad':{'fc':'blue', 'ec': 'blue'},
'bend':{'fc':'red', 'ec': 'red'}})
# #### STEP 2: create elements
# charge: this is a virtual element (not a physical magnet in the real accelerator), but it is a must for elegant tracking
chconf = {'total':1e-9}
q = beamline.ElementCharge(name = 'q', config = chconf)
# csrcsben, use elegant element name
# simconf holds complementary configurations for elegant tracking,
# and should be set with the setConf(simconf, type='simu') method
simconf = {"edge1_effects": 1,
"edge2_effects":1,
"hgap":0.015,
"csr":0,
"nonlinear":1,
"n_kicks":100,
"integration_order":4,
"bins":512,
"sg_halfwidth":1,
"block_csr":0,
'l':0.5,}
angle = 0.1 # rad
B1 = beamline.ElementCsrcsben(name = 'b1', config = {'angle':angle, 'e1':0, 'e2':angle})
B1.setConf(simconf, type = 'simu')
B2 = beamline.ElementCsrcsben(name = 'b2', config = {'angle':-angle, 'e1':-angle, 'e2':0})
B3 = beamline.ElementCsrcsben(name = 'b3', config = {'angle':-angle, 'e1':0, 'e2':-angle})
B4 = beamline.ElementCsrcsben(name = 'b4', config = {'angle': angle, 'e1':angle, 'e2':0})
B2.setConf(simconf, type = 'simu')
B3.setConf(simconf, type = 'simu')
B4.setConf(simconf, type = 'simu')
#print B1.getMatrix()
#print B1.printConfig(type='all')
#print B1.calcTransM(gamma = 200)
#print B1.field
#print B1.rho
#print B1.printConfig(type='all')
# drift
D0 = beamline.ElementDrift(name = 'D0', config = "l=1.0")
#D0.calcTransM(gamma = 200)
#print D0.getMatrix()
# quad
# user-defined unit conversion function,
# direction '+' means conversion from EPICS PV value to physical value,
# direction '-' means conversion from physical value to EPICS PV value,
def fUnitTrans(val, direction):
if direction == '+':
return val*4.0
else:
return val*0.25
# create instance and apply user-defined unit conversion function
Q1 = beamline.ElementQuad(name = 'Q1', config = "k1 = 10, l = 0.5")
simuconf = {'tilt':"pi 4 /"}
Q1.setConf(simuconf, type = 'simu')
# control configurations for Q1
ctrlconf = {"k1":{'pv':"sxfel:lattice:Q09",'val':''}}
Q1.setConf(ctrlconf, type = 'ctrl')
Q1.transfun = fUnitTrans # apply unit conversion function
# print 'online' configuration, 'online' will replace simulation field with control field
print Q1.dumpConfig(type='online')
#Q1.printConfig(type = 'simu')
Q1.printConfig(type = 'all')
print Q1.getK1(type='ctrl')
Q1.calcTransM(gamma = 200)
print Q1.getMatrix()
#import sys
#sys.exit(1)
# #### STEP 3: make lattice beamline
# METHOD 1: CANNOT get all configurations
# use 'ElementBeamline' class of 'element' module
#
# beamline
latele = [obj.name for obj in [q, D0, Q1, D0, B1, D0, B2, D0, D0, B3, D0, B4, D0, Q1, D0]]
latstr = '(' + ' '.join(latele) + ')'
bl = beamline.ElementBeamline(name = 'bl', config = {'lattice':latstr})
#bl = beamline.ElementBeamline(name = 'bl1', config = "lattice = (q d0 q1)")
#bl.setConf("lattice = (d,q,b)", type = 'simu')
#print bl
# METHOD 2: CAN get all configurations
# use 'Models' class of 'models' module
# change mode to 'simu' to start simulation mode;
# 'online' mode will trigger EPICS get/put processes when control configurations
# can be found in the elements' configuration.
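# (Illustratively, a purely offline run would use the same call with
#  mode = 'simu', e.g. beamline.Models(name = 'blchi', mode = 'simu'),
#  so no EPICS get/put is attempted.)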
latline_online = beamline.Models(name = 'blchi', mode = 'online')
qline = (D0, Q1, D0)
chi = (B1, D0, B2, D0, D0, B3, D0, B4)
latline_online.addElement(q, qline, chi, qline)
latline_online.draw(showfig=True,mode='fancy')
#latline_online.initPos()
import sys
sys.exit(1)
# show defined elements number
#print beamline.MagBlock.sumObjNum()
# get 'b1' element from created model
eleb1 = latline_online.getElementsByName('b1')[0]
print eleb1.name
# change b1 configuration, e.g. angle
eleb1.setConf('angle=0.5', type = 'simu')
eleb1.printConfig()
# print out all added elements
latline_online.printAllElements()
# get configuration of 'Q1'
print latline_online.getAllConfig(fmt='dict')['Q1']
eleb1.printConfig()
eleQ1all = latline_online.getElementsByName('Q1')
#map(lambda x: x.setStyle(fc='orange'), eleQ1all)
eleQ1 = latline_online.getElementsByName('Q1')[0]
eleQ1.printConfig(type='all')
# update Q1's EPICS PV value
latline_online.putCtrlConf(eleQ1, 'k1', 2.5, type = 'real')
eleQ1.printConfig(type='all')
latline_online.getAllConfig(fmt='dict')
# ### Section 2: Lattice modeling
# #### STEP 4: create Lattice instance, make simulation required input files
eleb1.setConf('angle=0.1', type = 'simu')
# e.g. '.lte' for elegant tracking, require all configurations
latins = beamline.Lattice(latline_online.getAllConfig())
latfile = os.path.join(os.getcwd(), 'tracking/test.lte')
latins.generateLatticeFile(latline_online.name, latfile)
latins.dumpAllElements()
# #### STEP 5: simulation with generated lattice file
simpath = os.path.join(os.getcwd(), 'tracking')
elefile = os.path.join(simpath, 'test.ele')
h5out = os.path.join(simpath, 'tpout.h5')
elesim = beamline.Simulator()
elesim.setMode('elegant')
elesim.setScript('runElegant.sh')
elesim.setExec('elegant')
elesim.setPath(simpath)
elesim.setInputfiles(ltefile = latfile, elefile = elefile)
elesim.doSimulation()
# data columns could be extracted from simulation output files, to memory or h5 files.
data_tp = elesim.getOutput(file = 'test.out', data = ('t', 'p' ))#, dump = h5out)
data_sSx = elesim.getOutput(file = 'test.sig', data = ('s', 'Sx' ))
data_setax = elesim.getOutput(file = 'test.twi', data = ('s', 'etax'))
# #### visualize data
fig = plt.figure(1)
ax1 = fig.add_subplot(221)
ax1.plot(data_tp[:,0],data_tp[:,1],'.')
ax1.set_xlabel('$t\,[s]$')
ax1.set_ylabel('$\gamma$')
ax2 = fig.add_subplot(222)
ax2.plot(data_sSx[:,0],data_sSx[:,1],'-')
ax2.set_ylabel('$\sigma_x\,[\mu m]$')
ax2.set_xlabel('$s\,[m]$')
ax3 = fig.add_subplot(223)
ax3.plot(data_setax[:,0],data_setax[:,1],'r-', lw=3,)
ax3.set_ylabel('$\eta_{x}\,[m]$')
ax3.set_xlabel('$s\,[m]$')
"""
# Scan parameter: final Dx v.s. angle of B1
import numpy as np
dx = []
thetaArray = np.linspace(0.05,0.3,20)
for theta in thetaArray:
eleb1.setConf({'angle':theta}, type = 'simu')
latins = beamline.Lattice(latline_online.getAllConfig())
latins.generateLatticeFile(latline_online.name, latfile)
elesim.doSimulation()
data = elesim.getOutput(file = 'test.twi', data = (['etax']))
dx.append(data[-1])
dxArray = np.array(dx)
plt.plot(thetaArray, dxArray, 'r')
"""
# #### Lattice layout visualization
# generate lattice drawing plotting objects
ptches, anotes, xr, yr = latline_online.draw(mode='fancy', showfig=False)
# overlay the lattice drawing on the ax3 subplot
ax3t = ax3.twinx()
[ax3t.add_patch(i) for i in ptches]
xr3 = ax3.get_xlim()
yr3 = ax3.get_ylim()
x0, x1 = min(xr[0],xr3[0]), max(xr[1], xr3[1])
y0, y1 = min(yr[0],yr3[0]), max(yr[1], yr3[1])
ax3t.set_xlim(x0, x1)
ax3t.set_ylim(y0, y1*5)
ax3.set_xlim(x0, x1)
ax3.set_ylim(y0, y1)
ax3.grid()
# show lattice drawing in a single plot
newptches = beamline.MagBlock.copy_patches(ptches)
#for i,val in enumerate(newptches):
# print id(newptches[i]), id(ptches[i])
ax4 = fig.add_subplot(224)
[ax4.add_patch(i) for i in newptches]
ax4.set_xlim(x0*1.1, x1*1.1)
ax4.set_ylim(y0*1.1, y1*1.1)
plt.show()
| mit | 9,197,368,168,278,072,000 | 32.136667 | 598 | 0.67458 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/model/migrate/versions/0056_workflow_outputs.py | 1 | 1144 | """
Migration script to create tables for adding explicit workflow outputs.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import logging
logging.basicConfig( level=logging.DEBUG )
log = logging.getLogger( __name__ )
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
metadata = MetaData()
WorkflowOutput_table = Table( "workflow_output", metadata,
Column( "id", Integer, primary_key=True ),
Column( "workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
Column( "output_name", String(255), nullable=True))
tables = [WorkflowOutput_table]
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
for table in tables:
try:
table.create()
except:
log.warn( "Failed to create table '%s', ignoring (might result in wrong schema)" % table.name )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
for table in tables:
table.drop()
| gpl-3.0 | 321,194,510,091,218,500 | 27.6 | 107 | 0.693182 | false |
dc3-plaso/plaso | plaso/cli/extract_analyze_tool.py | 1 | 2810 | # -*- coding: utf-8 -*-
"""The extraction and analysis CLI tool."""
import datetime
import os
from plaso.cli import status_view_tool
from plaso.lib import errors
class ExtractionAndAnalysisTool(status_view_tool.StatusViewTool):
"""Class that implements a combined extraction and analysis CLI tool."""
def __init__(self, input_reader=None, output_writer=None):
"""Initializes the CLI tool object.
Args:
input_reader (InputReader): the input reader, where None represents stdin.
output_writer (OutputWriter): the output writer, where None represents
stdout.
"""
super(ExtractionAndAnalysisTool, self).__init__(
input_reader=input_reader, output_writer=output_writer)
self._storage_file_path = None
def _GenerateStorageFileName(self):
"""Generates a name for the storage file.
    The result uses a timestamp and the basename of the source path.
Raises:
BadConfigOption: raised if the source path is not set.
"""
if not self._source_path:
raise errors.BadConfigOption(u'Please define a source (--source).')
timestamp = datetime.datetime.now()
datetime_string = timestamp.strftime(u'%Y%m%dT%H%M%S')
source_path = os.path.abspath(self._source_path)
source_name = os.path.basename(source_path)
if source_path.endswith(os.path.sep):
source_path = os.path.dirname(source_path)
if source_path == os.path.sep:
# The user passed the filesystem's root as source
source_name = u'ROOT'
else:
source_name = os.path.basename(source_path)
return u'{0:s}-{1:s}.plaso'.format(datetime_string, source_name)
def _ParseStorageFileOptions(self, options):
"""Parses the storage file options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
self._storage_file_path = self.ParseStringOption(options, u'storage_file')
if not self._storage_file_path:
self._storage_file_path = self._GenerateStorageFileName()
def AddStorageFileOptions(self, argument_group):
"""Adds the storage file options to the argument group.
Args:
argument_group (argparse._ArgumentGroup or argparse.ArgumentParser):
argument group or argument parser.
"""
argument_group.add_argument(
u'--storage_file', action=u'store', metavar=u'STORAGE_FILE', nargs=u'?',
type=str, default=None, help=u'The path of the storage file.')
def ParseOptions(self, options):
"""Parses tool specific options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
super(ExtractionAndAnalysisTool, self).ParseOptions(options)
self._ParseStorageFileOptions(options)
| apache-2.0 | 8,326,963,996,448,086,000 | 30.931818 | 80 | 0.688968 | false |
lmarent/network_agents_ver2_python | agents/GraphExecution.py | 1 | 4105 | import multiprocessing
from ProviderAgentException import ProviderException
import MySQLdb
import logging
import foundation.agent_properties
logger = logging.getLogger('presenter_application')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('presenter_logs.log')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
def load_offered_data(cursor, offer_variable):
if offer_variable != None:
sql_variable = "select name, type, function, decision_variable_id \
from simulation_offeringdata \
where id = '%d' " % (offer_variable)
cursor.execute(sql_variable)
# Fetch all the rows in a list of lists.
variables_res = cursor.fetchall()
variable_def = {}
for row in variables_res:
variable_def['id'] = offer_variable
variable_def['name'] = row[0]
variable_def['type'] = row[1]
variable_def['function'] = row[2]
variable_def['decision_variable'] = str(row[3])
else:
variable_def = {}
return variable_def
def load_graphics(cursor2, cursor3, graphics):
sql_graphics = "select b.graphic_id, c.name, b.detail, b.label_id, \
b.color_id, b.x_axis_id, b.y_axis_id, b.column1_id, \
b.column2_id, b.column3_id, b.column4_id \
from simulation_presenter_graphic a, \
simulation_axis_graphic b, \
simulation_graphic c \
where a.graphic_id = b.graphic_id \
and a.presenter_id = ( select d.id \
from simulation_presenter d \
limit 1 ) \
and c.id = a.graphic_id \
order by b.graphic_id"
cursor2.execute(sql_graphics)
# Fetch all the rows in a list of lists.
graphics_res = cursor2.fetchall()
for row in graphics_res:
# Establish detail property
if (row[2] == 1):
detail = True
else:
detail = False
# Establish label property
if ( row[3] > 0 ):
label = load_offered_data(cursor3, row[3])
else:
label = None
        # Establish color property
        if ( row[4] > 0 ):
            color = load_offered_data(cursor3, row[4])
        else:
            color = None
        colors = {}
variable_x = load_offered_data(cursor3, row[5])
variable_y = load_offered_data(cursor3, row[6])
column1 = load_offered_data(cursor3, row[7])
column2 = load_offered_data(cursor3, row[8])
column3 = load_offered_data(cursor3, row[9])
column4 = load_offered_data(cursor3, row[10])
graphics[row[0]] = {'name': row[1], 'detail': detail,
'x_axis' : variable_x, 'y_axis' : variable_y,
'label' : label, 'color' : color,
'instance_colors' : colors, 'column1' : column1,
'column2' : column2, 'column3' : column3, 'column4' : column4}
if __name__ == '__main__':
'''
The PresenterExecution starts the threads for the presenter
agents.
'''
try:
# Open database connection
db = MySQLdb.connect(foundation.agent_properties.addr_database,foundation.agent_properties.user_database,
foundation.agent_properties.user_password,foundation.agent_properties.database_name )
# prepare a cursor object using cursor() method
cursor = db.cursor()
graphics = {}
cursor3 = db.cursor()
cursor4 = db.cursor()
logger.info('Ready to load Graphics')
load_graphics(cursor3, cursor4, graphics)
print graphics
logger.info('Graphics loaded')
except ProviderException as e:
print e.__str__()
except Exception as e:
print e.__str__()
| mit | -8,014,453,808,743,486,000 | 38.471154 | 114 | 0.552741 | false |
aniversarioperu/secretariobot | setup.py | 1 | 1462 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='secretariobot',
version='0.0.0',
description='Secretario Bot',
long_description=readme + '\n\n' + history,
author='AniversarioPeru',
author_email='[email protected]',
url='https://github.com/aniversarioperu/secretariobot',
packages=[
'secretariobot',
],
package_dir={'secretariobot':
'secretariobot'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='secretariobot',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
| bsd-3-clause | -2,511,109,087,706,356,000 | 26.074074 | 66 | 0.619015 | false |
jrsmith3/refmanage | test/test_bibfile.py | 1 | 6481 | # -*- coding: utf-8 -*-
import unittest
import pathlib2 as pathlib
from refmanage import BibFile
from refmanage.ref_exceptions import UnparseableBibtexError
from pybtex.database import BibliographyData
# Base classes
# ============
class Base(unittest.TestCase):
"""
Base class for tests
This class is intended to be subclassed so that the same `setUp` method does not have to be rewritten for each class containing tests.
"""
def setUp(self):
"""
Create `Path`s to various control data
"""
self.empty = pathlib.Path("test/controls/empty.bib")
self.one = pathlib.Path("test/controls/one.bib")
self.two = pathlib.Path("test/controls/two.bib")
self.invalid = pathlib.Path("test/controls/invalid.bib")
self.one_valid_one_invalid = pathlib.Path("test/controls/one_valid_one_invalid.bib")
class Instantiation(Base):
"""
Test all aspects of instantiating an object
Includes input of wrong type, input outside of a bound, etc.
"""
def test_no_input(self):
"""
refmanage.BibFile should raise TypeError if instantiated with no input
"""
with self.assertRaises(TypeError):
BibFile()
def test_invalid_bibtex(self):
"""
refmanage.BibFile should raise UnparseableBibtexError if instantiated with a path to an unparseable file.
"""
with self.assertRaises(UnparseableBibtexError):
BibFile(self.invalid)
def test_one_valid_one_invalid_bib_type(self):
"""
refmanage.BibFile should raise UnparseableBibtexError if instantiated with a path to a file containing both valid and invalid BibTeX
"""
with self.assertRaises(UnparseableBibtexError):
BibFile(self.one_valid_one_invalid)
class Attributes(Base):
"""
Test attributes of BibFile
These tests include type checks, setting immutable attributes, etc.
"""
# Type checking
# =============
def test_path_type(self):
"""
refmanage.BibFile.path should be of type `pathlib.Path`
"""
b = BibFile(self.empty)
self.assertIsInstance(b.path, pathlib.Path)
def test_src_txt_type(self):
"""
refmanage.BibFile.src_txt should be of type unicode
"""
b = BibFile(self.empty)
self.assertIsInstance(b.src_txt, unicode)
def test_bib_type(self):
"""
refmanage.BibFile.bib should be of type `pybtex.database.BibliographyData`
"""
b = BibFile(self.two)
self.assertIsInstance(b.bib, BibliographyData)
# Immutability
# ============
# The `path`, `bib`, and `src_txt` should be immutable once the `BibFile` object has been created. In other words, these attributes should not be changeable after the fact.
def test_path_immutability(self):
"""
Attempting to set `refmanage.BibFile.path` should raise AttributeError
"""
b = BibFile(self.one)
try:
b.path = self.empty
except AttributeError:
# Attempting to set `path` attribute raises an error; test passed!
pass
else:
self.fail("BibFile.path can be set after instantiation")
def test_bib_immutability(self):
"""
Attempting to set `refmanage.BibFile.bib` should raise AttributeError
"""
b = BibFile(self.one)
bib = b.bib
try:
b.bib = bib
except AttributeError:
# Attempting to set `path` attribute raises an error; test passed!
pass
else:
self.fail("BibFile.bib can be set after instantiation")
def test_src_txt_immutability(self):
"""
Attempting to set `refmanage.BibFile.src_txt` should raise AttributeError
"""
b = BibFile(self.one)
try:
b.src_txt = "legitimate text string"
except AttributeError:
# Attempting to set `path` attribute raises an error; test passed!
pass
else:
self.fail("BibFile.src_txt can be set after instantiation")
# Value checking
# ==============
def test_empty_file_bib_length(self):
"""
refmanage.BibFile.bib should contain zero entries if instantiated with an empty file
"""
b = BibFile(self.empty)
self.assertEqual(len(b.bib.entries), 0)
def test_one_entry_bibtex_file_bib_length(self):
"""
refmanage.BibFile.bib should contain one entry if instantiated with a file containing valid BibTeX with a single entry
"""
b = BibFile(self.one)
self.assertEqual(len(b.bib.entries), 1)
def test_two_entries_bibtex_file_bib_length(self):
"""
refmanage.BibFile.bib should contain two entries if instantiated with a file containing valid BibTeX with two entries
"""
b = BibFile(self.two)
self.assertEqual(len(b.bib.entries), 2)
class MethodsInput(unittest.TestCase):
"""
Tests methods which take input parameters
Tests include: passing invalid input, etc.
"""
pass
class MethodsReturnType(Base):
"""
Tests methods' output types
"""
def test_terse_msg(self):
"""
refmanage.BibFile.terse_msg() should return a unicode
"""
b = BibFile(self.empty)
self.assertIsInstance(b.terse_msg(), unicode)
def test_verbose_msg(self):
"""
refmanage.BibFile.verbose_msg() should return a unicode
"""
b = BibFile(self.empty)
self.assertIsInstance(b.verbose_msg(), unicode)
def test_test_msg_verbose_false(self):
"""
refmanage.BibFile.test_msg(verbose=False) should return a unicode
"""
b = BibFile(self.empty)
self.assertIsInstance(b.test_msg(False), unicode)
def test_test_msg_verbose_true(self):
"""
refmanage.BibFile.test_msg(verbose=True) should return a unicode
"""
b = BibFile(self.empty)
self.assertIsInstance(b.test_msg(True), unicode)
class MethodsReturnValues(Base):
"""
Tests values of methods against known values
"""
def test_verbose_msg_valid_bibtex(self):
"""
refmanage.BibFile.verbose_msg() should return a str of zero length for an argument pointing to valid BibTeX.
"""
b = BibFile(self.two)
self.assertEqual(len(b.verbose_msg()), 0)
| mit | 5,419,868,417,544,243,000 | 30.926108 | 176 | 0.618732 | false |
Newsboy-VA/Newsboy-Core | core/core.py | 1 | 1657 | #!/usr/bin/env python3
import asyncio
import sys
import logging
import argparse
from nlu import NLU
from client_communication import VAClientHandler
from module_communication import VAModuleHandler
class VirtualAssistant(object):
''' '''
def __init__(self):
parser = argparse.ArgumentParser(
description='Start the Virtual Assistant Core.')
parser.add_argument('--port', type=int, default=55801)
parser.add_argument('--log-level', type=str.upper, default='INFO')
args = parser.parse_args()
self.log_level = args.log_level.lower()
FORMAT = '%(asctime)-15s %(levelname)-5s (PID %(process)d) %(message)s'
logging.basicConfig(
filename='{}.log'.format(self.log_level.lower()),
level=getattr(logging, self.log_level.upper()),
format=FORMAT,
)
self.loop = asyncio.get_event_loop()
self.nlu = NLU()
self.client_handler = VAClientHandler(self, args.port)
self.module_handler = VAModuleHandler(self, args.port+1)
def __enter__(self):
self.loop.run_forever()
return self
def __exit__(self, type, value, traceback):
logging.info("Shutting down...")
# Close the servers
self.client_handler.close()
self.module_handler.close()
self.loop.stop()
if sys.version_info[1] >= 6:
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
logging.shutdown()
return isinstance(value, KeyboardInterrupt)
if __name__ == "__main__":
with VirtualAssistant() as VA:
pass
| gpl-3.0 | 4,354,760,709,866,121,000 | 27.084746 | 79 | 0.613156 | false |
eshook/Forest | unittests/test_Bobs.py | 1 | 1255 | """
Copyright (c) 2017 Eric Shook. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
@author: eshook (Eric Shook, [email protected])
@contributors: <Contribute and add your name here!>
"""
from forest import *
import unittest
# Test forest/bobs/Bob.py
class TestBobs(unittest.TestCase):
def setUp(self):
self.raster100 = Raster(0,0,100,100,cellsize = 1,nrows = 100, ncols = 100)
# FIXME: Test for these cases too
#self.bobneg = Bob(-1, -1, 99, 99)
#self.bob10 = Bob(10,10,10,10)
#self.bobrect = Bob(10,20,30,40) # y=10, x=20, h=30, w=40
# FIXME: Test negative nrows, ncols, cellsize
def test_basic_raster_initial_setup(self):
self.assertEqual(self.raster100.y,0)
self.assertEqual(self.raster100.x,0)
self.assertEqual(self.raster100.h,100)
self.assertEqual(self.raster100.w,100)
self.assertEqual(self.raster100.nrows,100)
self.assertEqual(self.raster100.ncols,100)
self.assertEqual(self.raster100.cellsize,1)
# Create the TestBobs suite
test_Bobs_suite = unittest.TestLoader().loadTestsFromTestCase(TestBobs)
| bsd-3-clause | -2,300,860,208,043,709,200 | 32.026316 | 97 | 0.650199 | false |
josiah-wolf-oberholtzer/discograph | discograph/ui.py | 1 | 3715 | # -*- encoding: utf-8 -*-
import json
from flask import Blueprint
from flask import current_app
from flask import make_response
from flask import request
from flask import render_template
from flask import url_for
from discograph import exceptions
from discograph import helpers
blueprint = Blueprint('ui', __name__, template_folder='templates')
default_roles = (
'Alias',
'Member Of',
'Sublabel Of',
'Released On',
)
@blueprint.route('/')
def route__index():
import discograph
app = current_app._get_current_object()
is_a_return_visitor = request.cookies.get('is_a_return_visitor')
initial_json = 'var dgData = null;'
on_mobile = request.MOBILE
parsed_args = helpers.parse_request_args(request.args)
original_roles, original_year = parsed_args
if not original_roles:
original_roles = default_roles
multiselect_mapping = discograph.CreditRole.get_multiselect_mapping()
url = url_for(
request.endpoint,
roles=original_roles,
)
rendered_template = render_template(
'index.html',
application_url=app.config['APPLICATION_ROOT'],
initial_json=initial_json,
is_a_return_visitor=is_a_return_visitor,
multiselect_mapping=multiselect_mapping,
og_title='Disco/graph: visualizing music as a social graph',
og_url=url,
on_mobile=on_mobile,
original_roles=original_roles,
original_year=original_year,
title='Disco/graph: Visualizing music as a social graph',
)
response = make_response(rendered_template)
response.set_cookie('is_a_return_visitor', 'true')
return response
@blueprint.route('/<entity_type>/<int:entity_id>')
def route__entity_type__entity_id(entity_type, entity_id):
import discograph
app = current_app._get_current_object()
parsed_args = helpers.parse_request_args(request.args)
original_roles, original_year = parsed_args
if not original_roles:
original_roles = default_roles
if entity_type not in ('artist', 'label'):
raise exceptions.APIError(message='Bad Entity Type', status_code=404)
on_mobile = request.MOBILE
data = helpers.get_network(
entity_id,
entity_type,
on_mobile=on_mobile,
cache=True,
roles=original_roles,
)
if data is None:
raise exceptions.APIError(message='No Data', status_code=500)
initial_json = json.dumps(
data,
sort_keys=True,
indent=4,
separators=(',', ': '),
)
initial_json = 'var dgData = {};'.format(initial_json)
entity_name = data['center']['name']
is_a_return_visitor = request.cookies.get('is_a_return_visitor')
key = '{}-{}'.format(entity_type, entity_id)
#url = '/{}/{}'.format(entity_type, entity_id)
url = url_for(
request.endpoint,
entity_type=entity_type,
entity_id=entity_id,
roles=original_roles,
)
title = 'Disco/graph: {}'.format(entity_name)
multiselect_mapping = discograph.CreditRole.get_multiselect_mapping()
rendered_template = render_template(
'index.html',
application_url=app.config['APPLICATION_ROOT'],
initial_json=initial_json,
is_a_return_visitor=is_a_return_visitor,
key=key,
multiselect_mapping=multiselect_mapping,
og_title='Disco/graph: The "{}" network'.format(entity_name),
og_url=url,
on_mobile=on_mobile,
original_roles=original_roles,
original_year=original_year,
title=title,
)
response = make_response(rendered_template)
response.set_cookie('is_a_return_visitor', 'true')
return response | mit | 2,357,549,400,716,320,300 | 31.596491 | 77 | 0.644415 | false |
cl4u2/chirp | chirp/xml_ll.py | 1 | 7989 | # Copyright 2008 Dan Smith <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from chirp import chirp_common, errors
def get_memory(doc, number):
"""Extract a Memory object from @doc"""
ctx = doc.xpathNewContext()
base = "//radio/memories/memory[@location=%i]" % number
fields = ctx.xpathEval(base)
if len(fields) > 1:
raise errors.RadioError("%i memories claiming to be %i" % (len(fields),
number))
elif len(fields) == 0:
raise errors.InvalidMemoryLocation("%i does not exist" % number)
memnode = fields[0]
def _get(ext):
path = base + ext
result = ctx.xpathEval(path)
if result:
return result[0].getContent()
else:
return ""
if _get("/mode/text()") == "DV":
mem = chirp_common.DVMemory()
mem.dv_urcall = _get("/dv/urcall/text()")
mem.dv_rpt1call = _get("/dv/rpt1call/text()")
mem.dv_rpt2call = _get("/dv/rpt2call/text()")
try:
mem.dv_code = _get("/dv/digitalCode/text()")
except ValueError:
mem.dv_code = 0
else:
mem = chirp_common.Memory()
mem.number = int(memnode.prop("location"))
mem.name = _get("/longName/text()")
mem.freq = chirp_common.parse_freq(_get("/frequency/text()"))
mem.rtone = float(_get("/squelch[@id='rtone']/tone/text()"))
mem.ctone = float(_get("/squelch[@id='ctone']/tone/text()"))
mem.dtcs = int(_get("/squelch[@id='dtcs']/code/text()"), 10)
mem.dtcs_polarity = _get("/squelch[@id='dtcs']/polarity/text()")
try:
sql = _get("/squelchSetting/text()")
if sql == "rtone":
mem.tmode = "Tone"
elif sql == "ctone":
mem.tmode = "TSQL"
elif sql == "dtcs":
mem.tmode = "DTCS"
else:
mem.tmode = ""
except IndexError:
mem.tmode = ""
dmap = {"positive" : "+", "negative" : "-", "none" : ""}
dupx = _get("/duplex/text()")
mem.duplex = dmap.get(dupx, "")
mem.offset = chirp_common.parse_freq(_get("/offset/text()"))
mem.mode = _get("/mode/text()")
mem.tuning_step = float(_get("/tuningStep/text()"))
skip = _get("/skip/text()")
if skip == "none":
mem.skip = ""
else:
mem.skip = skip
#FIXME: bank support in .chirp files needs to be re-written
#bank_id = _get("/bank/@bankId")
#if bank_id:
# mem.bank = int(bank_id)
# bank_index = _get("/bank/@bankIndex")
# if bank_index:
# mem.bank_index = int(bank_index)
return mem
def set_memory(doc, mem):
"""Set @mem in @doc"""
ctx = doc.xpathNewContext()
base = "//radio/memories/memory[@location=%i]" % mem.number
fields = ctx.xpathEval(base)
if len(fields) > 1:
raise errors.RadioError("%i memories claiming to be %i" % (len(fields),
mem.number))
elif len(fields) == 1:
fields[0].unlinkNode()
radio = ctx.xpathEval("//radio/memories")[0]
memnode = radio.newChild(None, "memory", None)
memnode.newProp("location", "%i" % mem.number)
sname_filter = "[^A-Z0-9/ >-]"
sname = memnode.newChild(None, "shortName", None)
sname.addContent(re.sub(sname_filter, "", mem.name.upper()[:6]))
lname_filter = "[^.A-Za-z0-9/ >-]"
lname = memnode.newChild(None, "longName", None)
lname.addContent(re.sub(lname_filter, "", mem.name[:16]))
freq = memnode.newChild(None, "frequency", None)
freq.newProp("units", "MHz")
freq.addContent(chirp_common.format_freq(mem.freq))
rtone = memnode.newChild(None, "squelch", None)
rtone.newProp("id", "rtone")
rtone.newProp("type", "repeater")
tone = rtone.newChild(None, "tone", None)
tone.addContent("%.1f" % mem.rtone)
ctone = memnode.newChild(None, "squelch", None)
ctone.newProp("id", "ctone")
ctone.newProp("type", "ctcss")
tone = ctone.newChild(None, "tone", None)
tone.addContent("%.1f" % mem.ctone)
dtcs = memnode.newChild(None, "squelch", None)
dtcs.newProp("id", "dtcs")
dtcs.newProp("type", "dtcs")
code = dtcs.newChild(None, "code", None)
code.addContent("%03i" % mem.dtcs)
polr = dtcs.newChild(None, "polarity", None)
polr.addContent(mem.dtcs_polarity)
sset = memnode.newChild(None, "squelchSetting", None)
if mem.tmode == "Tone":
sset.addContent("rtone")
elif mem.tmode == "TSQL":
sset.addContent("ctone")
elif mem.tmode == "DTCS":
sset.addContent("dtcs")
dmap = {"+" : "positive", "-" : "negative", "" : "none"}
dupx = memnode.newChild(None, "duplex", None)
dupx.addContent(dmap[mem.duplex])
oset = memnode.newChild(None, "offset", None)
oset.newProp("units", "MHz")
oset.addContent(chirp_common.format_freq(mem.offset))
mode = memnode.newChild(None, "mode", None)
mode.addContent(mem.mode)
step = memnode.newChild(None, "tuningStep", None)
step.newProp("units", "kHz")
step.addContent("%.5f" % mem.tuning_step)
if mem.skip:
skip = memnode.newChild(None, "skip", None)
skip.addContent(mem.skip)
#FIXME: .chirp bank support needs to be redone
#if mem.bank is not None:
# bank = memnode.newChild(None, "bank", None)
# bank.newProp("bankId", str(int(mem.bank)))
# if mem.bank_index >= 0:
# bank.newProp("bankIndex", str(int(mem.bank_index)))
if isinstance(mem, chirp_common.DVMemory):
dv = memnode.newChild(None, "dv", None)
ur = dv.newChild(None, "urcall", None)
ur.addContent(mem.dv_urcall)
r1 = dv.newChild(None, "rpt1call", None)
if mem.dv_rpt1call and mem.dv_rpt1call != "*NOTUSE*":
r1.addContent(mem.dv_rpt1call)
r2 = dv.newChild(None, "rpt2call", None)
if mem.dv_rpt2call and mem.dv_rpt2call != "*NOTUSE*":
r2.addContent(mem.dv_rpt2call)
dc = dv.newChild(None, "digitalCode", None)
dc.addContent(str(mem.dv_code))
def del_memory(doc, number):
"""Remove memory @number from @doc"""
path = "//radio/memories/memory[@location=%i]" % number
ctx = doc.xpathNewContext()
fields = ctx.xpathEval(path)
for field in fields:
field.unlinkNode()
def _get_bank(node):
bank = chirp_common.Bank(node.prop("label"))
ident = int(node.prop("id"))
return ident, bank
def get_banks(doc):
"""Return a list of banks from @doc"""
path = "//radio/banks/bank"
ctx = doc.xpathNewContext()
fields = ctx.xpathEval(path)
banks = []
for field in fields:
banks.append(_get_bank(field))
def _cmp(itema, itemb):
return itema[0] - itemb[0]
banks.sort(cmp=_cmp)
return [x[1] for x in banks]
def set_banks(doc, banklist):
"""Set the list of banks in @doc"""
path = "//radio/banks/bank"
ctx = doc.xpathNewContext()
fields = ctx.xpathEval(path)
for field in fields:
field.unlinkNode()
path = "//radio/banks"
ctx = doc.xpathNewContext()
banks = ctx.xpathEval(path)[0]
i = 0
for bank in banklist:
banknode = banks.newChild(None, "bank", None)
banknode.newProp("id", "%i" % i)
banknode.newProp("label", "%s" % bank)
i += 1
| gpl-3.0 | 3,356,739,449,751,182,300 | 30.956 | 79 | 0.587182 | false |
tedkulp/bossogg | boss3/Database.py | 1 | 40212 | #Boss Ogg - A Music Server
#(c)2003 by Ted Kulp ([email protected])
#This project's homepage is: http://bossogg.wishy.org
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sqlite
import time
import os, sys
from boss3.bossexceptions import EndOfQueueException
from boss3.util import bosslog
from boss3.util.Session import *
import time
from boss3.util import UTFstring
import os
import string
import types
import thread
import threading
from boss3.metadata import id3
from boss3.metadata.id3 import getTag
try:
import boss3.ripper.ripper
from boss3.ripper.ripper import getCDDB
from boss3.ripper.ripper import pyrip
from boss3.ripper.ripper import pyrip_update
except Exception:
pass
log = bosslog.getLogger()
sql_lock = threading.RLock()
class Database:
conn = ""
dbname = ""
songcache = []
curindex = -1
genrecache = {}
artistcache = {}
albumcache = {}
getgenrestatus = False
getartiststatus = False
getmetaartiststatus = False
getalbumstatus = False
getsongstatus = False
#cursong = None
import_cursor = None
tables = {}
class _Cursor(sqlite.Cursor):
nolock=0
def getcaller(self):
f=sys._getframe(1)
f=f.f_back
return os.path.basename(f.f_code.co_filename), f.f_lineno
def execute(self, SQL, *args, **kwargs):
needlock=0
#if len(SQL)>0 and SQL.split()[0].lower() in ["delete", "update", "insert", "commit"] and not self.nolock:
needlock=1
if needlock:
sql_lock.acquire()
log.debug("lock", "Acquire lock for database writes", stack=1)
try:
log.debug("sqlquery", "SQL: "+SQL, stack=1, *args)
sqlite.Cursor.execute(self, SQL, *args)
except:
log.exception("SQL ERROR")
if "raise_except" in kwargs and kwargs["raise_except"] == 1:
if needlock:
sql_lock.release()
log.debug("lock", "Release lock for database writes", stack=1)
raise
if needlock:
sql_lock.release()
log.debug("lock", "Release lock for database writes", stack=1)
def begin(self):
self.execute("BEGIN TRANSACTION")
def commit(self):
self.execute("COMMIT TRANSACTION")
def rollback(self):
self.execute("ROLLBACK TRANSACTION")
def _cursor(self):
self.conn._checkNotClosed("cursor")
return self._Cursor(self.conn, self.conn.rowclass)
def loadTableStructures(self):
self.tables = {}
log.debug("funcs", "Database.tableStructures")
cursor = self.conn.cursor()
cursor.execute("select name,sql from sqlite_master where type='table'")
for row in cursor.fetchall():
self.tables[row[0]] = []
sql = row[1].split("\n")
for line in sql[1:-1]:
data = line.split()
field = data[0]
self.tables[row[0]].append(field)
cursor.close()
log.debug("import", "Got Table data %s", self.tables)
def connect(self, autocommit=True):
if ( (self.conn == None or self.conn == "") and self.dbname != ""):
self.conn = sqlite.connect(db=self.dbname, mode=755, autocommit=autocommit)
self.conn.cursor=self._cursor
def disconnect(self):
if (self.conn != None or self.conn != ""):
self.conn.close()
self.conn = None
def runScript(self,SQL):
cursor = self.conn.cursor()
cursor.execute(SQL)
#cursor.commit()
cursor.close()
def getSchemaVersion(self):
result = -1
cursor = self.conn.cursor()
SQL = "select versionnumber from version"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result = row['versionnumber']
return result
def setSchemaVersion(self, versionnumber):
cursor = self.conn.cursor()
SQL = "update version set versionnumber = %s"
cursor.execute(SQL, versionnumber)
def loadSongCache(self):
log.debug("funcs", "Database.loadSongCache() called")
cursor = self.conn.cursor()
SQL = """
SELECT songs.songid, songs.filename, songs.songlength, songs.flags
FROM songs, albums, artists
WHERE songs.albumid = albums.albumid and albums.artistid = artists.artistid
ORDER BY artists.artistname, albums.year, albums.albumname, songs.tracknum, songs.songname"""
cursor.execute(SQL)
i = 0
self.songcache = []
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
self.songcache.append({"filename":row['songs.filename'], "songid":row['songs.songid'], "songlength":row['songs.songlength'], "flags":row['songs.flags']})
i += 1
log.debug("cache", "Loaded %s songs into cache", i)
def getSongCacheSize(self):
if self.songcache is not None:
return len(self.songcache)
def loadState(self,player):
cursor = self.conn.cursor()
SQL = "select * from currentstate"
cursor.execute(SQL)
for row in cursor.fetchall():
player.songqueue.setCurrentIndex(row['queueindex'])
SQL = "select q.songid,s.filename,s.songlength,s.flags from queue q inner join songs s on q.songid = s.songid order by q.indexid"
cursor.execute(SQL)
i=0
for row in cursor.fetchall():
player.queueSong({"filename":row['s.filename'], "songid":row['q.songid'], "songlength":row['s.songlength'], "flags":row['s.flags']})
i += 1
if i == 0:
player.songqueue.setCurrentIndex(-1)
#if player.songqueue.currentindex > -1:
# player.songqueue.currentindex -= 1
def saveState(self,player):
cursor = self.conn.cursor()
SQL = "delete from currentstate"
cursor.execute(SQL)
SQL = "insert into currentstate (queueindex, playlistid, songid, shuffle) values (%s, %s, %s, %s)"
cursor.execute(SQL, player.songqueue.getCurrentIndex(),-1,player.songid,0)
SQL = "delete from queue"
cursor.execute(SQL)
queuesongids = player.songqueue.getSongIDs()
for i in queuesongids:
SQL = "insert into queue (songid) values (%s)"
cursor.execute(SQL, i)
def getNextSong(self, newindex = -1, shuffle = 0):
try:
if (newindex == None or newindex < 0):
self.curindex += 1
if (shuffle == None or shuffle == 0):
return self.songcache[self.curindex]
except Exception:
raise EndOfQueueException.EndOfQueueException("No songs left... need to go into stop mode")
def getRandomSong(self):
cursor = self.conn.cursor()
SQL = "select songs.songid, songs.filename, songs.songlength, songs.flags from songs order by random() limit 1"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
return {"filename":row['songs.filename'], "songid":row['songs.songid'], "songlength":row['songs.songlength'], "flags":row['songs.flags']}
raise EndOfQueueException.EndOfQueueException("No songs left... need to go into stop mode")
def getArtistInfo(self, artistid):
result = {}
cursor = self.conn.cursor()
SQL = "select artistid, aristname from artists where artistid = %s"
cursor.execute(SQL, artistid)
for row in cursor.fetchall():
log.debug("sqlresult", "XRow: %s", row)
result['artistid'] = row['artistid']
result['artistname'] = row['artistname']
return result
def getAlbumInfo(self, albumid):
result = {}
cursor = self.conn.cursor()
SQL = "select a.artistid, a.aristname, al.albumid, al.albumname, al.year from albums al inner join artists a on a.artistid = al.artistid where a.artistid = %s"
cursor.execute(SQL, albumid)
for row in cursor.fetchall():
log.debug("sqlresult", "XRow: %s", row)
result['artistid'] = row['a.artistid']
result['artistname'] = row['a.artistname']
result['albumid'] = row['a.albumid']
result['albumname'] = row['a.albumname']
result['albumyear'] = row['al.year']
return result
def getSongInfo(self, songids):
resultarray = []
cursor = self.conn.cursor()
whereclause = ""
for songid in songids:
whereclause += "songid = %s or " % songid
whereclause = whereclause[:-4]
SQL = "select s.songid, al.artistid, s.albumid, s.songname, s.bitrate, s.songlength, s.tracknum, s.filesize, s.timesplayed, s.filename, s.weight, s.flags, al.albumname, al.year, a.artistname, s.metaartistid, m.artistname from songs s inner join albums al on s.albumid = al.albumid inner join artists a on s.artistid = a.artistid outer left join artists m on m.artistid = s.metaartistid where %s" % whereclause
cursor.execute(SQL)
for row in cursor.fetchall():
result = {}
log.debug("sqlresult", "XRow: %s", row)
result['songid'] = row['s.songid']
result['albumid'] = row['s.albumid']
result['artistid'] = row['al.artistid']
result['artistname'] = row['a.artistname']
result['albumname'] = row['al.albumname']
result['songname'] = row['s.songname']
result['bitrate'] = row['s.bitrate']
result['songlength'] = row['s.songlength']
result['tracknum'] = row['s.tracknum']
result['filesize'] = row['s.filesize']
result['timesplayed'] = row['s.timesplayed']
result['filename'] = row['s.filename']
result['weight'] = row['s.weight']
result['flags'] = row['s.flags']
result['albumyear'] = row['al.year']
if row['m.artistname'] != None and row['s.metaartistid'] != '-1':
result['metaartistid'] = row['s.metaartistid']
result['metaartistname'] = row['m.artistname']
resultarray.append(result)
for result in resultarray:
songid = result['songid']
SQL = "select count(*) as thecount, type, songid from history where songid = %s group by songid, type order by songid"
result['timesstarted'] = result['timesplayed'] = result['timesrequested'] = 0
cursor.execute(SQL, songid)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
if row['type'] == "s":
result['timesstarted'] = int(row['thecount'])
elif row['type'] == "p":
result['timesplayed'] = int(row['thecount'])
elif row['type'] == "q":
result['timesrequested'] = int(row['thecount'])
if result['timesplayed'] and result['timesstarted']:
result['percentagecompleted'] = (float(result['timesplayed']) / float(result['timesstarted'])) * float(100)
else:
result['percentagecompleted'] = float(0)
result['genres'] = self.fillSongGenreHash(songid)
#Now sort them in the original order...
oldresultarray = resultarray
resultarray = []
count = 0;
for songid in songids:
for i in oldresultarray:
if i['songid'] == songid:
i['index'] = count
resultarray.append(i)
count += 1
break
return resultarray
def authUser(self, username = "", password = ""):
result = None
cursor = self.conn.cursor()
SQL = "SELECT userid, authlevel FROM users "
SQL += "WHERE username = %s AND password = %s"
cursor.execute(SQL, username, password)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result = {"userid":row['userid'],"authlevel":['authlevel']}
return result
def listArtists(self, anchor=""):
result = []
cursor = self.conn.cursor()
SQL = "SELECT artistid, artistname FROM artists "
if (anchor != None and anchor != ""):
SQL += "WHERE artistname like '%s%%' " % anchor
SQL += "ORDER BY lower(artistname) ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
if row['artistname'] != '':
result.append({"artistid":row['artistid'],"artistname":row['artistname']})
return result
def listAlbums(self, artistid=None, genreid=None, anchor=""):
result = []
cursor = self.conn.cursor()
#Look for real albums first and stick them at the top of the list
if artistid != None:
SQL = "SELECT albumid, albumname, year FROM albums WHERE artistid = %i " % artistid
if (anchor != None and anchor != ""):
SQL += "AND albumname like '%s%%%%' " % anchor.replace("'", "\\'")
SQL += "ORDER BY year, lower(albumname) ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"albumid":row['albumid'],"albumname":row['albumname'],"albumyear":row['year'],"metaartist":0})
#Now look for metaartist related albums
SQL = "SELECT a.albumid, a.albumname, a.year FROM albums a INNER JOIN songs s ON a.albumid = s.albumid WHERE s.metaartistid = %s " % artistid
if (anchor != None and anchor != ""):
SQL += "AND a.albumname like '%s%%%%' " % anchor.replace("'", "\\'")
SQL += "ORDER BY a.year, lower(a.albumname) ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"albumid":row['a.albumid'],"albumname":row['a.albumname'],"albumyear":row['a.year'],"metaartist":1})
elif genreid != None:
SQL = "SELECT DISTINCT ar.artistid, ar.artistname, a.albumid, a.albumname, a.year FROM artists ar INNER JOIN albums a ON ar.artistid = a.artistid INNER JOIN songs s ON a.albumid = s.albumid INNER JOIN genre_data gd ON gd.songid = s.songid WHERE gd.genreid = %s " % genreid
if (anchor != None and anchor != ""):
SQL += "AND albumname like '%s%%%%' " % anchor.replace("'", "\\'")
SQL += "ORDER BY lower(a.albumname), lower(ar.artistname) ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistid":row['ar.artistid'],"artistname":row['ar.artistname'],"albumid":row['a.albumid'],"albumname":row['a.albumname'],"albumyear":row['a.year'],"metaartist":0})
else:
#SQL = "SELECT ar.artistid, ar.artistname, a.albumid, a.albumname, a.year FROM albums a INNER JOIN artists ar ON ar.artistid = a.artistid ORDER BY ar.artistname, a.year, lower(a.albumname) ASC"
SQL = "SELECT ar.artistid, ar.artistname, a.albumid, a.albumname, a.year FROM albums a INNER JOIN artists ar ON ar.artistid = a.artistid ORDER BY lower(a.albumname), ar.artistname ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistid":row['ar.artistid'],"artistname":row['ar.artistname'],"albumid":row['a.albumid'],"albumname":row['a.albumname'],"albumyear":row['a.year'],"metaartist":0})
return result
def listPlaylists(self):
result = []
cursor = self.conn.cursor()
SQL = "SELECT playlistid, name, userid FROM playlists order by playlistid"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"playlistid":row['playlistid'],"playlistname":row['name'],"userid":row['userid']})
return result
def listGenres(self):
result = []
cursor = self.conn.cursor()
SQL = "SELECT genreid, genrename FROM genres order by genreid"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"genreid":row['genreid'],"genrename":row['genrename']})
return result
def incrementTimesStarted(self, songid):
cursor = self.conn.cursor()
SQL = "INSERT INTO HISTORY (songid, type, time) VALUES (%s, 's', %s)"
cursor.execute(SQL, songid, time.time())
def incrementTimesPlayed(self, songid):
cursor = self.conn.cursor()
SQL = "UPDATE songs SET timesplayed = timesplayed + 1 where songid = %s"
cursor.execute(SQL, songid)
SQL = "INSERT INTO HISTORY (songid, type, time) VALUES (%s, 'p', %s)"
cursor.execute(SQL, songid, time.time())
def getIds(self, idtype, theid):
result = []
cursor = self.conn.cursor()
SQL = "select s.filename, s.songid, s.songlength, s.flags from songs s "
if idtype == "artistid":
SQL += ", albums a where s.artistid = %d and s.albumid = a.albumid order by a.year, a.albumname, s.tracknum, s.songname" % theid
elif idtype == "albumid":
SQL += "where albumid = %d order by tracknum, songname" % theid
elif idtype == "songid":
SQL += "where songid = %d" % theid
elif idtype == "playlistid":
SQL += ", playlistdata p where p.songid = s.songid and p.playlistid = %d order by p.indexid" % theid
elif idtype == "genreid":
SQL += "INNER JOIN genre_data gd ON s.songid = gd.songid WHERE gd.genreid = %d ORDER BY s.songid" % theid
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"filename":row['s.filename'], "songid":row['s.songid'], "songlength":row['s.songlength'], "flags":row['s.flags']})
#Now grab metaartist related songs if artistid is given
if idtype == "artistid":
SQL = "select s.filename, s.songid, s.songlength, s.flags from songs s, albums a where s.metaartistid = %d and s.albumid = a.albumid order by a.year, a.albumname, s.tracknum, s.songname"
cursor.execute(SQL, theid)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"filename":row['s.filename'], "songid":row['s.songid'], "songlength":row['s.songlength'], "flags":row['s.flags']})
return result
def setQueueHistoryOnId(self, songid, userid=-1):
cursor = self.conn.cursor()
SQL = "INSERT INTO HISTORY (songid, type, time, userid) VALUES (%s, 'q', %s, %s)"
cursor.execute(SQL, songid, time.time(), userid)
def createPlaylist(self, name):
cursor = self.conn.cursor()
SQL = "SELECT playlistid from playlists where name = %s"
cursor.execute(SQL, name)
exists = -1
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
exists = 1
if (exists == -1):
now=time.time()
SQL = "INSERT into playlists (name,userid,create_date,modified_date) values (%s,-1,%s,%s)"
cursor.execute(SQL, name, now, now)
SQL = "SELECT playlistid from playlists where name = %s"
cursor.execute(SQL, name)
for row in cursor.fetchall():
exists = row['playlistid']
else:
exists = -1
return exists
def removePlaylist(self, playlistid):
cursor = self.conn.cursor()
SQL = "delete from playlistdata where playlistid = %s"
cursor.execute(SQL, playlistid)
SQL = "delete from playlists where playlistid = %s"
cursor.execute(SQL, playlistid)
def listSongs(self, artistid=None, albumid=None, playlistid=None, anchor="", getgenres=True):
log.debug("funcs", "Database.listSongs()")
result = []
cursor = self.conn.cursor()
SQL = "SELECT s.songid, s.artistid, ar.artistname, s.albumid, s.tracknum, s.songname, s.filename, s.filesize, s.bitrate, s.songlength, s.timesplayed, a.albumname, a.year, s.metaartistid, m.artistname"
if (playlistid != None and playlistid != ""):
SQL += ", p.indexid"
SQL += " FROM songs s, albums a, artists ar"
if (playlistid != None and playlistid != ""):
SQL += ", playlistdata p"
SQL += " LEFT OUTER JOIN artists m on s.metaartistid = m.artistid WHERE a.albumid = s.albumid and ar.artistid = s.artistid "
if (albumid != None and albumid != ""):
SQL += "AND s.albumid = %i " % albumid
if (artistid != None and artistid != ""):
SQL += "AND s.artistid = %i " % artistid
if (playlistid != None and playlistid != ""):
SQL += "AND p.playlistid = %i and p.songid = s.songid " % playlistid
if (anchor != None and anchor != ""):
SQL += "AND s.songname like '%s%%' " % anchor
SQL += "ORDER BY"
if playlistid != None and playlistid != "":
SQL += " p.indexid,"
SQL +=" a.year, lower(a.albumname), s.tracknum, s.songname"
cursor.execute(SQL)
for row in cursor.fetchall():
self.fillSongHash(row, result, getgenres)
if artistid != None and artistid != "":
SQL = "SELECT s.songid, s.artistid, ar.artistname, s.albumid, s.tracknum, s.songname, s.filename, s.filesize, s.bitrate, s.songlength, s.timesplayed, a.albumname, a.year, s.metaartistid, m.artistname FROM songs s, albums a, artists ar LEFT OUTER JOIN artists m on s.metaartistid = m.artistid WHERE a.albumid = s.albumid and ar.artistid = s.artistid AND s.metaartistid = %s"
cursor.execute(SQL, artistid)
for row in cursor.fetchall():
self.fillSongHash(row, result)
return result
def fillSongHash(self, row, result, getgenres=True):
log.debug("sqlresult", "Row: %s", row)
timesplayed = 0
if row['s.timesplayed'] != None:
timesplayed = row['s.timesplayed']
somesong = {"songid":row['s.songid'],"artistid":row['s.artistid'],"albumid":row['s.albumid'],"songname":row['s.songname'],"filename":row['s.filename'],"filesize":row['s.filesize'],"songlength":row['s.songlength'],"tracknum":row['s.tracknum'],"timesplayed":timesplayed,"bitrate":row['s.bitrate'],"albumname":row['a.albumname'],"albumyear":row['a.year'],"artistname":row['ar.artistname']}
if 'p.indexid' in row:
somesong['indexid'] = row['p.indexid']
if row['m.artistname'] != None and row['s.metaartistid'] != '-1':
somesong['metaartistid'] = row['s.metaartistid']
somesong['metaartistname'] = row['m.artistname']
if getgenres == True:
somesong['genres'] = self.fillSongGenreHash(row['s.songid'])
result.append(somesong)
def fillSongGenreHash(self, songid):
result = []
cursor = self.conn.cursor()
SQL = "SELECT g.genreid, g.genrename FROM genre_data d INNER JOIN genres g ON d.genreid = g.genreid WHERE d.songid = %s"
cursor.execute(SQL, songid)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"genreid":row['g.genreid'],"genrename":row['g.genrename']})
return result
def topArtists(self, numbertoget):
result = []
cursor = self.conn.cursor()
SQL = "select count(*) as thecount, a.artistname from history h inner join songs s on h.songid = s.songid inner join artists a on a.artistid = s.artistid where h.type = 'p' group by a.artistname order by thecount desc, a.artistname asc limit %s"
cursor.execute(SQL, numbertoget)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistname":row['a.artistname'],"count":int(row['thecount'])})
return result
def topAlbums(self, numbertoget):
result = []
cursor = self.conn.cursor()
SQL = "select count(*) as thecount, al.albumname, a.artistname from history h inner join songs s on h.songid = s.songid inner join albums al on s.albumid = al.albumid inner join artists a on al.artistid = a.artistid where h.type = 'p' group by a.artistname, al.albumname order by thecount desc, a.artistname asc, al.albumname asc limit %s"
cursor.execute(SQL, numbertoget)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistname":row['a.artistname'],"albumname":row['al.albumname'],"count":int(row['thecount'])})
return result
def topSongs(self, numbertoget):
result = []
cursor = self.conn.cursor()
SQL = "select count(*) as thecount, al.albumname, a.artistname, s.songname from history h inner join songs s on h.songid = s.songid inner join albums al on s.albumid = al.albumid inner join artists a on al.artistid = a.artistid where h.type = 'p' group by a.artistname, al.albumname, s.songname order by thecount desc, a.artistname asc, al.albumname asc, s.songname asc limit %s"
cursor.execute(SQL, numbertoget)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistname":row['a.artistname'],"albumname":row['al.albumname'],"songname":row['s.songname'],"count":int(row['thecount'])})
return result
def getStats(self):
result = {}
cursor = self.conn.cursor()
SQL = "select count(*) as numartists from artists"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["numartists"] = int(row['numartists'])
SQL = "select count(*) as numalbums from albums"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["numalbums"] = int(row['numalbums'])
SQL = "select count(*) as numsongs, sum(filesize) as sumfilesize, sum(songlength) as sumsec, avg(filesize) as avgfilesize, avg(songlength) as avgsec from songs"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["numsongs"] = int(row['numsongs'])
result["sumfilesize"] = float(row['sumfilesize'])
result["sumsec"] = float(row['sumsec'])
result["avgfilesize"] = float(row['avgfilesize'])
result["avgsec"] = float(row['avgsec'])
SQL = "select count(*) as songsplayed from history where type = 'p'"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["songsplayed"] = int(row['songsplayed'])
SQL = "select count(*) as songsstarted from history where type = 's'"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["songsstarted"] = int(row['songsstarted'])
return result
def importCache(self):
log.debug("funcs", "Database.importCache()")
result = []
cursor = self.conn.cursor()
SQL = "SELECT filename, modified_date FROM songs ORDER BY filename"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
# if type(row['modified_date']) is not FloatType:
# row['modified_date'] =
result.append({"filename":row['filename'],"modifieddate":row['modified_date']})
cursor.close()
return result
def getmetadata(self, filename):
log.debug("funcs", "Database.getmetadata(%s)", filename)
return getTag(filename)
def importNewSongs(self, songs):
log.debug("funcs", "Database.importNewSongs()")
cursor = self.import_cursor
cursor.begin()
try:
for song in songs:
log.debug("import", "Importing song %s as %s", song["filename"], song)
if "bitrate" in song.keys():
genreid = -1
if 'genre' in song.keys():
genreid = self._getGenre(self.checkBinary(song['genre']))
artistid = self._getArtist(self.checkBinary(song['artistname']),False)
metaartistid = -1
if 'metaartistname' in song.keys():
metaartistid = self._getArtist(self.checkBinary(song['metaartistname']),True)
albumid = self._getAlbum(self.checkBinary(song['albumname']), artistid, song['year'])
songid = self._getNSong(self.checkBinary(song['songname']),artistid,self.checkBinary(song['filename']),song['tracknum'],albumid=albumid,year=song['year'],metaartistid=metaartistid, bitrate=song["bitrate"], songlength=song["songlength"], genreid=genreid)
else:
log.debug("import", "Could not get bitrate of song %s. Assuming bad file.", song["filename"])
except:
cursor.rollback()
raise
cursor.commit()
return True
def _getNSong(self, songname, artistid, filename, tracknum, albumid="", year="", metaartistid=-1, genreid=-1, bitrate=-1, songlength=-1):
log.debug("funcs", "Database._getNSongs()")
sid = -1
songname = string.strip(songname)
filename = string.strip(filename)
cursor = self.import_cursor
statinfo = os.stat(filename)
now = time.time()
if tracknum == -1:
tracknum = 0
if filename not in self.i_songcache:
SQL = "insert into songs (songname, artistid, albumid, year, tracknum, filename, filesize, songlength, bitrate, metaartistid, create_date, modified_date, timesplayed, weight, flags) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 0, 0, 0)"
cursor.execute(SQL, songname, artistid, albumid, year, tracknum, filename, statinfo.st_size, songlength, bitrate, metaartistid, now, now)
self.getalbumstatus = True
sid = cursor.lastrowid
if genreid != -1:
SQL = "insert into genre_data(songid, genreid) VALUES (%s, %s)"
cursor.execute(SQL, sid, genreid)
self.i_songcache[filename] = sid
#TODO: Check to see if there are changes
else:
			sid = self.i_songcache[filename]
SQL = "update songs set modified_date = %s, songname = %s, artistid = %s, albumid = %s, year = %s, tracknum = %s, filename = %s, songlength = %s, bitrate = %s, metaartistid = %s, filesize = %s where songid = %s"
cursor.execute(SQL, now, songname, artistid, albumid, year, tracknum, filename, songlength, bitrate, metaartistid, statinfo.st_size, sid)
if genreid != -1:
SQL = "update genre_data set genreid=%s WHERE songid=%s"
cursor.execute(SQL, genreid, sid)
self.getalbumstatus = False
return sid
def importStart(self):
log.debug("funcs", "Database.importStart()")
session = Session()
self.genrecache = {}
self.artistcache = {}
self.albumcache = {}
self.i_songcache = {}
#self.cursong = session['xinelib'].createSong()
#self.cursong.songInit()
self.import_cursor = self.conn.cursor()
self.import_cursor.nolock=1
cursor=self.import_cursor
SQL = "select artistname,artistid from artists"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
self.artistcache[row[0]] = int(row[1])
SQL = "select artistid,albumname,albumid from albums"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
self.albumcache[str(row[0])+row[1]] = int(row[2])
SQL = "select filename,songid from songs"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
self.i_songcache[row[0]] = int(row[1])
def importEnd(self):
log.debug("funcs", "Database.importEnd()")
cursor = self.import_cursor
cursor.begin()
try:
SQL = "DELETE FROM albums WHERE albumid NOT IN (SELECT albumid FROM songs)"
cursor.execute(SQL)
SQL = "DELETE FROM artists WHERE artistid NOT IN (SELECT artistid FROM songs) and artistid NOT IN (SELECT metaartistid as artistid FROM songs)"
cursor.execute(SQL)
except:
			cursor.rollback()
raise
cursor.commit()
log.debug("import", "Import complete, loading song cache (before %d)", len(self.songcache))
try:
self.loadSongCache()
except:
log.exception("Got exception trying to upgrade song cache")
log.debug("import", "Cache update complete. Cache contains %d songs", len(self.songcache))
def importCancel(self):
log.debug("funcs", "Database.importCancel()")
def importUpload(self, filename, songdata):
log.debug("funcs", "Database.importUpload()")
log.debug("import", "getting tag info for: %s" % self.checkBinary(filename))
return getTag(self.checkBinary(filename))
def importSongs(self, somesong):
log.debug("funcs", "Database.importSongs()")
resultmem = {}
genreid = -1
if 'genrename' in somesong.keys():
genreid = self._getGenre(self.checkBinary(somesong['genrename']))
artistid = self._getArtist(self.checkBinary(somesong['artistname']),False)
metaartistid = -1
if 'metaartistname' in somesong.keys():
metaartistid = self._getArtist(self.checkBinary(somesong['metaartistname']),True)
albumid = self._getAlbum(self.checkBinary(somesong['albumname']), artistid, somesong['year'])
songid = self._getSong(self.checkBinary(somesong['songname']),artistid,self.checkBinary(somesong['filename']),somesong['tracknum'],albumid,somesong['year'],metaartistid,genreid=genreid)
resultmem['genreid'] = genreid
resultmem['artistid'] = artistid
resultmem['metaartistid'] = metaartistid
resultmem['albumid'] = albumid
resultmem['songid'] = songid
if self.getgenrestatus != -1:
resultmem['newgenreid'] = self.getgenrestatus
if self.getartiststatus != -1:
resultmem['newartistid'] = self.getartiststatus
if self.getmetaartiststatus != -1:
resultmem['newmetaartistid'] = self.getmetaartiststatus
if self.getalbumstatus != -1:
resultmem['newalbumid'] = self.getalbumstatus
if self.getsongstatus != -1:
resultmem['newsongid'] = self.getsongstatus
return resultmem
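	# Shape of the dict importSongs() expects and returns (values are illustrative
	# assumptions, not taken from the original code):
	#   somesong = {'artistname': 'Artist', 'albumname': 'Album', 'songname': 'Song',
	#               'filename': '/music/song.mp3', 'tracknum': 1, 'year': 2003,
	#               'genrename': 'Rock'}   # 'genrename'/'metaartistname' are optional
	#   ids = db.importSongs(somesong)     # returns {'songid': ..., 'artistid': ..., 'albumid': ..., ...}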
def importDelete(self, arrayofsongs):
log.debug("funcs", "Database.importDelete()")
cursor = self.import_cursor
result = 0
for somesong in arrayofsongs:
somesong = self.checkBinary(somesong)
SQL=""
if isinstance(somesong,types.IntType):
SQL = "DELETE FROM songs WHERE songid = %s"
elif isinstance(somesong,types.StringType):
SQL = "DELETE FROM songs WHERE filename = %s"
if SQL!="":
cursor.execute(SQL, somesong)
result += 1
return result
def playlistClear(self, playlistid):
cursor = self.conn.cursor()
SQL = "DELETE FROM playlistdata WHERE playlistid = %s"
cursor.execute(SQL, playlistid)
def addSongToPlaylist(self, playlistid, songid):
cursor = self.conn.cursor()
#SQL = "INSERT INTO playlistdata (playlistid, songid) VALUES (%d, %d)" % (playlistid, songid)
SQL = "INSERT INTO playlistdata (playlistid, songid, indexid) values (%d,%d,(select count(playlistdataid) from playlistdata where playlistid = %s))"
cursor.execute(SQL, playlistid, songid, playlistid)
def removeSongFromPlaylist(self, playlistid, indexid):
playlistdataid = 0
cursor = self.conn.cursor()
SQL = "DELETE from playlistdata where playlistid = %d and indexid = %s"
cursor.execute(SQL, playlistid, indexid)
SQL = "UPDATE playlistdata set indexid = indexid - 1 where playlistid = %s and indexid > %s"
cursor.execute(SQL, playlistid, indexid)
def moveSongInPlaylist(self, playlistid, index1, index2, swap=False):
songs = []
cursor = self.conn.cursor()
SQL = "SELECT songid from playlistdata where playlistid = %s order by indexid"
cursor.execute(SQL, playlistid)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
songs.append(row['songid'])
if index1 > -1 and index1 < len(songs) and index2 > -1 and index2 < len(songs):
if swap == False:
tmp = songs[index1]
songs.pop(index1)
songs.insert(index2, tmp)
else:
tmp = songs[index1]
songs[index1] = songs[index2]
songs[index2] = tmp
self.playlistClear(playlistid)
for i in songs:
self.addSongToPlaylist(playlistid, i)
def _getGenre(self, genrename):
log.debug("funcs", "Database._getGenre()")
gid = -1
genrename = string.strip(genrename)
cursor = self.import_cursor
if genrename not in self.genrecache:
SQL = "select genreid from genres where genrename = %s"
cursor.execute(SQL, genrename)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
gid = row['genreid']
			now = time.time()
			if gid == -1:
SQL = "insert into genres (genrename, create_date) values (%s, %s)"
cursor.execute(SQL, genrename, now)
self.getgenrestatus = True
SQL = "select genreid from genres where genrename = %s"
cursor.execute(SQL, genrename)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
gid = row['genreid']
else:
SQL = "update genres set genrename = %s, modified_date = %s where genreid=%s"
cursor.execute(SQL, genrename, now, gid)
self.getgenrestatus = False
self.genrecache[genrename] = gid
else:
self.getgenrestatus = -1
gid = self.genrecache[genrename]
return gid
def _getArtist(self, artistname, metaartist=False):
log.debug("funcs", "Database._getArtist()")
aid = -1
artistname = string.strip(artistname)
cursor = self.import_cursor
#See if this artist is already in the cache
if artistname not in self.artistcache:
#SQL = "select artistid from artists where artistname = %s"
#cursor.execute(SQL, artistname)
#for row in cursor.fetchall():
# log.debug("sqlresult", "Row: %s", row)
# aid = row['artistid']
now = time.time()
try:
metaartist = int(metaartist)
except:
metaartist = 0
if aid == -1:
SQL = "insert into artists (artistname, metaflag, create_date, modified_date) VALUES (%s, %s, %s, %s)"
cursor.execute(SQL, artistname, int(metaartist), now, now)
self.getartiststatus = True
aid = cursor.lastrowid
#Not needed until we have genres and/or metaartists
else:
SQL = "update artists set metaflag = %s, modified_date = %s where artistid = %s"
cursor.execute(SQL, metaartist, now, aid)
self.getartiststatus = False
self.artistcache[artistname] = aid
else:
self.getartiststatus = -1
aid = self.artistcache[artistname]
return aid
def _getAlbum(self, albumname, artistid, year):
tid = -1
albumname = string.strip(albumname)
cursor = self.import_cursor
#See if this album is already in the cache
if str(str(artistid) + albumname) not in self.albumcache:
#SQL = "select albumid from albums where albumname = %s"
#cursor.execute(SQL, albumname)
#for row in cursor.fetchall():
# log.debug("sqlresult", "Row: %s", row)
# tid = row['albumid']
now=time.time()
if tid == -1:
SQL = "insert into albums (albumname, artistid, year, create_date, modified_date) VALUES (%s, %s, %s, %s, %s)"
cursor.execute(SQL, albumname, artistid, year, now, now)
self.getalbumstatus = True
tid = cursor.lastrowid
#TODO: Check to see if there are changes
else:
#TODO: Have to add genre code
SQL = "update albums set modified_date = %s, year = %s, artistid = %s where albumid = %s"
cursor.execute(SQL, now, year, artistid, tid)
self.getalbumstatus = False
self.albumcache[str(artistid) + albumname] = tid
else:
self.getalbumstatus = -1
tid = self.albumcache[str(artistid) + albumname]
return tid
def _getSong(self, songname, artistid, filename, tracknum, albumid="", year="", metaartistid=-1, genreid=-1):
sid = -1
songname = string.strip(songname)
filename = string.strip(filename)
cursor = self.import_cursor
#SQL = "select songid from songs where filename = %s"
#cursor.execute(SQL, filename)
#for row in cursor.fetchall():
# log.debug("sqlresult", "Row: %s", row)
# sid = row['songid']
#metadata = self.cursong.getMetaData()
metadata = {}
try:
#Jef 07/30/2003: Not sure why but metadata=metadata.id3.getTag(filename) isnt working
metadata = getTag(filename)
log.debug("import", "Metadata %s", metadata)
except:
log.debug("import", "No metadata for %s", filename)
if "bitrate" not in metadata or "songlength" not in metadata:
pass
#print "before set filename"
#self.cursong.songint.filename = filename
#print "before open"
#self.cursong.songOpen()
#print "before metadata"
#metadata = self.cursong.getMetaData()
#self.cursong.songClose()
#print "after metadata"
statinfo = os.stat(filename)
songlength = 0
if metadata['songlength'] is not None and str(metadata['songlength']) != 'inf':
songlength = metadata['songlength']
now = time.time()
artistid = int(artistid)
albumid = int(albumid)
year = int(year)
if filename not in self.i_songcache:
SQL = "insert into songs (songname, artistid, albumid, year, tracknum, filename, filesize, songlength, bitrate, metaartistid, create_date, modified_date, timesplayed, weight, flags) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 0, 0, 0)"
cursor.execute(SQL, songname, artistid, albumid, year, tracknum, filename, statinfo.st_size, songlength, metadata['bitrate'], metaartistid, now, now)
self.getalbumstatus = True
sid = cursor.lastrowid
if genreid > -1:
SQL = "insert into genre_data(songid, genreid, create_date, modified_date) VALUES (%s, %s, %s, %s)"
cursor.execute(SQL, sid, genreid, now, now)
self.i_songcache[filename] = sid
#TODO: Check to see if there are changes
		else:
			# Song already known - reuse its cached songid and refresh the metadata
			sid = self.i_songcache[filename]
			SQL = "update songs set modified_date = %s, songname = %s, artistid = %s, albumid = %s, year = %s, tracknum = %s, filename = %s, songlength = %s, bitrate = %s, metaartistid = %s, filesize = %s where songid = %s"
cursor.execute(SQL, now, songname, artistid, albumid, year, tracknum, filename, metadata['songlength'], metadata['bitrate'], metaartistid, statinfo.st_size, sid)
if genreid > -1:
SQL = "update genre_data set genreid=%s, modified_date=%s WHERE songid=%s"
cursor.execute(SQL, genreid, now, sid)
self.getalbumstatus = False
return sid
def checkBinary(self, datatocheck):
return UTFstring.decode(datatocheck)
def getCDDB(self, device):
tags = getCDDB(device)
return tags
def pyrip(self, tags, filenames, device):
session = Session()
pyrip (device, session['cfg'], tags, filenames)
return 0
def pyrip_update (self):
ret = pyrip_update()
if ret['done'] == 1:
print "Importing files:"
print ret
session = Session ()
session['cmdint'].db.importstart()
i = 1
for file in ret['filenames_c']:
tag = session['cmdint'].db.importupload(file)
tag['filename'] = file
tag['metaartistname'] = ''
tag['tracknum'] = i
for key in tag:
tmp = UTFstring.encode (tag[key])
tag[key] = tmp
session['cmdint'].db.importsongs(tag)
i = i + 1
session['cmdint'].db.importend()
print "Done importing"
return ret
# vim:ts=8 sw=8 noet
| gpl-2.0 | -2,373,406,627,346,853,400 | 38.735178 | 411 | 0.680643 | false |
Tapo4ek/django-cacheops | cacheops/query.py | 1 | 20226 | # -*- coding: utf-8 -*-
import sys
import json
import threading
import six
from funcy import select_keys, cached_property, once, once_per, monkey, wraps, walk
from funcy.py2 import mapcat, map
from .cross import pickle, md5
import django
from django.utils.encoding import smart_str, force_text
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.models import Manager, Model
from django.db.models.query import QuerySet
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed
# This thing was removed in Django 1.8
try:
from django.db.models.query import MAX_GET_RESULTS
except ImportError:
MAX_GET_RESULTS = None
from .conf import model_profile, CACHEOPS_LRU, ALL_OPS
from .utils import monkey_mix, stamp_fields, func_cache_key, cached_view_fab, family_has_profile
from .redis import redis_client, handle_connection_failure, load_script
from .tree import dnfs
from .invalidation import invalidate_obj, invalidate_dict, no_invalidation
from .transaction import in_transaction
from .signals import cache_read
__all__ = ('cached_as', 'cached_view_as', 'install_cacheops')
_local_get_cache = {}
@handle_connection_failure
def cache_thing(cache_key, data, cond_dnfs, timeout):
"""
Writes data to cache and creates appropriate invalidators.
"""
assert not in_transaction()
load_script('cache_thing', CACHEOPS_LRU)(
keys=[cache_key],
args=[
pickle.dumps(data, -1),
json.dumps(cond_dnfs, default=str),
timeout
]
)
def cached_as(*samples, **kwargs):
"""
    Caches results of a function and invalidates them the same way as the given queryset.
NOTE: Ignores queryset cached ops settings, just caches.
"""
timeout = kwargs.get('timeout')
extra = kwargs.get('extra')
key_func = kwargs.get('key_func', func_cache_key)
# If we unexpectedly get list instead of queryset return identity decorator.
# Paginator could do this when page.object_list is empty.
if len(samples) == 1 and isinstance(samples[0], list):
return lambda func: func
def _get_queryset(sample):
if isinstance(sample, Model):
queryset = sample.__class__.objects.filter(pk=sample.pk)
elif isinstance(sample, type) and issubclass(sample, Model):
queryset = sample.objects.all()
else:
queryset = sample
queryset._require_cacheprofile()
return queryset
querysets = map(_get_queryset, samples)
cond_dnfs = mapcat(dnfs, querysets)
key_extra = [qs._cache_key() for qs in querysets]
key_extra.append(extra)
if not timeout:
timeout = min(qs._cacheconf['timeout'] for qs in querysets)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if in_transaction():
return func(*args, **kwargs)
cache_key = 'as:' + key_func(func, args, kwargs, key_extra)
cache_data = redis_client.get(cache_key)
cache_read.send(sender=None, func=func, hit=cache_data is not None)
if cache_data is not None:
return pickle.loads(cache_data)
result = func(*args, **kwargs)
cache_thing(cache_key, result, cond_dnfs, timeout)
return result
return wrapper
return decorator
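# A minimal usage sketch for cached_as (the Article model and queryset below are
# illustrative assumptions, not part of this module):
#
#   @cached_as(Article.objects.filter(published=True), timeout=60)
#   def published_titles():
#       return list(Article.objects.filter(published=True).values_list('title', flat=True))
#
# The cached result is written with invalidators derived from the sample
# queryset, so it is dropped whenever a matching Article is saved or deleted.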
def cached_view_as(*samples, **kwargs):
return cached_view_fab(cached_as)(*samples, **kwargs)
class QuerySetMixin(object):
@cached_property
def _cacheprofile(self):
profile = model_profile(self.model)
if profile:
self._cacheconf = profile.copy()
self._cacheconf['write_only'] = False
return profile
@cached_property
def _cloning(self):
return 1000
def _require_cacheprofile(self):
if self._cacheprofile is None:
raise ImproperlyConfigured(
'Cacheops is not enabled for %s.%s model.\n'
'If you don\'t want to cache anything by default '
'you can configure it with empty ops.'
% (self.model._meta.app_label, self.model._meta.model_name))
def _cache_key(self):
"""
Compute a cache key for this queryset
"""
md = md5()
md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
# Vary cache key for proxy models
md.update('%s.%s' % (self.model.__module__, self.model.__name__))
# Protect from field list changes in model
md.update(stamp_fields(self.model))
# Use query SQL as part of a key
try:
sql, params = self.query.get_compiler(self._db or DEFAULT_DB_ALIAS).as_sql()
try:
sql_str = sql % params
except UnicodeDecodeError:
sql_str = sql % walk(force_text, params)
md.update(smart_str(sql_str))
except EmptyResultSet:
pass
# If query results differ depending on database
if self._cacheprofile and not self._cacheprofile['db_agnostic']:
md.update(self.db)
# Thing only appeared in Django 1.9
it_class = getattr(self, '_iterable_class', None)
if it_class:
md.update('%s.%s' % (it_class.__module__, it_class.__name__))
# 'flat' attribute changes results formatting for values_list() in Django 1.8 and earlier
if hasattr(self, 'flat'):
md.update(str(self.flat))
return 'q:%s' % md.hexdigest()
def _cache_results(self, cache_key, results):
cond_dnfs = dnfs(self)
cache_thing(cache_key, results, cond_dnfs, self._cacheconf['timeout'])
def cache(self, ops=None, timeout=None, write_only=None):
"""
Enables caching for given ops
ops - a subset of {'get', 'fetch', 'count', 'exists'},
ops caching to be turned on, all enabled by default
timeout - override default cache timeout
write_only - don't try fetching from cache, still write result there
        NOTE: you can actually disable caching by omitting the corresponding ops,
.cache(ops=[]) disables caching for this queryset.
"""
self._require_cacheprofile()
if ops is None or ops == 'all':
ops = ALL_OPS
if isinstance(ops, str):
ops = {ops}
self._cacheconf['ops'] = set(ops)
if timeout is not None:
self._cacheconf['timeout'] = timeout
if write_only is not None:
self._cacheconf['write_only'] = write_only
return self
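    # Usage sketch (the Article model is an illustrative assumption):
    #   Article.objects.cache().get(pk=1)               # cache all enabled ops
    #   Article.objects.cache(ops=['get', 'fetch'])     # cache only these ops
    #   Article.objects.cache(timeout=300).filter(...)  # override the timeout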
def nocache(self):
"""
        Convenience method that turns off caching for this queryset
"""
# cache profile not present means caching is not enabled for this model
if self._cacheprofile is None:
return self
else:
return self.cache(ops=[])
def cloning(self, cloning=1000):
self._cloning = cloning
return self
def inplace(self):
return self.cloning(0)
if django.VERSION >= (1, 9):
def _clone(self, **kwargs):
if self._cloning:
return self.clone(**kwargs)
else:
self.__dict__.update(kwargs)
return self
def clone(self, **kwargs):
kwargs.setdefault('_cacheprofile', self._cacheprofile)
if hasattr(self, '_cacheconf'):
kwargs.setdefault('_cacheconf', self._cacheconf)
clone = self._no_monkey._clone(self, **kwargs)
clone._cloning = self._cloning - 1 if self._cloning else 0
return clone
else:
def _clone(self, klass=None, setup=False, **kwargs):
if self._cloning:
return self.clone(klass, setup, **kwargs)
elif klass is not None:
# HACK: monkey patch self.query.clone for single call
# to return itself instead of cloning
original_query_clone = self.query.clone
def query_clone():
self.query.clone = original_query_clone
return self.query
self.query.clone = query_clone
return self.clone(klass, setup, **kwargs)
else:
self.__dict__.update(kwargs)
return self
def clone(self, klass=None, setup=False, **kwargs):
kwargs.setdefault('_cacheprofile', self._cacheprofile)
if hasattr(self, '_cacheconf'):
kwargs.setdefault('_cacheconf', self._cacheconf)
clone = self._no_monkey._clone(self, klass, setup, **kwargs)
clone._cloning = self._cloning - 1 if self._cloning else 0
return clone
def iterator(self):
# If cache is not enabled or in transaction just fall back
if not self._cacheprofile or 'fetch' not in self._cacheconf['ops'] \
or in_transaction():
return self._no_monkey.iterator(self)
cache_key = self._cache_key()
if not self._cacheconf['write_only'] and not self._for_write:
# Trying get data from cache
cache_data = redis_client.get(cache_key)
cache_read.send(sender=self.model, func=None, hit=cache_data is not None)
if cache_data is not None:
return iter(pickle.loads(cache_data))
        # Cache miss - fetch data from the overridden implementation
def iterate():
# NOTE: we are using self._result_cache to avoid fetching-while-fetching bug #177
self._result_cache = []
for obj in self._no_monkey.iterator(self):
self._result_cache.append(obj)
yield obj
self._cache_results(cache_key, self._result_cache)
return iterate()
def count(self):
if self._cacheprofile and 'count' in self._cacheconf['ops']:
            # Optimization borrowed from the overridden method:
# if queryset cache is already filled just return its len
if self._result_cache is not None:
return len(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.count(self))()
else:
return self._no_monkey.count(self)
def get(self, *args, **kwargs):
# .get() uses the same .iterator() method to fetch data,
# so here we add 'fetch' to ops
if self._cacheprofile and 'get' in self._cacheconf['ops']:
# NOTE: local_get=True enables caching of simple gets in local memory,
# which is very fast, but not invalidated.
# Don't bother with Q-objects, select_related and previous filters,
            # simple gets - that's what we are really up to here.
if self._cacheprofile['local_get'] \
and not args \
and not self.query.select_related \
and not self.query.where.children:
                # NOTE: We use a simpler way to generate a cache key to cut costs.
                # Some day it could produce the same key for different requests.
key = (self.__class__, self.model) + tuple(sorted(kwargs.items()))
try:
return _local_get_cache[key]
except KeyError:
_local_get_cache[key] = self._no_monkey.get(self, *args, **kwargs)
return _local_get_cache[key]
except TypeError:
# If some arg is unhashable we can't save it to dict key,
# we just skip local cache in that case
pass
if 'fetch' in self._cacheconf['ops']:
qs = self
else:
qs = self._clone().cache()
else:
qs = self
return qs._no_monkey.get(qs, *args, **kwargs)
def exists(self):
if self._cacheprofile and 'exists' in self._cacheconf['ops']:
if self._result_cache is not None:
return bool(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.exists(self))()
else:
return self._no_monkey.exists(self)
def bulk_create(self, objs, batch_size=None):
objs = self._no_monkey.bulk_create(self, objs, batch_size=batch_size)
if family_has_profile(self.model):
for obj in objs:
invalidate_obj(obj)
return objs
def invalidated_update(self, **kwargs):
clone = self._clone().nocache()
clone._for_write = True # affects routing
objects = list(clone.iterator()) # bypass queryset cache
rows = clone.update(**kwargs)
objects.extend(clone.iterator())
for obj in objects:
invalidate_obj(obj)
return rows
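    # Note: a plain .update() never touches individual model instances, so no
    # per-object invalidation can happen; invalidated_update() fetches the
    # affected rows before and after the UPDATE and invalidates each of them.
    # Usage sketch (model/field names are illustrative assumptions):
    #   Article.objects.filter(draft=True).invalidated_update(draft=False)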
def connect_first(signal, receiver, sender):
old_receivers = signal.receivers
signal.receivers = []
signal.connect(receiver, sender=sender)
signal.receivers += old_receivers
# We need to stash old object before Model.save() to invalidate on its properties
_old_objs = threading.local()
class ManagerMixin(object):
@once_per('cls')
def _install_cacheops(self, cls):
cls._cacheprofile = model_profile(cls)
if family_has_profile(cls):
# Set up signals
connect_first(pre_save, self._pre_save, sender=cls)
connect_first(post_save, self._post_save, sender=cls)
connect_first(post_delete, self._post_delete, sender=cls)
# Install auto-created models as their module attributes to make them picklable
module = sys.modules[cls.__module__]
if not hasattr(module, cls.__name__):
setattr(module, cls.__name__, cls)
def contribute_to_class(self, cls, name):
self._no_monkey.contribute_to_class(self, cls, name)
# Django 1.7+ migrations create lots of fake models, just skip them
        # NOTE: we do it here rather than inside _install_cacheops()
# because we don't want @once_per() to hold refs to all of them.
if cls.__module__ != '__fake__':
self._install_cacheops(cls)
def _pre_save(self, sender, instance, **kwargs):
if instance.pk is not None and not no_invalidation.active:
try:
_old_objs.__dict__[sender, instance.pk] = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
pass
def _post_save(self, sender, instance, **kwargs):
# Invoke invalidations for both old and new versions of saved object
old = _old_objs.__dict__.pop((sender, instance.pk), None)
if old:
invalidate_obj(old)
invalidate_obj(instance)
# NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
# but its base having one. Or vice versa.
# We still need to invalidate in this case, but cache on save better be skipped.
if not instance._cacheprofile or in_transaction():
return
# Enabled cache_on_save makes us write saved object to cache.
# Later it can be retrieved with .get(<cache_on_save_field>=<value>)
# <cache_on_save_field> is pk unless specified.
# This sweet trick saves a db request and helps with slave lag.
cache_on_save = instance._cacheprofile.get('cache_on_save')
if cache_on_save:
# HACK: We get this object "from field" so it can contain
# some undesirable attributes or other objects attached.
# RelatedField accessors do that, for example.
#
# So we strip down any _*_cache attrs before saving
# and later reassign them
unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
for k in unwanted_dict:
del instance.__dict__[k]
key = 'pk' if cache_on_save is True else cache_on_save
cond = {key: getattr(instance, key)}
qs = sender.objects.inplace().filter(**cond).order_by()
if MAX_GET_RESULTS:
qs = qs[:MAX_GET_RESULTS + 1]
qs._cache_results(qs._cache_key(), [instance])
# Reverting stripped attributes
instance.__dict__.update(unwanted_dict)
def _post_delete(self, sender, instance, **kwargs):
"""
Invalidation upon object deletion.
"""
        # NOTE: this will behave incorrectly if someone changed object fields
        # before deletion (why would anyone do that?)
invalidate_obj(instance)
def inplace(self):
return self.get_queryset().inplace()
def cache(self, *args, **kwargs):
return self.get_queryset().cache(*args, **kwargs)
def nocache(self):
return self.get_queryset().nocache()
def invalidated_update(self, **kwargs):
return self.get_queryset().inplace().invalidated_update(**kwargs)
def invalidate_m2m(sender=None, instance=None, model=None, action=None, pk_set=None, reverse=None,
**kwargs):
"""
Invoke invalidation on m2m changes.
"""
# Skip this machinery for explicit through tables,
# since post_save and post_delete events are triggered for them
if not sender._meta.auto_created:
return
if action not in ('pre_clear', 'post_add', 'pre_remove'):
return
m2m = next(m2m for m2m in instance._meta.many_to_many + model._meta.many_to_many
if m2m.rel.through == sender)
# TODO: optimize several invalidate_objs/dicts at once
if action == 'pre_clear':
# TODO: always use column names here once Django 1.3 is dropped
instance_field = m2m.m2m_reverse_field_name() if reverse else m2m.m2m_field_name()
objects = sender.objects.filter(**{instance_field: instance.pk})
for obj in objects:
invalidate_obj(obj)
elif action in ('post_add', 'pre_remove'):
instance_column, model_column = m2m.m2m_column_name(), m2m.m2m_reverse_name()
if reverse:
instance_column, model_column = model_column, instance_column
# NOTE: we don't need to query through objects here,
        # because we already know all their meaningful attributes.
for pk in pk_set:
invalidate_dict(sender, {
instance_column: instance.pk,
model_column: pk
})
@once
def install_cacheops():
"""
Installs cacheops by numerous monkey patches
"""
monkey_mix(Manager, ManagerMixin)
monkey_mix(QuerySet, QuerySetMixin)
QuerySet._cacheprofile = QuerySetMixin._cacheprofile
QuerySet._cloning = QuerySetMixin._cloning
# DateQuerySet existed in Django 1.7 and earlier
# Values*QuerySet existed in Django 1.8 and earlier
from django.db.models import query
for cls_name in ('ValuesQuerySet', 'ValuesListQuerySet', 'DateQuerySet'):
if hasattr(query, cls_name):
cls = getattr(query, cls_name)
monkey_mix(cls, QuerySetMixin, ['iterator'])
# Use app registry to introspect used apps
from django.apps import apps
# Install profile and signal handlers for any earlier created models
for model in apps.get_models(include_auto_created=True):
model._default_manager._install_cacheops(model)
# Turn off caching in admin
if apps.is_installed('django.contrib.admin'):
from django.contrib.admin.options import ModelAdmin
@monkey(ModelAdmin)
def get_queryset(self, request):
return get_queryset.original(self, request).nocache()
# Bind m2m changed handler
m2m_changed.connect(invalidate_m2m)
# Make buffers/memoryviews pickleable to serialize binary field data
if six.PY2:
import copy_reg
copy_reg.pickle(buffer, lambda b: (buffer, (bytes(b),)))
if six.PY3:
import copyreg
copyreg.pickle(memoryview, lambda b: (memoryview, (bytes(b),)))
| bsd-3-clause | -6,195,858,916,017,696,000 | 37.234405 | 98 | 0.597399 | false |
leigh123linux/Cinnamon | files/usr/share/cinnamon/cinnamon-settings/modules/cs_workspaces.py | 3 | 2070 | #!/usr/bin/python3
from SettingsWidgets import SidePage
from xapp.GSettingsWidgets import *
class Module:
name = "workspaces"
category = "prefs"
comment = _("Manage workspace preferences")
def __init__(self, content_box):
keywords = _("workspace, osd, expo, monitor")
sidePage = SidePage(_("Workspaces"), "cs-workspaces", keywords, content_box, module=self)
self.sidePage = sidePage
def shouldLoad(self):
return True
def on_module_selected(self):
if not self.loaded:
print("Loading Workspaces module")
page = SettingsPage()
self.sidePage.add_widget(page)
settings = page.add_section(_("Workspace Options"))
switch = GSettingsSwitch(_("Enable workspace OSD"), "org.cinnamon", "workspace-osd-visible")
settings.add_row(switch)
switch = GSettingsSwitch(_("Allow cycling through workspaces"), "org.cinnamon.muffin", "workspace-cycle")
settings.add_row(switch)
switch = GSettingsSwitch(_("Only use workspaces on primary monitor (requires Cinnamon restart)"), "org.cinnamon.muffin", "workspaces-only-on-primary")
settings.add_row(switch)
switch = GSettingsSwitch(_("Display Expo view as a grid"), "org.cinnamon", "workspace-expo-view-as-grid")
settings.add_row(switch)
# Edge Flip doesn't work well, so it's there in gsettings, but we don't show it to users yet
# switch = GSettingsSwitch(_("Enable Edge Flip"), "org.cinnamon", "enable-edge-flip")
# settings.add_row(switch)
# spin = GSettingsSpinButton(_("Edge Flip delay"), "org.cinnamon", "edge-flip-delay", mini=1, maxi=3000, units=_("ms"))
# settings.add_reveal_row(spin, "org.cinnamon", "enable-edge-flip")
switch = GSettingsSwitch(_("Invert the left and right arrow key directions used to shift workspaces during a window drag"), "org.cinnamon.muffin", "invert-workspace-flip-direction")
settings.add_row(switch)
| gpl-2.0 | 622,025,873,954,569,200 | 42.125 | 193 | 0.638164 | false |
vmassuchetto/dnstorm | dnstorm/app/migrations/0005_auto__add_field_idea_description.py | 1 | 16145 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Criteria.created'
db.add_column('dnstorm_criteria', 'created',
self.gf('django.db.models.fields.DateTimeField')(default='2000-01-01', auto_now_add=True, blank=True),
keep_default=False)
# Adding field 'Criteria.updated'
db.add_column('dnstorm_criteria', 'updated',
self.gf('django.db.models.fields.DateTimeField')(default='2000-01-01', auto_now=True, blank=True),
keep_default=False)
# Adding field 'Idea.description'
db.add_column('dnstorm_idea', 'description',
self.gf('django.db.models.fields.TextField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Criteria.created'
db.delete_column('dnstorm_criteria', 'created')
# Deleting field 'Criteria.updated'
db.delete_column('dnstorm_criteria', 'updated')
# Deleting field 'Idea.description'
db.delete_column('dnstorm_idea', 'description')
models = {
u'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': u"orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'app.alternative': {
'Meta': {'object_name': 'Alternative', 'db_table': "'dnstorm_alternative'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'alternative_coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2001-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['app.Idea']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "'dnstorm_comment'"},
'alternative': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Alternative']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Criteria']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Idea']", 'null': 'True', 'blank': 'True'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']", 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.criteria': {
'Meta': {'object_name': 'Criteria', 'db_table': "'dnstorm_criteria'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'criteria_coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'fmt': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'min': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'order': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']"}),
'result': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '60', 'populate_from': "'name'", 'unique_with': '()'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'app.idea': {
'Meta': {'object_name': 'Idea', 'db_table': "'dnstorm_idea'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'idea_coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.ideacriteria': {
'Meta': {'object_name': 'IdeaCriteria', 'db_table': "'dnstorm_idea_criteria'"},
'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Criteria']"}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Idea']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_currency': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'value_number': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'value_scale': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'value_time': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'app.invitation': {
'Meta': {'object_name': 'Invitation', 'db_table': "'dnstorm_invitation'"},
'hash': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'app.option': {
'Meta': {'object_name': 'Option', 'db_table': "'dnstorm_option'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'app.problem': {
'Meta': {'object_name': 'Problem', 'db_table': "'dnstorm_problem'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'author'", 'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'collaborator': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborator'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('ckeditor.fields.RichTextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'}),
'open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '60', 'populate_from': "'title'", 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.vote': {
'Meta': {'object_name': 'Vote', 'db_table': "'dnstorm_vote'"},
'alternative': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'vote_alternative'", 'null': 'True', 'to': u"orm['app.Alternative']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'vote_comment'", 'null': 'True', 'to': u"orm['app.Alternative']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2001-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'vote_idea'", 'null': 'True', 'to': u"orm['app.Idea']"}),
'value': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['app'] | gpl-2.0 | -7,145,299,977,409,573,000 | 81.377551 | 208 | 0.552865 | false |
czepluch/pysecp256k1 | c_secp256k1/__init__.py | 1 | 9618 | import numbers
import struct
from glob import glob
from os import path
try:
from secrets import SystemRandom
random = SystemRandom()
except ImportError:
try:
from random import SystemRandom
random = SystemRandom()
except ImportError:
import random
from bitcoin import electrum_sig_hash as _b_electrum_sig_hash
from bitcoin import encode_sig as _b_encode_sig
from bitcoin import decode_sig as _b_decode_sig
from bitcoin import N, P
secpk1n = 115792089237316195423570985008687907852837564279074904382605163141518161494337
try:
from ._c_secp256k1 import ffi
except ImportError as e:
raise ImportError(
"CFFI extension not found. You need to install this package before use. %r" % e)
try:
obj_name = glob(path.abspath(path.join(path.dirname(__file__), "libsecp256k1*")))[0]
except Exception as e:
raise ImportError(
"secp256k1 lib not found. You need to run 'python setup.py build' or see README %r" % e)
lib = ffi.dlopen(obj_name)
# ffi definition of the context
ctx = lib.secp256k1_context_create(3)
# arbitrary data used by the nonce generation function
ndata = ffi.new("unsigned char[]", bytes(bytearray(random.getrandbits(8) for _ in range(32))))
# helpers
class InvalidPubkeyError(Exception):
pass
class InvalidSignatureError(Exception):
pass
class InvalidPrivateKeyError(Exception):
pass
if hasattr(int, 'to_bytes'):
def _int_to_big_endian32(value):
return value.to_bytes(32, byteorder='big')
else:
def _int_to_big_endian32(value):
cs = []
while value > 0:
cs.append(chr(value % 256))
value /= 256
s = b''.join(reversed(cs))
return b'\x00' * (32 - len(s)) + s
if hasattr(int, 'from_bytes'):
def _big_endian_to_int(value):
return int.from_bytes(value, byteorder='big')
else:
def _big_endian_to_int(value):
return int(value.encode('hex'), 16)
def _encode_sig(v, r, s):
assert isinstance(v, numbers.Integral)
assert v in (27, 28)
vb, rb, sb = bytes(bytearray((v - 27,))), _int_to_big_endian32(r), _int_to_big_endian32(s)
return rb + sb + vb
def _decode_sig(sig):
return ord(sig[64:65]) + 27, _big_endian_to_int(sig[0:32]), _big_endian_to_int(sig[32:64])
def _verify_seckey(seckey):
# Validate seckey
is_valid = lib.secp256k1_ec_seckey_verify(ctx, seckey)
return is_valid
def _deserialize_pubkey(pub):
pubkey = ffi.new("secp256k1_pubkey *")
# Return 1 if pubkey is valid
valid_pub = lib.secp256k1_ec_pubkey_parse(
ctx, # const secp256k1_context*
pubkey, # secp256k1_pubkey*
pub, # const unsigned char
len(pub) # size_t
)
if not valid_pub:
raise InvalidPubkeyError()
return pubkey
def _serialize_pubkey(pub):
serialized_pubkey = ffi.new("unsigned char[65]")
outputlen = ffi.new("size_t *")
# Serialize a pubkey object into a serialized byte sequence.
lib.secp256k1_ec_pubkey_serialize(
ctx,
serialized_pubkey,
outputlen,
pub,
0 # SECP256K1_EC_COMPRESSED
)
return serialized_pubkey
def _der_deserialize_signature(in_sig):
sig = ffi.new("secp256k1_ecdsa_signature *")
# Return 1 when signature could be parsed
valid_sig = lib.secp256k1_ecdsa_signature_parse_der(
ctx, # const secp256k1_context*
sig, # secp256k1_ecdsa_signature*
in_sig, # const unsigned char
len(in_sig) # size_t
)
if not valid_sig:
raise InvalidSignatureError()
return sig
def _der_serialize_signature(sig):
serialized_sig = ffi.new("unsigned char[65]")
outputlen = ffi.new("size_t *")
# Serialize a pubkey object into a serialized byte sequence.
serializeable = lib.secp256k1_ecdsa_signature_serialize_der(
ctx,
serialized_sig,
outputlen,
sig, # secp256k1_ecdsa_signature *
)
assert serializeable == 1
return serialized_sig
def _ecdsa_sign_recoverable(msg32, seckey):
"""
    Takes a 32-byte message hash and a 32-byte private key.
    Returns a secp256k1_ecdsa_recoverable_signature object (not yet serialized).
"""
assert isinstance(msg32, bytes)
assert isinstance(seckey, bytes)
assert len(msg32) == len(seckey) == 32
if not _verify_seckey(seckey):
raise InvalidPrivateKeyError()
# Make a recoverable signature of 65 bytes
sig64 = ffi.new("secp256k1_ecdsa_recoverable_signature *")
lib.secp256k1_ecdsa_sign_recoverable(
ctx,
sig64,
msg32,
seckey,
ffi.addressof(lib, "secp256k1_nonce_function_default"),
ndata,
)
return sig64
def _parse_to_recoverable_signature(sig):
"""
    Parses a 65-byte compact signature into a recoverable signature object
"""
# Buffer for getting values of signature object
assert isinstance(sig, bytes)
assert len(sig) == 65
# Make a recoverable signature of 65 bytes
rec_sig = ffi.new("secp256k1_ecdsa_recoverable_signature *")
# Retrieving the recid from the last byte of the signed key
recid = ord(sig[64:65])
# Parse a revoverable signature
parsable_sig = lib.secp256k1_ecdsa_recoverable_signature_parse_compact(
ctx,
rec_sig,
sig,
recid
)
# Verify that the signature is parsable
if not parsable_sig:
raise InvalidSignatureError()
return rec_sig
def _check_signature(sig_compact):
if not len(sig_compact) == 65:
raise InvalidSignatureError()
v, r, s = _decode_sig(sig_compact)
if r >= N or s >= P or v < 27 or v > 28 or r < 1 or s < 1 or s >= secpk1n:
raise InvalidSignatureError()
if not (r < secpk1n and s < secpk1n and (v == 27 or v == 28)):
raise InvalidSignatureError()
# compact encoding
def ecdsa_sign_compact(msg32, seckey):
"""
Takes the same message and seckey as _ecdsa_sign_recoverable
    Returns a 65-byte compact signature (r || s || recovery id)
"""
# Assign 65 bytes to output
output64 = ffi.new("unsigned char[65]")
# ffi definition of recid
recid = ffi.new("int *")
lib.secp256k1_ecdsa_recoverable_signature_serialize_compact(
ctx,
output64,
recid,
_ecdsa_sign_recoverable(msg32, seckey)
)
# Assign recid to the last byte in the output array
r = ffi.buffer(output64)[:64] + struct.pack("B", recid[0])
assert len(r) == 65, len(r)
return r
def ecdsa_recover_compact(msg32, sig):
"""
    Takes a 32-byte message hash and a 65-byte compact signature
    Returns the serialized public key corresponding to the private key used to sign
"""
assert isinstance(msg32, bytes)
assert len(msg32) == 32
_check_signature(sig)
# Check that recid is of valid value
recid = ord(sig[64:65])
if not (recid >= 0 and recid <= 3):
raise InvalidSignatureError()
# Setting the pubkey array
pubkey = ffi.new("secp256k1_pubkey *")
lib.secp256k1_ecdsa_recover(
ctx,
pubkey,
_parse_to_recoverable_signature(sig),
msg32
)
serialized_pubkey = _serialize_pubkey(pubkey)
buf = ffi.buffer(serialized_pubkey, 65)
r = buf[:]
assert isinstance(r, bytes)
assert len(r) == 65, len(r)
return r
def ecdsa_verify_compact(msg32, sig, pub):
"""
    Takes a 32-byte message hash, a 65-byte compact signature and a public key
Returns True if the signature is valid
"""
assert isinstance(msg32, bytes)
assert len(msg32) == 32
# Check if pubkey has been bin_electrum encoded.
# If so, append \04 to the front of the key, to make sure the length is 65
if len(pub) == 64:
pub = b'\04'+pub
assert len(pub) == 65
_check_signature(sig)
# Setting the pubkey array
c_sig = ffi.new("secp256k1_ecdsa_signature *")
# converts the recoverable signature to a signature
lib.secp256k1_ecdsa_recoverable_signature_convert(
ctx,
c_sig,
_parse_to_recoverable_signature(sig)
)
is_valid = lib.secp256k1_ecdsa_verify(
ctx,
c_sig, # const secp256k1_ecdsa_signature
msg32, # const unsigned char
_deserialize_pubkey(pub) # const secp256k1_pubkey
)
return is_valid == 1
# raw encoding (v, r, s)
def ecdsa_sign_raw(rawhash, key):
"""
Takes a rawhash message and a private key and returns a tuple
of the v, r, s values.
"""
return _decode_sig(ecdsa_sign_compact(rawhash, key))
def ecdsa_recover_raw(rawhash, vrs):
"""
Takes a rawhash message of length 32 bytes and a (v, r, s) tuple
Returns a public key for the private key used in the sign function
"""
assert len(vrs) == 3
assert len(rawhash) == 32
return ecdsa_recover_compact(rawhash, _encode_sig(*vrs))
def ecdsa_verify_raw(msg32, vrs, pub):
"""
Takes a message, the signature being verified and a pubkey
    Returns True if the signature is valid for the given pubkey
"""
# assert len(vrs) == 3
if len(vrs) == 3:
return ecdsa_verify_compact(msg32, _encode_sig(*vrs), pub)
else:
return ecdsa_verify_compact(msg32, vrs, pub)
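# Usage sketch for the raw (v, r, s) interface above; msg32 is assumed to be a
# 32-byte hash and seckey a valid 32-byte private key (illustrative names):
#   v, r, s = ecdsa_sign_raw(msg32, seckey)
#   pub = ecdsa_recover_raw(msg32, (v, r, s))        # 65-byte uncompressed pubkey
#   assert ecdsa_verify_raw(msg32, (v, r, s), pub)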
# DER encoding
def ecdsa_sign_der(msg, seckey):
return _b_encode_sig(*ecdsa_sign_raw(_b_electrum_sig_hash(msg), seckey))
def ecdsa_recover_der(msg, sig):
return ecdsa_recover_raw(_b_electrum_sig_hash(msg), _b_decode_sig(sig))
def ecdsa_verify_der(msg, sig, pub):
return ecdsa_verify_raw(_b_electrum_sig_hash(msg), _b_decode_sig(sig), pub)
| mit | -740,912,916,495,049,200 | 26.797688 | 96 | 0.638698 | false |
LamaHamadeh/Microsoft-DAT210x | Module 5/assignment3.py | 1 | 6640 | '''
author Lama Hamadeh
'''
import pandas as pd
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib
from sklearn.cluster import KMeans
matplotlib.style.use('ggplot') # Look Pretty
#
# INFO: This dataset has call records for 10 users tracked over the course of 3 years.
# Your job is to find out where the users likely live at!
'''
def showandtell(title=None):
if title != None: plt.savefig(title + ".png", bbox_inches='tight', dpi=300)
plt.show()
exit()
'''
def clusterInfo(model):
print ("Cluster Analysis Inertia: ", model.inertia_)
print ('------------------------------------------')
for i in range(len(model.cluster_centers_)):
print ("\n Cluster ", i)
print (" Centroid ", model.cluster_centers_[i])
print (" #Samples ", (model.labels_==i).sum()) # NumPy Power
# Find the cluster with the least # attached nodes
def clusterWithFewestSamples(model):
# Ensure there's at least on cluster...
minSamples = len(model.labels_)
minCluster = 0
for i in range(len(model.cluster_centers_)):
if minSamples > (model.labels_==i).sum():
minCluster = i
minSamples = (model.labels_==i).sum()
print ("\n Cluster With Fewest Samples: "), minCluster
return (model.labels_==minCluster)
def doKMeans(data, clusters=0):
#
# TODO: Be sure to only feed in Lat and Lon coordinates to the KMeans algo, since none of the other
# data is suitable for your purposes. Since both Lat and Lon are (approximately) on the same scale,
# no feature scaling is required. Print out the centroid locations and add them onto your scatter
# plot. Use a distinguishable marker and color.
#
# Hint: Make sure you fit ONLY the coordinates, and in the CORRECT order (lat first).
# This is part of your domain expertise.
#
# .. your code here ..
dataframe = pd.concat([data.TowerLon, data.TowerLat], axis = 1)
kmeans = KMeans(n_clusters=clusters)
labels = kmeans.fit_predict(dataframe)
# INFO: Print and plot the centroids...
centroids = kmeans.cluster_centers_
ax.scatter(x = centroids[:, 0], y = centroids[:, 1], marker='x', c='red', alpha=0.9, linewidths=3, s=250)
model = kmeans
return model
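# Note: doKMeans() clusters on (TowerLon, TowerLat) only, draws the fitted
# centroids on the shared `ax` axes, and returns the KMeans model for later use.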
#
# TODO: Load up the dataset and take a peek at its head and dtypes.
# Convert the date using pd.to_datetime, and the time using pd.to_timedelta
#
# .. your code here ..
df=pd.read_csv('/Users/lamahamadeh/Downloads/Modules/DAT210x-master/Module5/Datasets/CDR.csv')
#print(df)
print(df.dtypes)
df.CallDate = pd.to_datetime(df.CallDate) # Converts the entries in the 'CallDate' column to datetime
df.CallTime = pd.to_timedelta(df.CallTime) # Converts the entries in the 'CallTime' column to timedelta
df.Duration = pd.to_timedelta(df.Duration) # Converts the entries in the 'Duration' column to timedelta
print(df.dtypes)
#
# TODO: Get a distinct list of "In" phone numbers (users) and store the values in a
# regular python list (i.e., numpy.ndarray).
# Hint: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.tolist.html
#
# .. your code here ..
in_numbers = df.In.unique() #distinct == unique
#print(in_numbers)
print(type(in_numbers)) #numpy.ndarray
#
# INFO: The locations map above should be too "busy" to really wrap your head around. This
# is where domain expertise comes into play. Your intuition tells you that people are likely
# to behave differently on weekends:
#
# On Weekends:
# 1. People probably don't go into work
# 2. They probably sleep in late on Saturday
# 3. They probably run a bunch of random errands, since they couldn't during the week
# 4. They should be home, at least during the very late hours, e.g. 1-4 AM
#
# On Weekdays:
# 1. People probably are at work during normal working hours
# 2. They probably are at home in the early morning and during the late night
# 3. They probably spend time commuting between work and home everyday
#print ("\n\nExamining person: ", 0)
#
# TODO: Create a slice called user1 that filters to only include dataset records where the
# "In" feature (user phone number) is equal to the first number on your unique list above
#
# .. your code here ..
user1 = df[(df.In == in_numbers[0])]
print(user1)
#
# TODO: Alter your slice so that it includes only Weekday (Mon-Fri) values.
#
# .. your code here ..
user1 = user1[(user1.DOW == 'Mon') | (user1.DOW == 'Tue')| (user1.DOW == 'Wed')| (user1.DOW == 'Thu')
| (user1.DOW == 'Fri')]
print(user1)
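# An equivalent, more compact weekday filter (sketch using pandas.Series.isin):
# user1 = user1[user1.DOW.isin(['Mon', 'Tue', 'Wed', 'Thu', 'Fri'])]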
#
# TODO: The idea is that the call was placed before 5pm. From Midnight-730a, the user is
# probably sleeping and won't call / wake up to take a call. There should be a brief time
# in the morning during their commute to work, then they'll spend the entire day at work.
# So the assumption is that most of the time is spent either at work, or in 2nd, at home.
#
# .. your code here ..
user1 = user1[(user1.CallTime < '17:00:00')]
print(user1)
print(len(user1))
#
# TODO: Plot the Cell Towers the user connected to
#
# .. your code here ..
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(user1.TowerLon,user1.TowerLat, c='g', marker='o', alpha=0.2)
ax.set_title('Weekdays Calls (<5p.m)')
#
# INFO: Run K-Means with K=3 or K=4. There really should only be a two areas of concentration. If you
# notice multiple areas that are "hot" (multiple areas the usr spends a lot of time at that are FAR
# apart from one another), then increase K=5, with the goal being that all centroids except two will
# sweep up the annoying outliers and not-home, not-work travel occasions. the other two will zero in
# on the user's approximate home location and work locations. Or rather the location of the cell
# tower closest to them.....
model = doKMeans(user1, 3)
#
# INFO: Print out the mean CallTime value for the samples belonging to the cluster with the LEAST
# samples attached to it. If our logic is correct, the cluster with the MOST samples will be work.
# The cluster with the 2nd most samples will be home. And the K=3 cluster with the least samples
# should be somewhere in between the two. What time, on average, is the user in between home and
# work, between the midnight and 5pm?
midWayClusterIndices = clusterWithFewestSamples(model)
midWaySamples = user1[midWayClusterIndices]
print ("Its Waypoint Time: ", midWaySamples.CallTime.mean())
#
# Let's visualize the results!
# First draw the X's for the clusters:
ax.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1], s=169, c='r', marker='x', alpha=0.8, linewidths=2)
#
# Then save the results:
#showandtell('Weekday Calls Centroids') # Comment this line out when you're ready to proceed
plt.show()
| mit | -1,770,293,944,817,420,300 | 33.764398 | 119 | 0.708434 | false |
lyhrobin00007/FlaskCTA | app/tdbpy/tdbapi.py | 1 | 12173 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 07 13:21:35 2016
@author: 024536
"""
import tdbpy
from datetime import datetime, timedelta
pSetting = {
'szIP':"172.22.137.140",
'szPort':"20003",
'szUser':"liyonghan",
'szPassword':"liyo1234",
'nTimeOutVal':10,
'nRetryCount':10,
'nRetryGap':10
}
pProxySetting = {
#TDB_PROXY_TYPE
# 0 TDB_PROXY_SOCK4
# 1 TDB_PROXY_SOCK4A
# 2 TDB_PROXY_SOCK5
# 3 TDB_PROXY_HTTP11
'nProxyType':0,
'szProxyHostIp':"",
'szProxyPort':"",
'szProxyUser':"",
'szProxyPwd':"",
}
DictError = {
0:"TDB_SUCCESS", #TDB_SUCCESS = 0,
-1:"TDB_NETWORK_ERROR", #网络错误
-2:"TDB_NETWORK_TIMEOUT", #网络超时
-3:"TDB_NO_DATA", #没有数据
-4:"TDB_OUT_OF_MEMORY", #内存耗尽
-5:"TDB_LOGIN_FAILED", #登陆失败
-11:"TDB_INVALID_PARAMS", #无效的参数
-10:"TDB_INVALID_CODE_TYPE",#无效的代码类型,比如向TDB_GetFuture传入非期货类型代码,返回之。
-50:"TDB_WRONG_FORMULA", #指标公式错误
}
DictFuture = ["chWindCode","chCode","nDate","nTime","iVolume","iTurover"
,"nSettle","nPosition","nCurDelta","chTradeFlag"
,"iAccVolume","iAccTurover","nHigh","nLow","nOpen","nPrice"
,"nPreClose","nPreSettle","nPrePosition"]
DictFutureAB = ["chWindCode","chCode","nDate","nTime","iVolume","iTurover"
,"nSettle","nPosition","nCurDelta","chTradeFlag"
,"iAccVolume","iAccTurover","nHigh","nLow","nOpen","nPrice"
,"nAskPrice1","nAskPrice2","nAskPrice3","nAskPrice4","nAskPrice5"
,"nAskVolume1","nAskVolume2","nAskVolume3","nAskVolume4","nAskVolume5"
,"nBidPrice1","nBidPrice2","nBidPrice3","nBidPrice4","nBidPrice5"
,"nBidVolume1","nBidVolume2","nBidVolume3","nBidVolume4","nBidVolume5"
,"nPreClose","nPreSettle","nPrePosition"]
DictTick = ["chWindCode","chCode","nDate","nTime","nPrice","iVolume","iTurover"
,"nMatchItems","nInterest","chTradeFlag","chBSFlag"
,"iAccVolume","iAccTurover","nHigh","nLow","nOpen","nPreClose"
,"nIndex","nStocks","nUps","nDowns","nHoldLines"]
DictTickAB = ["chWindCode","chCode","nDate","nTime","nPrice","iVolume","iTurover"
,"nMatchItems","nInterest","chTradeFlag","chBSFlag"
,"iAccVolume","iAccTurover","nHigh","nLow","nOpen","nPreClose"
,"nAskPrice1","nAskPrice2","nAskPrice3","nAskPrice4","nAskPrice5"
,"nAskPrice6","nAskPrice7","nAskPrice8","nAskPrice9","nAskPrice10"
,"nAskVolume1","nAskVolume2","nAskVolume3","nAskVolume4","nAskVolume5"
,"nAskVolume6","nAskVolume7","nAskVolume8","nAskVolume9","nAskVolume10"
,"nBidPrice1","nBidPrice2","nBidPrice3","nBidPrice4","nBidPrice5"
,"nBidPrice6","nBidPrice7","nBidPrice8","nBidPrice9","nBidPrice10"
,"nBidVolume1","nBidVolume2","nBidVolume3","nBidVolume4","nBidVolume5"
,"nBidVolume6","nBidVolume7","nBidVolume8","nBidVolume9","nBidVolume10"
,"nAskAvPrice","nBidAvPrice","iTotalAskVolume","iTotalBidVolume"
,"nIndex","nStocks","nUps","nDowns","nHoldLines"]
DictTransaction = ["chWindCode","chCode","nDate","nTime","nIndex"
,"chFunctionCode","chOrderKind","chBSFlag","nTradePrice"
,"nTradeVolume","nAskOrder","nBidOrder"]
DictOrder = ["chWindCode","chCode","nDate","nTime","nIndex","nOrder"
,"chOrderKind","chFunctionCode","nOrderPrice","nOrderVolume"]
DictOrderQueue = ["chWindCode","chCode","nDate","nTime","nSide","nPrice"
,"nOrderItems","nABItems","nABVolume"]
reqF = {
"chCode":"CU1609.SHF", #证券万得代码(AG1312.SHF)
"nBeginDate":20160901, #开始日期(交易日),为0则从当天,例如20130101
"nEndDate":20160901, #结束日期(交易日),小于等于0则和nBeginDate相同
"nBeginTime":0, #开始时间:若<=0则从头,格式:(HHMMSSmmm)例如94500000 表示 9点45分00秒000毫秒
"nEndTime":0, #结束时间:若<=0则至最后
"nAutoComplete":0 #自动补齐标志:( 0:不自动补齐,1:自动补齐)
}
reqT = {
"chCode":"600030.SH", #证券万得代码(AG1312.SHF)
"nBeginDate":20160901, #开始日期(交易日),为0则从当天,例如20130101
"nEndDate":20160901, #结束日期(交易日),小于等于0则和nBeginDate相同
"nBeginTime":0, #开始时间:若<=0则从头,格式:(HHMMSSmmm)例如94500000 表示 9点45分00秒000毫秒
"nEndTime":0, #结束时间:若<=0则至最后
}
reqKL = {
"chCode":"CU1609.SHF", #证券万得代码(AG1312.SHF)
"nCQFlag":0, #除权标志:0 不复权,1 向前复权,2 向后复权
"nCQDate":0, #复权日期(<=0:全程复权) 格式:YYMMDD,例如20130101表示2013年1月1日
"nQJFlag":0, #全价标志(债券)(0:净价 1:全价)
"nCycType":0, #数据周期:0 秒线、1 分钟、2 日线、3 周线、4 月线、5 季线、6 半年线、7 年线、8 tickBar
"nCycDef":1, #周期数量:仅当nCycType取值:秒、分钟、日线、周线、月线时,这个字段有效。
"nAutoComplete":0, #自动补齐:仅1秒钟线、1分钟线支持这个标志,(不为0:补齐;0:不补齐)
"nBeginDate":20160901, #开始日期(交易日,<0:从上市日期开始; 0:从今天开始)
"nEndDate":20160901, #结束日期(交易日,<=0:跟nBeginDate一样)
"nBeginTime":0, #开始时间,<=0表示从开始,格式:(HHMMSSmmm)例如94500000 表示 9点45分00秒000毫秒
"nEndTime":0 #结束时间,<=0表示到结束,格式:(HHMMSSmmm)例如94500000 表示 9点45分00秒000毫秒
}
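# Illustrative variation: for 1-minute bars over the same date range, copy reqKL
# and set nCycType=1 (minute cycle) with nCycDef=1 (one minute per bar):
#   reqKL_1min = dict(reqKL, nCycType=1, nCycDef=1)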
class tdbapi():
def __init__(self):
self.tdbapi = tdbpy.tdbapipy()
def TDB_ERROR(self,flag):
# if flag not in [0,-3,-10]:
# print DictError[flag]
return flag
def TDB_Open(self,pSetting):
loginRes = {}
self.tdbapi.TDB_Open(pSetting,loginRes)
return loginRes
def TDB_OpenProxy(self,pSetting,pProxySetting):
loginRes = {}
self.tdbapi.TDB_OpenProxy(pSetting,pProxySetting,loginRes)
return loginRes
def TDB_Close(self):
# print "TDB_Close"
self.TDB_ERROR(self.tdbapi.TDB_Close())
def TDB_GetCodeTable(self, szMarket = 'CF'):
# szMarket in ['SZ','SH','QH','CF','SHF','CZC','DCE']
pCodeTable=[]
flag = self.TDB_ERROR(self.tdbapi.TDB_GetCodeTable(szMarket,pCodeTable))
for i,d in enumerate(pCodeTable):
pCodeTable[i]['chCNName'] = d['chCNName'].decode('gbk','replace')
# pCodeTable[i]['chCNName'] = d['chCNName'].decode('gbk','ignore')
return pCodeTable,flag
def TDB_GetKLine(self,reqKL):
pData = []
flag = self.TDB_ERROR(self.tdbapi.TDB_GetKLine(reqKL,pData))
return pData,flag
def TDB_GetTickAB(self,reqT):
pData = []
flag = self.TDB_ERROR(self.tdbapi.TDB_GetTickAB(reqT,pData))
return pData,flag
def TDB_GetTick(self,reqT):
pData = []
flag = self.TDB_ERROR(self.tdbapi.TDB_GetTick(reqT,pData))
return pData,flag
def TDB_GetFutureAB(self,reqF):
pData = []
flag = self.TDB_ERROR(self.tdbapi.TDB_GetFutureAB(reqF,pData))
return pData,flag
def TDB_GetFuture(self,reqF):
pData = []
flag = self.TDB_ERROR(self.tdbapi.TDB_GetFuture(reqF,pData))
return pData,flag
def TDB_GetTransaction(self,reqT):
pData = []
flag = self.TDB_ERROR(self.tdbapi.TDB_GetTransaction(reqT,pData))
return pData,flag
def TDB_GetOrder(self,reqT):
pData = []
flag = self.TDB_ERROR(self.tdbapi.TDB_GetOrder(reqT,pData))
return pData,flag
def TDB_GetOrderQueue(self,reqT):
pData = []
flag = self.TDB_ERROR(self.tdbapi.TDB_GetOrderQueue(reqT,pData))
return pData,flag
def TDB_GetCodeInfo(self,szWindCode="IF1609.CF"):
pCodeDict = {}
flag = self.TDB_ERROR(self.tdbapi.TDB_GetCodeInfo(szWindCode,pCodeDict))
return pCodeDict,flag
def TDB_AddFormula(self,szName,szContent):
pRes = {}
flag = self.TDB_ERROR(self.tdbapi.TDB_AddFormula(szName,szContent,pRes))
return pRes,flag
def TDB_GetFormula(self,szFormulaName):
pFormula = []
flag = self.TDB_ERROR(self.tdbapi.TDB_GetFormula(szFormulaName,pFormula))
return pFormula,flag
def TDB_CalcFormula(self,pReqCalc):
pResult = {}
flag = self.TDB_ERROR(self.tdbapi.TDB_CalcFormula(pReqCalc,pResult))
return pResult,flag
def TDB_DeleteFormula(self,szFormulaName):
pDelRes = {}
flag = self.TDB_ERROR(self.tdbapi.TDB_DeleteFormula(self,szFormulaName,pDelRes))
return pDelRes,flag
#------------------------------------------------------------------------------
    # Helper functions
def cleanDataFutureAB(self,dataList):
multi = 10000.0
for i,tmp in enumerate(dataList):
d = {}
d['windCode'] = unicode(tmp['chWindCode'])
d['vtSymbol'] = d['windCode'].split('.')[0] # vt系统代码
d['symbol'] = d['vtSymbol'] # 合约代码
d['exchange'] = u'' # 交易所代码
            # Trade data
d['lastPrice'] = tmp['nPrice']/multi # 最新成交价
d['volume'] = tmp['iAccVolume'] # 最新成交量
d['turnover'] = float(tmp['iAccTurover'])
d['openInterest'] = tmp['nPrice'] # 持仓量
d['preOpenInterest'] = tmp['nPrePosition']
d['openPrice'] = tmp['nOpen']/multi
d['highestPrice'] = tmp['nHigh']/multi
d['lowestPrice'] = tmp['nLow']/multi
d['preClosePrice'] = tmp['nPreClose']/multi
d['settlementPrice'] = tmp['nSettle']/multi
d['preSettlementPrice'] = tmp['nPreSettle']/multi
            # Tick timestamp
d['date'] = unicode(tmp['nDate']) # 日期
d['time'] = unicode(tmp['nTime'])
d['time'] = u'0'*(9-len(d['time']))+d['time']
d['time'] = d['time'][0:2]+':'+d['time'][2:4]+':'+d['time'][4:6]+'.'+d['time'][6:9] # 时间
d['datetime'] = datetime.strptime(d['date'] + ' ' + d['time'], '%Y%m%d %H:%M:%S.%f') # python的datetime时间对象
if i>0 and d['datetime']<=dataList[i-1]['datetime']:
d['datetime'] = dataList[i-1]['datetime']+timedelta(0,0.5,0)
d['time'] = d['datetime'].strftime("%H:%M:%S.%f")[:-3]
            # Order book quotes (only level 1 is used here)
d['bidPrice1'] = tmp['nBidPrice1']/multi
d['askPrice1'] = tmp['nAskPrice1']/multi
d['bidVolume1'] = tmp['nBidVolume1']
d['askVolume1'] = tmp['nAskVolume1']
dataList[i] = d
return dataList
if __name__ == "__main__":
testapi = tdbapi()
testapi.TDB_Open(pSetting)
codeTable,flagcodeTable = testapi.TDB_GetCodeTable("CF")
# dataKLine,flagKLine = testapi.TDB_GetKLine(reqKL)
# dataFutureAB,flagFutureAB = testapi.TDB_GetFutureAB(reqF)
# dataFuture,flagFuture = testapi.TDB_GetFuture(reqF)
# dataTickAB,flagTickAB = testapi.TDB_GetTickAB(reqT)
# dataTick,flagTick = testapi.TDB_GetTick(reqT)
testapi.TDB_Close()
# filepath = "testpy"
# CsvSave(filepath,"dataFutureAB.csv",DictFutureAB,dataFutureAB)
# CsvSave(filepath,"dataFuture.csv",DictFuture,dataFuture)
# CsvSave(filepath,"dataTickAB.csv",DictTickAB,dataTickAB)
# CsvSave(filepath,"dataTick.csv",DictTick,dataTick)
# FileToZip(filepath,filepath+".zip",False)
| mit | 7,127,535,681,113,352,000 | 40.82397 | 119 | 0.577416 | false |
tensorflow/datasets | tensorflow_datasets/core/utils/resource_utils_test.py | 1 | 2052 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.resource_utils."""
import io
import os
import zipfile
from tensorflow_datasets.core.utils import generic_path
from tensorflow_datasets.core.utils import resource_utils
def make_zip_file() -> zipfile.ZipFile:
"""Returns an in-memory zip file."""
data = io.BytesIO()
zf = zipfile.ZipFile(data, 'w')
zf.writestr('a.txt', b'content of a')
zf.writestr('b/c.txt', b'content of c')
zf.writestr('b/d/e.txt', b'content of e')
zf.writestr('b/f.txt', b'content of f')
zf.writestr('g/h/i.txt', b'content of i')
zf.filename = 'alpharep.zip'
return zf
def test_resource_path():
path = resource_utils.ResourcePath(make_zip_file())
assert isinstance(path, os.PathLike)
assert path.joinpath('b/c.txt').read_text() == 'content of c'
sub_dirs = list(path.joinpath('b').iterdir())
assert len(sub_dirs) == 3
for p in sub_dirs: # Childs should be `ResourcePath` instances
assert isinstance(p, resource_utils.ResourcePath)
# Forwarded to `as_path` keep the resource.
path = generic_path.as_path(path)
assert isinstance(path, resource_utils.ResourcePath)
assert path.joinpath() == path
assert path.joinpath('abc', 'def.txt').name == 'def.txt'
def test_tfds_path():
"""Test the proper suffix only, since the prefix can vary."""
assert resource_utils.tfds_path().name == 'tensorflow_datasets'
# assert resource_utils.tfds_write_path().name == 'tensorflow_datasets'
| apache-2.0 | -2,268,741,723,178,552,800 | 33.779661 | 74 | 0.718324 | false |
JazzeYoung/VeryDeepAutoEncoder | theano/tensor/nnet/tests/test_bn.py | 1 | 5616 | from __future__ import absolute_import, print_function, division
import theano
from theano.tests import unittest_tools as utt
import numpy
from theano.tensor.nnet.bn import batch_normalization
def test_BNComposite():
try:
orig = theano.config.compute_test_value
theano.config.compute_test_value = 'raise'
def bn_ref(x, G, B, M, V):
n = (x - M) / V
return n * G + B
numpy.random.seed(1234)
X = 1 + numpy.random.random([10, 20]).astype('float32')
B = 1 + numpy.random.random([20]).astype('float32')
G = 1 + numpy.random.random([20]).astype('float32')
M = 1 + numpy.random.random([20]).astype('float32')
V = 1 + numpy.random.random([20]).astype('float32')
x = theano.tensor.matrix('x')
b = theano.tensor.vector('b')
g = theano.tensor.vector('g')
m = theano.tensor.vector('m')
v = theano.tensor.vector('v')
x.tag.test_value = numpy.random.rand(2, 2).astype(theano.config.floatX)
b.tag.test_value = numpy.random.rand(2).astype(theano.config.floatX)
g.tag.test_value = numpy.random.rand(2).astype(theano.config.floatX)
m.tag.test_value = numpy.random.rand(2).astype(theano.config.floatX)
v.tag.test_value = numpy.random.rand(2).astype(theano.config.floatX)
bn_ref_op = bn_ref(x, g, b, m, v)
f_ref = theano.function([x, b, g, m, v], [bn_ref_op])
res_ref = f_ref(X, G, B, M, V)
for mode in ['low_mem', 'high_mem']:
bn_op = batch_normalization(x, g, b, m, v, mode=mode)
f = theano.function([x, b, g, m, v], [bn_op])
res = f(X, G, B, M, V)
utt.assert_allclose(res_ref, res)
finally:
theano.config.compute_test_value = orig
def test_bn():
def bn_ref(x, G, B, M, V):
n = (x - M) / V
return n * G + B
numpy.random.seed(1234)
X = 1 + numpy.random.random([10, 20]).astype('float32')
B = 1 + numpy.random.random([20]).astype('float32')
G = 1 + numpy.random.random([20]).astype('float32')
M = 1 + numpy.random.random([20]).astype('float32')
V = 1 + numpy.random.random([20]).astype('float32')
x = theano.tensor.matrix('x')
b = theano.tensor.vector('b')
g = theano.tensor.vector('g')
m = theano.tensor.vector('m')
v = theano.tensor.vector('v')
bn_ref_op = bn_ref(x, g, b, m, v)
f_ref = theano.function([x, b, g, m, v], [bn_ref_op])
res_ref = f_ref(X, G, B, M, V)
for mode in ['low_mem', 'high_mem']:
bn_op = batch_normalization(x, g, b, m, v, mode=mode)
f = theano.function([x, b, g, m, v], [bn_op])
res = f(X, G, B, M, V)
utt.assert_allclose(res_ref, res)
def bn(inputs, gamma, beta, mean, std):
return batch_normalization(inputs, gamma, beta, mean, std, mode=mode)
utt.verify_grad(bn, [X, G, B, M, V])
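    # verify_grad numerically checks the symbolic gradient of the batch-normalization
    # graph against finite differences for the sampled inputs above.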
bn_ref_op = bn_ref(x, g, b, x.mean(axis=0, keepdims=True), x.std(axis=0, keepdims=True))
f_ref = theano.function([x, b, g], [bn_ref_op])
res_ref = f_ref(X, G, B)
for mode in ['low_mem', 'high_mem']:
bn_op = batch_normalization(x, g, b, x.mean(axis=0, keepdims=True), x.std(axis=0, keepdims=True), mode=mode)
f = theano.function([x, b, g], [bn_op])
res = f(X, G, B)
utt.assert_allclose(res_ref, res)
def bn(inputs, gamma, beta, mean, std):
return batch_normalization(inputs, gamma, beta, mean, std, mode=mode)
utt.verify_grad(batch_normalization, [X, G, B,
X.mean(axis=0)[numpy.newaxis], X.std(axis=0)[numpy.newaxis]])
def test_bn_feature_maps():
def bn_ref(x, G, B, M, V):
n = (x - M) / V
return n * G + B
numpy.random.seed(1234)
X = 1 + numpy.random.random([2, 3, 4, 4]).astype('float32')
B = 1 + numpy.random.random([3]).astype('float32')
G = 1 + numpy.random.random([3]).astype('float32')
M = 1 + numpy.random.random([3]).astype('float32')
V = 1 + numpy.random.random([3]).astype('float32')
x = theano.tensor.tensor4('x')
b = theano.tensor.vector('b')
g = theano.tensor.vector('g')
m = theano.tensor.vector('m')
v = theano.tensor.vector('v')
bn_ref_op = bn_ref(x,
g.dimshuffle('x', 0, 'x', 'x'),
b.dimshuffle('x', 0, 'x', 'x'),
m.dimshuffle('x', 0, 'x', 'x'),
v.dimshuffle('x', 0, 'x', 'x'))
f_ref = theano.function([x, b, g, m, v], [bn_ref_op])
res_ref = f_ref(X, G, B, M, V)
for mode in ['low_mem', 'high_mem']:
bn_op = batch_normalization(x,
g.dimshuffle('x', 0, 'x', 'x'),
b.dimshuffle('x', 0, 'x', 'x'),
m.dimshuffle('x', 0, 'x', 'x'),
v.dimshuffle('x', 0, 'x', 'x'),
mode=mode)
f = theano.function([x, b, g, m, v], [bn_op])
res = f(X, G, B, M, V)
utt.assert_allclose(res_ref, res)
def conv_bn(inputs, gamma, beta, mean, std):
return batch_normalization(inputs,
gamma.dimshuffle('x', 0, 'x', 'x'),
beta.dimshuffle('x', 0, 'x', 'x'),
mean.dimshuffle('x', 0, 'x', 'x'),
std.dimshuffle('x', 0, 'x', 'x'),
mode=mode)
utt.verify_grad(conv_bn, [X, G, B, M, V])
| bsd-3-clause | -4,517,601,108,685,921,300 | 38.549296 | 116 | 0.507301 | false |
SiniAghilas/scripts | source/python/script_martin_algorithm.py | 1 | 5564 | #!/usr/bin/jython
import os
import sys
from java.io import FileOutputStream
import logging
import optparse
import getopt
import string
__all__ = []
__version__ = 0.1
__date__ = '15-03-2015'
__updated__ ='21-03-2016'
__author__='asini'
## related path
if os.path.dirname(sys.argv[0])!= "":
directery_name=os.path.dirname(sys.argv[0])+"/"
else :
directery_name="";
#load class (binary path)
os.sys.path.append(directery_name+"bin")
#build path directory
def get_filepaths(directory):
"""
This function will generate the file names in a directory
tree by walking the tree either top-down or bottom-up. For each
directory in the tree rooted at directory top (including top itself),
it yields a 3-tuple (dirpath, dirnames, filenames).
"""
file_paths = [] # List which will store all of the full filepaths.
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
file_paths.append(filepath) # Add it to the list.
return file_paths # Self-explanatory.
# Run the above function and store its results in a variable.
jar_files_paths = get_filepaths(directery_name+"lib")
# load all jar file
for jarfilename in jar_files_paths:
os.sys.path.append(jarfilename)
# import Library
import org.netlib.lapack
from fr.loria.parole.jsnoori.model.speech.pitch import Pitch
from fr.loria.parole.jsnoori.model.speech.pitch import AsyncPitch
from fr.loria.parole.jsnoori.model.speech import Spectrogram
from java.util import Vector
from fr.loria.parole.jsnoori.util.file.segmentation import TextGridSegmentationFileUtils
from fr.loria.parole.jsnoori.model import ResourcePath
from fr.loria.parole.jsnoori.model import JSnooriProperties
from fr.loria.parole.jsnoori.model import Constants
from fr.loria.parole.jsnoori.model.audio import AudioSignal
from fr.loria.parole.jsnoori.model import Constants
from fr.loria.parole.jsnoori.util import Energy
from fr.loria.parole.jsnoori.util import TimeConversion
## Options
# -i   : input file
# -o   : output file
# -s   : sex of the speaker
# --ts : time scale (ms, s)
# -p   : print the result to a text file or to the console
parser=optparse.OptionParser()
parser.add_option("-i", dest="input",type="string",default=None,help="take input file",)
parser.add_option("-o",dest="output",type="string",default=None,help="write output to file")
parser.add_option("-w",dest="window",type="int",default=32,help="size window (Ms)")
parser.add_option("-t",dest="shift",type="int",default=8,help="time shift (Ms)")
parser.add_option("--ts",dest="scale",type="string",default="ms",help="define time scale's")
parser.add_option("-s",dest="sexe",type="int",default=1,help="choose sexe of speakers ")
parser.add_option("-p",dest="print",help="print result on console")
parser.add_option("--dbmin",dest='dbmin',default=60.0,type="float",help="lowest energy (db)",)
#print pitchs result
def __print__(pitchs,f0median,fmin,fmax, filename,inputfile ,signal,sexe,shift):
#open file
f=open(filename,"w")
t=0
f.write("File name: "+inputfile+"\n")
f.write(str(signal))
f.write("sexe: "+str(sexe)+"\n")
f.write("f0median: "+str(f0median)+"\n")
f.write("fmax: "+str(fmax)+"\n")
f.write("fmin: "+str(fmin)+"\n")
f.write("f00_corrv\tf00_hz\t\tf01_corrv\tf01_hz\t\tf01_corrv\tf02_hz\n")
for k in range(0,len(pitchs)):
line='%.3f\t\t%d\t\t%.3f\t\t%d\t\t%.3f\t\t%d'%(pitchs.get(k).fst[0],pitchs.get(k).snd[0],pitchs.get(k).fst[1],pitchs.get(k).snd[1],pitchs.get(k).fst[2],pitchs.get(k).snd[2])
f.write(line+"\n")
t+=shift
f.close()
# check options
# opts, args = getopt.getopt(sys.argv[1:], "ho:v", ["help", "output="])
# print len(args)
try:
(options, args)=parser.parse_args()
if options.input==None:
parser.error('this command cannot be empty')
except Exception, e:
raise e
try:
command=options.input
except Exception, e:
command=None
if command!=None:
# load wave signal
signal=AudioSignal(command)
# pitch time shift
timeShift=options.shift
# pitch Window
window=options.window
# Pitch's Object
pitch=Pitch(32,timeShift)
# male: 1; female: 0; unknow: -1.
sexe=options.sexe
# compute pitchs
pitchs=pitch.computePitch(signal,sexe)
# compute median F0
f0median=pitch.pitchMedian()
    # get minimal f0
fmin=pitch.getF0Min()
    # get maximal f0
fmax=pitch.getF0Max()
# candidate
candidatesList=pitch.getPitchs();
    # print to file
if(options.output!=None):
__print__(candidatesList,f0median,fmin,fmax,options.output,options.input,signal,sexe,timeShift)
else:
print "error"
| gpl-3.0 | -2,692,751,104,350,632,400 | 31.923077 | 183 | 0.682063 | false |
hankcs/HanLP | hanlp/components/mtl/tasks/tok/reg_tok.py | 1 | 5225 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-08-02 16:51
import logging
from typing import Union, List, Dict, Any, Iterable, Tuple
import torch
from alnlp.modules import util
from torch import Tensor
from torch.utils.data import DataLoader
from hanlp.common.dataset import SamplerBuilder, PadSequenceDataLoader
from hanlp.common.transform import FieldLength, TransformList
from hanlp.components.mtl.tasks import Task
from hanlp.datasets.tokenization.txt import TextTokenizingDataset
from hanlp.layers.scalar_mix import ScalarMixWithDropoutBuilder
from hanlp.layers.transformers.pt_imports import PreTrainedTokenizer
from hanlp.metrics.chunking.binary_chunking_f1 import BinaryChunkingF1
from hanlp.transform.transformer_tokenizer import TransformerSequenceTokenizer
from hanlp_common.util import merge_locals_kwargs
def generate_token_span_tuple(sample: dict):
prefix_mask = sample.get('text_prefix_mask', None)
if prefix_mask:
sample['span_tuple'] = spans = []
previous_prefix = 0
prefix_mask_ = prefix_mask[1:-1]
for i, mask in enumerate(prefix_mask_):
if i and mask:
spans.append((previous_prefix, i))
previous_prefix = i
spans.append((previous_prefix, len(prefix_mask_)))
return sample
class RegressionTokenizingDecoder(torch.nn.Linear):
def __init__(self, in_features: int, out_features: int = 1, bias: bool = ...) -> None:
super().__init__(in_features, out_features, bias)
# noinspection PyMethodOverriding
def forward(self, input: Tensor, **kwargs) -> Tensor:
return super().forward(input[:, 1:-1, :]).squeeze_(-1)
class RegressionTokenization(Task):
def __init__(self, trn: str = None, dev: str = None, tst: str = None, sampler_builder: SamplerBuilder = None,
dependencies: str = None, scalar_mix: ScalarMixWithDropoutBuilder = None,
use_raw_hidden_states=True, lr=1e-3, separate_optimizer=False, delimiter=None,
max_seq_len=None, sent_delimiter=None) -> None:
super().__init__(**merge_locals_kwargs(locals()))
def build_criterion(self, **kwargs):
return torch.nn.BCEWithLogitsLoss(reduction='mean')
def build_metric(self, **kwargs):
return BinaryChunkingF1()
# noinspection PyMethodOverriding
def build_model(self, encoder_size, training=True, **kwargs) -> torch.nn.Module:
return RegressionTokenizingDecoder(encoder_size)
def predict(self, data: Union[str, List[str]], batch_size: int = None, **kwargs):
pass
def build_dataloader(self,
data,
transform: TransformList = None,
training=False,
device=None,
logger: logging.Logger = None,
tokenizer: PreTrainedTokenizer = None,
**kwargs) -> DataLoader:
assert tokenizer
dataset = TextTokenizingDataset(data, cache=isinstance(data, str), delimiter=self.config.sent_delimiter,
generate_idx=isinstance(data, list),
max_seq_len=self.config.max_seq_len,
sent_delimiter=self.config.sent_delimiter,
transform=[
TransformerSequenceTokenizer(tokenizer,
'text',
ret_prefix_mask=True,
ret_subtokens=True,
),
FieldLength('text_input_ids', 'text_input_ids_length', delta=-2),
generate_token_span_tuple])
return PadSequenceDataLoader(
batch_sampler=self.sampler_builder.build(self.compute_lens(data, dataset, 'text_input_ids', 'text'),
shuffle=training),
device=device,
dataset=dataset)
def decode_output(self,
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
batch: Dict[str, Any], **kwargs) -> List[Tuple[int, int]]:
spans = BinaryChunkingF1.decode_spans(output > 0, batch['text_input_ids_length'])
return spans
def update_metrics(self, batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
prediction: List[Tuple[int, int]], metric: BinaryChunkingF1):
metric.update(prediction, batch['span_tuple'])
def compute_loss(self, batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any], criterion):
mask = util.lengths_to_mask(batch['text_input_ids_length'])
return criterion(output[mask], batch['text_prefix_mask'][:, 1:-1][mask].to(torch.float))
| apache-2.0 | 9,092,129,869,008,547,000 | 46.93578 | 115 | 0.576459 | false |
CSIRT-MU/Stream4Flow | applications/detection/ddos/spark/detection_ddos.py | 1 | 10022 | # -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2016 Michal Stefanik <[email protected]>, Milan Cermak <[email protected]>
# Institute of Computer Science, Masaryk University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Description: A method for detection of DoS/DDoS attacks based on an evaluation of
the incoming/outgoing packet volume ratio and its variance to the long-time (long window) ratio.
Usage:
detection_ddos.py -iz <input-zookeeper-hostname>:<input-zookeeper-port> -it <input-topic>
-oz <output-zookeeper-hostname>:<output-zookeeper-port> -ot <output-topic> -nf <regex for network range>
To run this on the Stream4Flow, you need to receive flows by IPFIXCol and make them available via Kafka topic. Then
you can run the example
$ /home/spark/applications/run-application.sh detection/ddos/spark/detection_ddos.py
-iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output -nf "10\.10\..+"
"""
import sys # Common system functions
import os # Common operating system functions
import argparse # Arguments parser
import ujson as json # Fast JSON parser
import socket # Socket interface
import re # Regular expression match
from termcolor import cprint # Colors in the console output
from pyspark import SparkContext # Spark API
from pyspark.streaming import StreamingContext # Spark streaming API
from pyspark.streaming.kafka import KafkaUtils # Spark streaming Kafka receiver
from kafka import KafkaProducer # Kafka Python client
def send_to_kafka(data, producer, topic):
"""
Send given data to the specified kafka topic.
:param data: data to send
:param producer: producer that sends the data
:param topic: name of the receiving kafka topic
"""
producer.send(topic, str(data))
def print_and_send(rdd, producer, topic):
"""
Transform given computation results into the JSON format and send them to the specified host.
JSON format:
{"@type": "detection.ddos", "host" : <destination_ip> "shortratio" : <short-term ratio>,
"longratio": <long-term ration>, "attackers": [set of attackers]}
:param rdd: rdd to be parsed and sent
:param producer: producer that sends the data
:param topic: name of the receiving kafka topic
"""
results = ""
rdd_map = rdd.collectAsMap()
# generate JSON response for each aggregated rdd
for host, stats in rdd_map.iteritems():
short_ratio = float(stats[0][0]) / stats[0][1]
long_ratio = float(stats[1][0]) / stats[1][1]
attackers = list(stats[0][2])
new_entry = {"@type": "detection.ddos",
"dst_ip": host,
"shortratio": short_ratio,
"longratio": long_ratio,
"attackers": attackers}
results += ("%s\n" % json.dumps(new_entry))
# Print results to stdout
cprint(results)
# Send results to the specified kafka topic
send_to_kafka(results, producer, topic)
def inspect_ddos(stream_data):
"""
Main method performing the flows aggregation in short and long window and comparison of their ratios
:type stream_data: Initialized spark streaming context.
"""
# Create regex for monitored network
local_ip_pattern = re.compile(network_filter)
# Filter only the data with known source and destination IP
filtered_stream_data = stream_data \
.map(lambda x: json.loads(x[1])) \
.filter(lambda json_rdd: ("ipfix.sourceIPv4Address" in json_rdd.keys() and
"ipfix.destinationIPv4Address" in json_rdd.keys()
))
# Create stream of base windows
small_window = filtered_stream_data.window(base_window_length, base_window_length)
# Count number of incoming packets from each source ip address for each destination ip address
# from a given network range
incoming_small_flows_stats = small_window \
.filter(lambda json_rdd: re.match(local_ip_pattern, json_rdd["ipfix.destinationIPv4Address"])) \
.map(lambda json_rdd: (json_rdd["ipfix.destinationIPv4Address"],
(json_rdd["ipfix.packetDeltaCount"], 0, {json_rdd["ipfix.sourceIPv4Address"]})))
# Count number of outgoing packets for each source ip address from a given network range
outgoing_small_flows_stats = small_window \
.filter(lambda json_rdd: re.match(local_ip_pattern, json_rdd["ipfix.sourceIPv4Address"])) \
.map(lambda json_rdd: (json_rdd["ipfix.sourceIPv4Address"],
                               (0, json_rdd["ipfix.packetDeltaCount"], set())))
# Merge DStreams of incoming and outgoing number of packets
small_window_aggregated = incoming_small_flows_stats.union(outgoing_small_flows_stats)\
.reduceByKey(lambda actual, update: (actual[0] + update[0],
actual[1] + update[1],
actual[2].union(update[2])))
# Create long window for long term profile
union_long_flows = small_window_aggregated.window(long_window_length, base_window_length)
long_window_aggregated = union_long_flows.reduceByKey(lambda actual, update: (actual[0] + update[0],
actual[1] + update[1])
)
# Union DStreams with small and long window
# RDD in DStream in format (local_device_IPv4, (
# (short_inc_packets, short_out_packets, short_source_IPv4s),
# (long_inc_packets, long_out_packets)))
windows_union = small_window_aggregated.join(long_window_aggregated)
# Filter out zero values to prevent division by zero
nonzero_union = windows_union.filter(lambda rdd: rdd[1][0][1] != 0 and rdd[1][1][1] != 0)
# Compare incoming and outgoing transfers volumes and filter only those suspicious
# -> overreaching the minimal_incoming volume of packets and
# -> short-term ratio is greater than long-term ratio * threshold
windows_union_filtered = nonzero_union.filter(lambda rdd: rdd[1][0][0] > minimal_incoming and
float(rdd[1][0][0]) / rdd[1][0][1] > float(rdd[1][1][0]) /
rdd[1][1][1] * threshold
)
# Return the detected records
return windows_union_filtered
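    # Each returned record pairs the short-window stats with the long-window
    # profile, e.g. (illustrative values):
    #   ("10.10.0.5", ((120000, 800, {"203.0.113.7"}), (150000, 90000)))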
if __name__ == "__main__":
# Prepare arguments parser (automatically creates -h argument).
parser = argparse.ArgumentParser()
parser.add_argument("-iz", "--input_zookeeper", help="input zookeeper hostname:port", type=str, required=True)
parser.add_argument("-it", "--input_topic", help="input kafka topic", type=str, required=True)
parser.add_argument("-oz", "--output_zookeeper", help="output zookeeper hostname:port", type=str, required=True)
parser.add_argument("-ot", "--output_topic", help="output kafka topic", type=str, required=True)
parser.add_argument("-nf", "--network_filter", help="regular expression filtering the watched IPs", type=str, required=True)
# Parse arguments.
args = parser.parse_args()
# Set variables
application_name = os.path.basename(sys.argv[0]) # Application name used as identifier
kafka_partitions = 1 # Number of partitions of the input Kafka topic
# Set method parameters:
threshold = 50 # Minimal increase of receive/sent packets ratio
minimal_incoming = 100000 # Minimal count of incoming packets
long_window_length = 7200 # Window length for average ratio computation (must be a multiple of microbatch interval)
base_window_length = 30 # Window length for basic computation (must be a multiple of microbatch interval)
network_filter = args.network_filter # Filter for network for detection (regex filtering), e.g. "10\.10\..+"
# Spark context initialization
sc = SparkContext(appName=application_name + " " + " ".join(sys.argv[1:])) # Application name used as the appName
ssc = StreamingContext(sc, 1) # Spark microbatch is 1 second
# Initialize input DStream of flows from specified Zookeeper server and Kafka topic
input_stream = KafkaUtils.createStream(ssc, args.input_zookeeper, "spark-consumer-" + application_name,
{args.input_topic: kafka_partitions})
# Run the detection of ddos
ddos_result = inspect_ddos(input_stream)
# Initialize kafka producer
kafka_producer = KafkaProducer(bootstrap_servers=args.output_zookeeper,
client_id="spark-producer-" + application_name)
# Process the results of the detection and send them to the specified host
ddos_result.foreachRDD(lambda rdd: print_and_send(rdd, kafka_producer, args.output_topic))
# Send any remaining buffered records
kafka_producer.flush()
# Start input data processing
ssc.start()
ssc.awaitTermination()
| mit | -2,848,101,401,985,156,600 | 45.398148 | 128 | 0.669627 | false |
dradux/tracker | web/app.py | 1 | 3934 | # app.py
import sys, os, logging
from logging.handlers import RotatingFileHandler
import os.path as op
from flask import Flask, render_template, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask.ext.security import login_required, Security, SQLAlchemyUserDatastore, utils
import flask_login as login  # assumption: provides logout_user() used in logout_view below
import flask_admin as admin
from config import BaseConfig
import config as Config
from werkzeug.contrib.fixers import ProxyFix
# Create application
app = Flask(__name__)
app.config.from_object(BaseConfig)
db = SQLAlchemy(app)
from models import *
from views import *
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
#~ @TODO: find a better way as the following causes issues with flex-migrate.
#~
#~ @app.before_first_request
#~ def before_first_request():
#~ # Create any database tables that don't exist yet.
#~ db.create_all()
#~ #app.logger.info('### NOTICES ###')
#~ app.logger.debug('* setting DEFAULT_ADMIN_USER: %s' % (Config.DefaultConfig.DEFAULT_ADMIN_USER))
#~ app.logger.debug('* setting DEFAULT_ADMIN_PASSWORD: %s' % (Config.DefaultConfig.DEFAULT_ADMIN_PASSWORD))
#~ # Create the Roles "admin" and "user" -- unless they already exist
#~ user_datastore.find_or_create_role(name='admin', description='Administrator')
#~ user_datastore.find_or_create_role(name='user', description='User')
#~ encrypted_password = utils.encrypt_password(Config.DefaultConfig.DEFAULT_ADMIN_PASSWORD)
#~ app.logger.debug('- encrypted_password: %s' % (encrypted_password))
#~ if not user_datastore.get_user(Config.DefaultConfig.DEFAULT_ADMIN_USER):
#~ user_datastore.create_user(name='admin', username='admin', email=Config.DefaultConfig.DEFAULT_ADMIN_USER, password=encrypted_password)
#~ # Commit any database changes; the User and Roles must exist before we can add a Role to the User
#~ db.session.commit()
#~ # assign roles
#~ user_datastore.add_role_to_user(Config.DefaultConfig.DEFAULT_ADMIN_USER, 'admin')
#~ user_datastore.add_role_to_user(Config.DefaultConfig.DEFAULT_ADMIN_USER, 'user')
#~ db.session.commit()
@app.route('/')
@login_required
def index():
return render_template('index.html')
@app.route('/logout')
def logout_view():
login.logout_user()
return redirect(url_for('admin.index'))
@app.route('/change_password')
def change_password():
return render_template('security/change_password.html')
# Create admin
admin = admin.Admin(app, name='TRacker', base_template='layout.html', index_view=HomeView(name='Home'))
# Add views
admin.add_view(TestResultView(TestResult, db.session, name='Test Results'))
admin.add_view(RunMetricView(RunMetric, db.session, name='Run Metrics', category='Config'))
admin.add_view(ServerView(Server, db.session, name='Servers', category='Config'))
admin.add_view(TestResultStatusView(TestResultStatus, db.session, name='Statuses', category='Config'))
admin.add_view(TagView(Tag, db.session, name='Tags', category='Config'))
admin.add_view(TestPlanView(TestPlan, db.session, name='Test Plans', category='Config'))
admin.add_view(UserAdmin(User, db.session, name='Users', category='Config'))
#admin.add_view(AccountLogoutView(name='Logout', endpoint='account_logout', category='Account'))
admin.add_view(AccountView(name='User', endpoint='account_user', category='Account'))
admin.add_link(LogoutMenuLink(name='Logout', category='', url="/logout"))
admin.add_link(LoginMenuLink(name='Login', category='', url="/login?next=/admin/"))
admin.add_view(OnlineHelpView(name='Online Help', endpoint='online_help', category='Help'))
admin.add_view(AboutView(name='About', endpoint='help_about', category='Help'))
#proxyfix in case you are running your site on a non-standard port/proxying.
app.wsgi_app = ProxyFix(app.wsgi_app)
if __name__ == '__main__':
app_dir = op.realpath(os.path.dirname(__file__))
# Start app
app.run(debug=True)
| gpl-3.0 | -1,318,932,726,950,668,000 | 38.737374 | 145 | 0.728266 | false |
snyaggarwal/pex | tests/test_pex_binary.py | 1 | 3548 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from contextlib import contextmanager
from optparse import OptionParser
from pex.bin.pex import configure_clp, configure_clp_pex_resolution
from pex.fetcher import PyPIFetcher
from pex.package import SourcePackage, WheelPackage
from pex.resolver_options import ResolverOptionsBuilder
from pex.sorter import Sorter
@contextmanager
def parser_pair():
builder = ResolverOptionsBuilder()
parser = OptionParser()
yield builder, parser
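# Each test below pulls a fresh (ResolverOptionsBuilder, OptionParser) pair from
# this context manager so option side effects cannot leak between test cases.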
def test_clp_no_pypi_option():
with parser_pair() as (builder, parser):
configure_clp_pex_resolution(parser, builder)
assert len(builder._fetchers) == 1
options, _ = parser.parse_args(args=['--no-pypi'])
assert len(builder._fetchers) == 0, '--no-pypi should remove fetchers.'
assert options.repos == builder._fetchers
def test_clp_pypi_option_duplicate():
with parser_pair() as (builder, parser):
configure_clp_pex_resolution(parser, builder)
assert len(builder._fetchers) == 1
options, _ = parser.parse_args(args=['--pypi'])
assert len(builder._fetchers) == 1
assert options.repos == builder._fetchers
# TODO(wickman) We should probably add fetchers in order.
def test_clp_repo_option():
with parser_pair() as (builder, parser):
configure_clp_pex_resolution(parser, builder)
assert len(builder._fetchers) == 1
options, _ = parser.parse_args(args=['-f', 'http://www.example.com'])
assert len(builder._fetchers) == 2
assert builder._fetchers == options.repos
def test_clp_index_option():
with parser_pair() as (builder, parser):
configure_clp_pex_resolution(parser, builder)
assert len(builder._fetchers) == 1
options, _ = parser.parse_args(args=['-i', 'http://www.example.com'])
assert len(builder._fetchers) == 2
assert builder._fetchers == options.repos
assert builder._fetchers[1] == PyPIFetcher('http://www.example.com')
def test_clp_build_precedence():
with parser_pair() as (builder, parser):
configure_clp_pex_resolution(parser, builder)
assert builder._precedence == Sorter.DEFAULT_PACKAGE_PRECEDENCE
parser.parse_args(args=['--no-build'])
assert SourcePackage not in builder._precedence
parser.parse_args(args=['--build'])
assert SourcePackage in builder._precedence
options, _ = parser.parse_args(args=['--no-wheel'])
assert WheelPackage not in builder._precedence
assert not options.use_wheel
options, _ = parser.parse_args(args=['--wheel'])
assert WheelPackage in builder._precedence
assert options.use_wheel
# Make sure that we're doing append and not replace
def test_clp_requirements_txt():
parser, builder = configure_clp()
options, _ = parser.parse_args(args='-r requirements1.txt -r requirements2.txt'.split())
assert options.requirement_files == ['requirements1.txt', 'requirements2.txt']
def test_clp_constraints_txt():
parser, builder = configure_clp()
options, _ = parser.parse_args(args='--constraint requirements1.txt'.split())
assert options.constraint_files == ['requirements1.txt']
def test_clp_prereleases():
with parser_pair() as (builder, parser):
configure_clp_pex_resolution(parser, builder)
options, _ = parser.parse_args(args=[])
assert not builder._allow_prereleases
options, _ = parser.parse_args(args=['--no-pre'])
assert not builder._allow_prereleases
options, _ = parser.parse_args(args=['--pre'])
assert builder._allow_prereleases
| apache-2.0 | 1,874,773,087,032,883,500 | 33.784314 | 90 | 0.710541 | false |
pony012/PruebaServicioCucea | app/app.py | 1 | 2764 | # from flask import Flask
from flask import render_template, redirect, url_for, flash
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import Required, Length, EqualTo, DataRequired
# from flask_mongoengine import MongoEngine
from flask_security import current_user, login_user
from flask_security.core import UserMixin, AnonymousUser
import config
from db import user_datastore
# from models.User import User
# from models.Role import Role
from models.Usuario import Usuario
app = config.app
db_sql = config.db_sql
# Create a user to test with
@app.before_first_request
def create_user():
db_sql.drop_all()
db_sql.create_all()
user_datastore.create_user(email='alan', password='password')
user_datastore.commit()
# if(User.objects.filter(email='[email protected]').count() == 0):
# db.security.datastore.create_user(email='[email protected]',
# password='password')
class LoginForm2(FlaskForm):
email = StringField('Correo', validators=[Required(), Length(1, 64)])
password = PasswordField('Password', validators=[Required()])
remember_me = BooleanField('Recordar', validators=[Required()])
submit = SubmitField('Login')
@app.route('/login', methods=['GET', 'POST'])
def login():
"""User login route."""
if current_user.is_authenticated():
# if user is logged in we get out of here
return redirect(url_for('index'))
form = LoginForm2()
if form.validate_on_submit():
user = Usuario.query.filter_by(username=form.email.data).first()
if user is None or not user.verify_password(form.password.data) or \
not user.verify_totp(form.token.data):
flash('Invalid username, password or token.')
return redirect(url_for('login'))
# log user in
login_user(user)
flash('You are now logged in!')
return redirect(url_for('index'))
print form
print "Form"
return render_template('login_user.html', form2=form)
class user_role_form(FlaskForm):
    user = StringField(u'Usuario', validators=[DataRequired()])
    role = StringField(u'Rol', validators=[DataRequired()])
submit = SubmitField(label="Ligar")
@app.route('/user_role/<user>/<role>')
def user_role(user, role):
form = user_role_form()
return render_template('user_role.html', form=form, user=user, role=role)
# app.add_url_rule('/user_role/<user>/<role>', view_func=user_role)
# Views
@app.route('/')
# @login_required
def home():
user = UserMixin
if user.is_anonymous:
user = AnonymousUser
return render_template('index.html', user=user)
if __name__ == '__main__':
app.run()
| mit | -703,852,059,490,693,200 | 30.770115 | 77 | 0.671852 | false |
utarsuno/quasar_source | deprecated/c_processes/c_compiler.py | 1 | 1140 | # coding=utf-8
"""This module, c_compiler.py, is a utility program to compiling c programs."""
from universal_code.shell_abstraction.shell_command_runner import run_shell_command_and_get_results
def create_object_file(source_file_path, object_output_path):
"""Creates an object file."""
return _run_command_and_return_output('gcc -c ' + source_file_path + ' -o ' + object_output_path)
def create_executable(source_file_path, c_libraries, object_output_path):
"""Creates a new executable file."""
object_file_paths_as_string = ''
for o in c_libraries:
object_file_paths_as_string += ' ' + o.path_to_object_file + ' '
return _run_command_and_return_output('gcc -Wall -O2 ' + source_file_path + ' ' + object_file_paths_as_string + ' -o ' + object_output_path)
def _run_command_and_return_output(shell_command):
"""Runs the provided shell command."""
output_stdout, output_stderr = run_shell_command_and_get_results(shell_command)
output_stdout = output_stdout.decode('utf-8')
output_stderr = output_stderr.decode('utf-8')
if len(output_stderr):
print('ERROR')
print(output_stderr)
return output_stdout, output_stderr
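# Example usage (illustrative paths; assumes gcc is on PATH):
#   out, err = create_object_file('src/util.c', 'build/util.o')
#   out, err = _run_command_and_return_output('gcc --version')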
| mit | -659,934,029,010,199,700 | 38.310345 | 141 | 0.716667 | false |
MSEMJEJME/Get-Dumped | renpy/easy.py | 1 | 4218 | # Copyright 2004-2012 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Functions that make the user's life easier.
import renpy.display
import contextlib
import time
def color(c):
"""
This function returns a color tuple, from a hexcode string or a
color tuple.
"""
if isinstance(c, tuple) and len(c) == 4:
return c
if c is None:
return c
if isinstance(c, basestring):
if c[0] == '#':
c = c[1:]
if len(c) == 6:
r = int(c[0]+c[1], 16)
g = int(c[2]+c[3], 16)
b = int(c[4]+c[5], 16)
a = 255
elif len(c) == 8:
r = int(c[0]+c[1], 16)
g = int(c[2]+c[3], 16)
b = int(c[4]+c[5], 16)
a = int(c[6]+c[7], 16)
elif len(c) == 3:
r = int(c[0], 16) * 0x11
g = int(c[1], 16) * 0x11
b = int(c[2], 16) * 0x11
a = 255
elif len(c) == 4:
r = int(c[0], 16) * 0x11
g = int(c[1], 16) * 0x11
b = int(c[2], 16) * 0x11
a = int(c[3], 16) * 0x11
else:
raise Exception("Color string must be 3, 4, 6, or 8 hex digits long.")
return (r, g, b, a)
raise Exception("Not a color: %r" % (c,))
def displayable_or_none(d):
if isinstance(d, renpy.display.core.Displayable):
return d
if d is None:
return d
if isinstance(d, basestring):
if d[0] == '#':
return renpy.store.Solid(d)
elif "." in d:
return renpy.store.Image(d)
elif not d:
raise Exception("Displayable cannot be an empty string.")
else:
return renpy.store.ImageReference(tuple(d.split()))
# We assume the user knows what he's doing in this case.
if hasattr(d, 'parameterize'):
return d
if d is True or d is False:
return d
raise Exception("Not a displayable: %r" % (d,))
def displayable(d):
"""
:doc: udd_utility
:name: renpy.displayable
This takes `d`, which may be a displayable object or a string. If it's
a string, it converts that string into a displayable using the usual
rules.
"""
if isinstance(d, renpy.display.core.Displayable):
return d
if isinstance(d, basestring):
if not d:
raise Exception("An empty string cannot be used as a displayable.")
elif d[0] == '#':
return renpy.store.Solid(d)
elif "." in d:
return renpy.store.Image(d)
else:
return renpy.store.ImageReference(tuple(d.split()))
# We assume the user knows what he's doing in this case.
if hasattr(d, 'parameterize'):
return d
if d is True or d is False:
return d
raise Exception("Not a displayable: %r" % (d,))
def predict(d):
d = renpy.easy.displayable_or_none(d)
if d is not None:
renpy.display.predict.displayable(d)
@contextlib.contextmanager
def timed(name):
start = time.time()
yield
print "{0}: {1:.2f} ms".format(name, (time.time() - start) * 1000.0)
| gpl-2.0 | 1,460,786,579,161,761,800 | 28.704225 | 82 | 0.584637 | false |
carbonblack/cb-event-duplicator | cbopensource/tools/eventduplicator/transporter.py | 1 | 14136 | from __future__ import absolute_import, division, print_function
import logging
import datetime
from cbopensource.tools.eventduplicator.utils import get_process_id, get_parent_process_id
import sys
__author__ = 'jgarman'
log = logging.getLogger(__name__)
class Transporter(object):
def __init__(self, input_source, output_sink, tree=False):
self.input_md5set = set()
self.input_proc_guids = set()
self.input = input_source
self.output = output_sink
self.mungers = [CleanseSolrData()]
self.seen_sensor_ids = set()
self.seen_feeds = set()
self.seen_feed_ids = set()
self.traverse_tree = tree
def add_anonymizer(self, munger):
self.mungers.append(munger)
def output_process_doc(self, doc):
for munger in self.mungers:
doc = munger.munge_document('proc', doc)
sys.stdout.write('%-70s\r' % ("Uploading process %s..." % get_process_id(doc)))
sys.stdout.flush()
self.output.output_process_doc(doc)
def output_feed_doc(self, doc):
for munger in self.mungers:
doc = munger.munge_document('feed', doc)
# check if we have seen this feed_id before
feed_id = doc['feed_id']
if feed_id not in self.seen_feed_ids:
feed_metadata = self.input.get_feed_metadata(feed_id)
if feed_metadata:
# note that without feed metadata, bad things may happen on the Cb UI side...
self.output.output_feed_metadata(feed_metadata)
self.seen_feed_ids.add(feed_id)
self.output.output_feed_doc(doc)
def output_binary_doc(self, doc):
for munger in self.mungers:
# note that the mungers are mutating the data in place, anyway.
doc = munger.munge_document('binary', doc)
sys.stdout.write('%-70s\r' % ("Uploading binary %s..." % doc['md5']))
sys.stdout.flush()
self.output.output_binary_doc(doc)
def output_sensor_info(self, doc):
for munger in self.mungers:
# note that the mungers are mutating the data in place, anyway.
doc['sensor_info'] = munger.munge_document('sensor', doc['sensor_info'])
self.output.output_sensor_info(doc)
def update_sensors(self, proc):
sensor_id = proc.get('sensor_id', 0)
if not sensor_id:
return []
if sensor_id and sensor_id not in self.seen_sensor_ids:
# notify caller that this sensor_id has to be inserted into the target
self.seen_sensor_ids.add(sensor_id)
return [sensor_id]
return []
def update_md5sums(self, proc):
md5s = set()
process_md5 = proc.get('process_md5', None)
if process_md5 and process_md5 != '0'*32:
md5s.add(proc.get('process_md5'))
for modload_complete in proc.get('modload_complete', []):
fields = modload_complete.split('|')
md5s.add(fields[1])
retval = md5s - self.input_md5set
self.input_md5set |= md5s
return retval
def traverse_up(self, guid):
# TODO: this prompts a larger issue of - how do we handle process segments?
total = []
for proc in self.input.get_process_docs('unique_id:%s' % (guid,)):
process_id = get_process_id(proc)
if process_id not in self.input_proc_guids:
self.input_proc_guids.add(process_id)
total.append(proc)
parent_process_id = get_parent_process_id(proc)
if parent_process_id and parent_process_id not in self.input_proc_guids:
total.extend(self.traverse_up(parent_process_id))
return total
def traverse_down(self, guid):
total = []
for proc in self.input.get_process_docs('parent_unique_id:%s' % (guid,)):
process_id = get_process_id(proc)
if process_id not in self.input_proc_guids:
self.input_proc_guids.add(process_id)
total.append(proc)
total.extend(self.traverse_down(process_id))
return total
def traverse_up_down(self, proc):
# TODO: infinite recursion prevention
parent_process_id = get_parent_process_id(proc)
process_id = get_process_id(proc)
total = []
# get parents
if parent_process_id:
total.extend(self.traverse_up(parent_process_id))
total.extend(self.traverse_down(process_id))
for proc in total:
yield proc
def get_process_docs(self):
for proc in self.input.get_process_docs():
process_id = get_process_id(proc)
if process_id not in self.input_proc_guids:
self.input_proc_guids.add(get_process_id(proc))
yield proc
if self.traverse_tree:
for tree_proc in self.traverse_up_down(proc):
yield tree_proc
def update_feeds(self, doc):
feed_keys = [k for k in doc.keys() if k.startswith('alliance_data_')]
feed_lookup = set()
for key in feed_keys:
feed_name = key[14:]
for doc_name in doc[key]:
feed_lookup.add("%s:%s" % (feed_name, doc_name))
retval = feed_lookup - self.seen_feeds
self.seen_feeds |= feed_lookup
return retval
@staticmethod
def generate_fake_sensor(sensor_id):
sensor = {'build_info': {'architecture': 32,
'build_version': 50106,
'id': 9,
'installer_avail': True,
'major_version': 5,
'minor_version': 0,
'patch_version': 0,
'upgrader_avail': True,
'version_string': '005.000.000.50106'},
'os_info': {'architecture': 32,
'display_string': 'Windows 7 Ultimate Edition Service Pack 1, 32-bit',
'id': 1,
'major_version': 6,
'minor_version': 1,
'os_type': 1,
'product_type': 1,
'service_pack': 'Service Pack 1',
'suite_mask': 256},
'sensor_info': {'boot_id': 17,
'build_id': 9,
'clock_delta': 2654783,
'computer_dns_name': 'sensor%d' % sensor_id,
'computer_name': 'sensor%d' % sensor_id,
'computer_sid': 'S-1-5-21-2002419555-2189168078-3210101973',
'cookie': 1962833602,
'display': True,
'emet_dump_flags': None,
'emet_exploit_action': None,
'emet_is_gpo': False,
'emet_process_count': 0,
'emet_report_setting': None,
'emet_telemetry_path': None,
'emet_version': None,
'event_log_flush_time': None,
'group_id': 1,
'id': sensor_id,
'last_checkin_time': datetime.datetime(2015, 6, 30, 6, 9, 15, 570570),
'last_update': datetime.datetime(2015, 6, 30, 6, 9, 18, 170552),
'license_expiration': datetime.datetime(1990, 1, 1, 0, 0),
'network_adapters': '192.168.10.241,000c19e962f6|192.168.10.5,000c23b742dc|',
'network_isolation_enabled': False,
'next_checkin_time': datetime.datetime(2015, 6, 30, 6, 9, 45, 564598),
'node_id': 0,
'notes': None,
'num_eventlog_bytes': 400,
'num_storefiles_bytes': 10304408,
'os_environment_id': 1,
'parity_host_id': 2,
'physical_memory_size': 1073209344,
'power_state': 0,
'registration_time': datetime.datetime(2015, 1, 23, 15, 39, 54, 911720),
'restart_queued': False,
'sensor_health_message': 'Healthy',
'sensor_health_status': 100,
'sensor_uptime': 2976455,
'session_token': 0,
'supports_2nd_gen_modloads': False,
'supports_cblr': True,
'supports_isolation': True,
'systemvolume_free_size': 49276923904,
'systemvolume_total_size': 64422408192,
'uninstall': False,
'uninstalled': None,
'uptime': 340776}}
return sensor
def transport(self, debug=False):
# TODO: multithread this so we have some parallelization
log.info("Starting transport from %s to %s" % (self.input.connection_name(), self.output.connection_name()))
input_version = self.input.get_version()
if not self.output.set_data_version(input_version):
raise Exception("Input and Output versions are incompatible")
# get process list
for i, proc in enumerate(self.get_process_docs()):
new_md5sums = self.update_md5sums(proc)
new_sensor_ids = self.update_sensors(proc)
new_feed_ids = self.update_feeds(proc)
# output docs, sending binaries & sensors first
for md5sum in new_md5sums:
doc = self.input.get_binary_doc(md5sum)
if doc:
new_feed_ids |= self.update_feeds(doc)
self.output_binary_doc(doc)
else:
log.warning("Could not retrieve the binary MD5 %s referenced in the process with ID: %s"
% (md5sum, proc['unique_id']))
# TODO: right now we don't munge sensor or feed documents
for sensor in new_sensor_ids:
doc = self.input.get_sensor_doc(sensor)
if not doc:
log.warning("Could not retrieve sensor info for sensor id %s referenced in the process with ID: %s"
% (md5sum, proc['unique_id']))
doc = self.generate_fake_sensor(sensor)
self.output_sensor_info(doc)
for feed in new_feed_ids:
doc = self.input.get_feed_doc(feed)
if doc:
self.output_feed_doc(doc)
else:
log.warning("Could not retrieve feed document for id %s referenced in the process with ID: %s"
% (md5sum, proc['unique_id']))
self.output_process_doc(proc)
# clean up
self.input.cleanup()
self.output.cleanup()
sys.stdout.write('%-70s\r' % "")
sys.stdout.flush()
log.info("Transport complete from %s to %s" % (self.input.connection_name(), self.output.connection_name()))
def get_report(self):
return self.output.report()
class CleanseSolrData(object):
def __init__(self):
pass
@staticmethod
def munge_document(doc_type, doc_content):
doc_content.pop('_version_', None)
for key in list(doc_content):
if key.endswith('_facet'):
doc_content.pop(key, None)
return doc_content
class DataAnonymizer(object):
def __init__(self):
pass
@staticmethod
def translate(s):
"""
Super dumb translation for anonymizing strings.
:param s: input string
"""
s_new = ''
for c in s:
if c == '\\':
s_new += c
else:
c = chr((ord(c)-65 + 13) % 26 + 65)
s_new += c
return s_new
@staticmethod
def anonymize(doc):
hostname = doc.get('hostname', '')
hostname_new = DataAnonymizer.translate(hostname)
username = doc.get('username', '')
translation_usernames = {}
if len(username) > 0:
if username.lower() != 'system' and username.lower() != 'local service' and username.lower() != \
'network service':
pieces = username.split('\\')
for piece in pieces:
translation_usernames[piece] = DataAnonymizer.translate(piece)
for field in doc:
values = doc[field]
try:
if not values:
continue
was_list = True
targets = values
if not hasattr(values, '__iter__'):
was_list = False
targets = [values]
values = []
for target in targets:
target = target.replace(hostname, hostname_new)
for key in translation_usernames:
target = target.replace(key, translation_usernames.get(key))
values.append(target)
if not was_list:
values = values[0]
doc[field] = values
except AttributeError:
pass
return doc
def munge_document(self, doc_type, doc_content):
return self.anonymize(doc_content)
| mit | 3,804,607,781,867,005,400 | 37.835165 | 119 | 0.491865 | false |
GrognardsFromHell/TemplePlus | tpdatasrc/co8infra/scr/Spell740 - Ray of Clumsiness.py | 1 | 1719 | from toee import *
import tpdp
def OnBeginSpellCast( spell ):
print "Ray of Cluminess OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
def OnSpellEffect( spell ):
print "Ray of Clumsiness OnSpellEffect"
def OnBeginRound( spell ):
print "Ray of Clumsiness OnBeginRound"
def OnBeginProjectile( spell, projectile, index_of_target ):
print "Ray of Clumsiness OnBeginProjectile"
#spell.proj_partsys_id = game.particles( 'sp-Ray of Enfeeblement', projectile )
projectile.obj_set_int( obj_f_projectile_part_sys_id, game.particles( 'sp-Ray of Enfeeblement', projectile ) )
def OnEndProjectile( spell, projectile, index_of_target ):
print "Ray of Clumsiness OnEndProjectile"
target_item = spell.target_list[0]
dam_bonus = min( 5, spell.caster_level / 2 )
dam_amount = spell.roll_dice_with_metamagic(1, 6, dam_bonus)
dam_amount = -dam_amount
print "amount=", dam_amount
spell.duration = 10 * spell.caster_level
game.particles_end( projectile.obj_get_int( obj_f_projectile_part_sys_id ) )
if spell.caster.perform_touch_attack( target_item.obj ) & D20CAF_HIT:
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 20022, tf_red )
target_item.obj.condition_add_with_args( 'sp-Cats Grace', spell.id, spell.duration, dam_amount )
target_item.partsys_id = game.particles( 'sp-Ray of Enfeeblement-Hit', target_item.obj )
else:
# missed
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30007 )
game.particles( 'Fizzle', target_item.obj )
spell.target_list.remove_target( target_item.obj )
spell.spell_end( spell.id )
def OnEndSpellCast( spell ):
print "Ray of Clumsiness OnEndSpellCast" | mit | -6,098,167,636,621,271,000 | 31.45283 | 111 | 0.733566 | false |
arkon/cdf-scrapers | labs/labs.py | 1 | 2979 | from collections import OrderedDict
from html.parser import HTMLParser
import argparse
import datetime
import json
import os
import sys
import time
import urllib.request
class PageParser(HTMLParser):
"""Parser for CDF Lab Machine Usage page."""
def __init__(self):
HTMLParser.__init__(self)
# Flag for whether an element should be parsed
self.inCell = False
# A data row contains 6 cells
self.rowCell = 0
# List of lab rooms/data
self.data = []
# Timestamp
self.timestamp = ''
def handle_starttag(self, tag, attrs):
# Only read <td> tags
if tag == 'td':
self.inCell = True
def handle_data(self, data):
if not self.inCell:
return
if self.rowCell == 0:
if (data != 'NX'):
data = 'BA ' + data
self.data.append(OrderedDict([
('name', data)
]))
elif self.rowCell == 1:
self.data[-1]['available'] = int(data)
elif self.rowCell == 2:
self.data[-1]['busy'] = int(data)
elif self.rowCell == 3:
self.data[-1]['total'] = int(data)
elif self.rowCell == 4:
self.data[-1]['percent'] = float(data)
elif self.rowCell == 5:
if (self.timestamp == ''):
# Attempt to compensate for changing timezones,
# possibly due to daylight savings
rawTime = data.strip('\u00a0\\n')
timestamp = time.strptime(rawTime, '%a %b %d %H:%M:%S %Z %Y')
if timestamp:
self.timestamp = time.strftime(
'%Y-%m-%d %H:%M:%S %Z', timestamp)
self.rowCell = -1
self.rowCell += 1
self.inCell = False
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
description='Scraper for CDF lab data.')
argparser.add_argument(
'-o', '--output',
help='The output path. Defaults to current directory.',
required=False)
argparser.add_argument(
'-f', '--filename',
help='The output filename. Defaults to "cdflabs.json".',
required=False)
args = argparser.parse_args()
output = '.'
filename = 'cdflabs.json'
# Get data
html = str(urllib.request.urlopen(
'http://www.teach.cs.toronto.edu/usage/usage.html').read())
parser = PageParser()
parser.feed(html)
data = OrderedDict([
('timestamp', parser.timestamp),
('labs', parser.data)
])
# Output
if args.output:
if not os.path.exists(args.output):
os.makedirs(args.output)
output = args.output
if args.filename:
filename = args.filename
if args.output or args.filename:
with open('%s/%s' % (output, filename), 'w+') as outfile:
json.dump(data, outfile)
else:
print(json.dumps(data))
| mit | -283,823,200,020,091,780 | 24.681034 | 77 | 0.536757 | false |
mapzen/vector-datasource | scripts/csv_colours.py | 2 | 2023 | from vectordatasource.colour import parse_colour
from vectordatasource.transform import Palette
from optparse import OptionParser
import sys
import csv
import yaml
def to_hex(colour):
return "#%02x%02x%02x" % tuple(colour)
yaml_config_file = None
output_file = sys.stdout
output_colour_key = 'Colour name'
input_colour_key = 'colour'
parser = OptionParser()
parser.add_option("-c", "--config", dest="config",
help="YAML configuration file to read")
parser.add_option("-o", "--output", dest="output",
help="Output file. Default is stdout.")
parser.add_option("-k", "--key", dest="colour_key",
help="Key / CSV header to use for output colour.")
parser.add_option("-i", "--input-key", dest="input_colour_key",
help="Key / CSV header to use for input colour.")
parser.add_option("-x", "--output-hex-key", dest="output_hex_key",
help="Optional key to output hex colour in addition to name")
(options, args) = parser.parse_args()
if options.output:
output_file = open(options.output, 'wb')
with open(options.config, 'rb') as yaml_fh:
config = yaml.load(yaml_fh)
if options.colour_key:
output_colour_key = options.colour_key
palette = Palette(config['colours'])
for file_name in args:
with open(file_name, 'rb') as csvfile:
reader = csv.DictReader(csvfile)
writer = None
for row in reader:
if writer is None:
keys = list(row.keys()) + [output_colour_key]
if options.output_hex_key:
keys.append(options.output_hex_key)
writer = csv.DictWriter(output_file, keys)
writer.writeheader()
colour = parse_colour(row[input_colour_key])
if colour:
c = palette(colour)
row[output_colour_key] = c
if options.output_hex_key:
row[options.output_hex_key] = to_hex(palette.get(c))
writer.writerow(row)
| mit | 1,974,132,946,544,497,700 | 33.288136 | 79 | 0.608008 | false |
Leibniz137/testinfra | testinfra/modules/supervisor.py | 1 | 3502 | # coding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from testinfra.modules.base import Module
STATUS = [
"STOPPED", "STARTING", "RUNNING", "BACKOFF", "STOPPING", "EXITED",
"FATAL", "UNKNOWN",
]
class Supervisor(Module):
"""Test supervisor managed services
>>> gunicorn = Supervisor("gunicorn")
>>> gunicorn.status
'RUNNING'
>>> gunicorn.is_running
True
>>> gunicorn.pid
4242
"""
def __init__(self, name, _attrs_cache=None):
self.name = name
self._attrs_cache = _attrs_cache
super(Supervisor, self).__init__()
@staticmethod
def _parse_status(line):
splitted = line.split()
name = splitted[0]
status = splitted[1]
# supervisorctl exit status is 0 even if it cannot connect to
# supervisord socket and output the error to stdout.
# So we check that parsed status is a known status.
if status not in STATUS:
raise RuntimeError(
"Cannot get supervisor status. Is supervisor running ?")
if status == "RUNNING":
pid = splitted[3]
if pid[-1] == ",":
pid = int(pid[:-1])
else:
pid = int(pid)
else:
pid = None
return {"name": name, "status": status, "pid": pid}
@property
def _attrs(self):
if self._attrs_cache is None:
line = self.check_output("supervisorctl status %s", self.name)
attrs = self._parse_status(line)
assert attrs["name"] == self.name
self._attrs_cache = attrs
return self._attrs_cache
@property
def is_running(self):
"""Return True if managed service is in status RUNNING"""
return self.status == "RUNNING"
@property
def status(self):
"""Return the status of the managed service
Status can be STOPPED, STARTING, RUNNING, BACKOFF, STOPPING,
EXITED, FATAL, UNKNOWN.
See http://supervisord.org/subprocess.html#process-states
"""
return self._attrs["status"]
@property
def pid(self):
"""Return the pid (as int) of the managed service"""
return self._attrs["pid"]
@classmethod
def get_services(cls):
"""Get a list of services running under supervisor
>>> Supervisor.get_services()
[<Supervisor(name="gunicorn", status="RUNNING", pid=4232)>
<Supervisor(name="celery", status="FATAL", pid=None)>]
"""
services = []
for line in cls(None).check_output(
"supervisorctl status",
).splitlines():
attrs = cls._parse_status(line)
service = cls(attrs["name"], attrs)
services.append(service)
return services
def __repr__(self):
return "<Supervisor(name=%s, status=%s, pid=%s)>" % (
self.name,
self.status,
self.pid,
)
| apache-2.0 | 7,123,089,475,247,661,000 | 29.99115 | 74 | 0.590805 | false |
JessWalters/VinnyBot | Core/Stats.py | 1 | 2045 | import glob
import json
import os
import threading
import discord
from urllib.request import urlopen
from urllib.request import Request
from Config import getToken
commandsCalled = 0
members = {}
VINNY_COLOR = int('008cba', 16)
async def getStats(message, client):
serverCount = 0
channelCount = 0
for server in client.guilds:
serverCount += 1
for channel in server.channels:
channelCount += 1
for member in server.members:
members[member.id] = 1
if message.channel.permissions_for(message.guild.me).embed_links:
embed = discord.Embed(title='', colour=VINNY_COLOR)
embed.add_field(name='Servers',
value='{}'.format(serverCount),
inline=True)
embed.add_field(name='Channels', value=channelCount, inline=True)
embed.add_field(name='Users', value=len(members), inline=True)
try:
embed.add_field(name='Shards', value=str(len(client.shard_ids)), inline=False)
except TypeError:
embed.add_field(name='Shards', value=5, inline=False)
embed.set_author(name=client.user.name, icon_url=client.user.avatar_url)
return await message.channel.send("Find more detailed stats at: https://goo.gl/Jct6uL", embed=embed)
else:
await message.channel.send(message.channel, "Vinny Stats:\n`Servers: " + str(serverCount) + "\nChannels: " + str(channelCount)
+ "\n`")
def sendStatistics(client):
url = "https://bots.discord.pw/api/bots/" + getToken('Bot ID') + "/stats"
serverCount = len(client.guilds)
data = {
"server_count": serverCount
}
req = Request(url)
req.add_header('Content-Type', 'application/json')
req.add_header('Authorization', getToken('Bot API'))
response = urlopen(req, json.dumps(data).encode('utf8'))
print('Stats Posted Successfully')
t = threading.Timer(3600.0, sendStatistics, args=(client,))
t.setDaemon(True)
t.start()
| mit | -4,362,727,674,846,634,000 | 33.661017 | 134 | 0.632274 | false |
dchaplinsky/pep.org.ua | pepdb/core/migrations/0160_auto_20190801_1806.py | 1 | 1125 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-08-01 15:06
from __future__ import unicode_literals
from django.db import migrations, models
def publish_everything(apps, schema_editor):
Company = apps.get_model("core", "Company")
Person = apps.get_model("core", "Person")
Person.objects.all().update(publish=True)
Company.objects.all().update(publish=True)
class Migration(migrations.Migration):
dependencies = [
('core', '0159_auto_20190529_1550'),
]
operations = [
migrations.AlterField(
model_name='company',
name='publish',
field=models.BooleanField(default=True, verbose_name='\u041e\u043f\u0443\u0431\u043b\u0456\u043a\u0443\u0432\u0430\u0442\u0438'),
),
migrations.AlterField(
model_name='person',
name='publish',
field=models.BooleanField(default=True, verbose_name='\u041e\u043f\u0443\u0431\u043b\u0456\u043a\u0443\u0432\u0430\u0442\u0438'),
),
migrations.RunPython(
publish_everything, reverse_code=migrations.RunPython.noop),
]
| mit | 1,302,245,182,735,781,600 | 31.142857 | 141 | 0.643556 | false |
delitamakanda/socialite | app/main/forms.py | 1 | 2488 | from flask_wtf import Form
from flask_pagedown.fields import PageDownField
from wtforms import TextField, StringField, SubmitField, TextAreaField, BooleanField, SelectField, ValidationError
from wtforms.validators import Required, Length, Regexp, EqualTo, Email
from ..models import User, Role, Comment
class CommentForm(Form):
body = StringField("", validators=[Required()])
submit = SubmitField('Submit')
class PostForm(Form):
body = PageDownField("What's on your mind ?", validators=[Required()])
submit = SubmitField('Submit')
class EditProfileForm(Form):
name = StringField('Real name', validators=[Length(0,64)])
location = StringField('Location', validators=[Length(0,64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
class EditProfileAdminForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
username = StringField('Username', validators=[Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, 'Usernames must have only letters, numbers, dots or underscore.')])
confirmed = BooleanField('Confirmed')
role = SelectField('Role', coerce=int)
name = StringField('Real name', validators=[Length(0,64)])
location = StringField('Location', validators=[Length(0,64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name) for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if field.data != self.user.email and User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if field.data != self.user.username and User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class ContactForm(Form):
name = StringField('Username', validators=[Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, 'Usernames must have only letters, numbers, dots or underscore.')])
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
subject = TextField("Subject", validators=[Required()])
message = TextAreaField("Message", validators=[Required()])
submit = SubmitField("Send")
| mit | -8,109,677,442,507,174,000 | 43.428571 | 183 | 0.679662 | false |
fred806/Pype | pypeNuke.py | 1 | 52150 | import nuke
import nukescripts
import os
from xml.dom import minidom
from xml.dom.minidom import Document
from xml.etree import ElementTree as ET
import getpass
import re
import ftplib
from ftplib import FTP
import uuid
import os2emxpath as path
import subprocess as sp
import logging
pypePath = os.environ['PYPE_PATH']
def pypeMenuNk():
#Building the menu
menubar=nuke.menu("Nuke")
m=menubar.addMenu("&Pype")
m.addCommand( 'Set Project', "pypeSetProjectNk()" )
m.addCommand( 'Project Info', "getPypeProjectInfoNk()" )
m.addSeparator()
m.addCommand( 'Open', "pypeShotOpenNk()" )
m.addCommand( 'Save+', "incrementalSaveNk()" )
m.addSeparator()
m.addCommand( 'Create Write Node', "createWriteNodeNk()" )
m.addCommand( 'Send Render to Pype', "renderToPypeNk()" )
m.addCommand( 'Apply Shot In-Out Frames', "applyShotInOutFramesNk()" )
m.addCommand( 'Apply Shot Handles', "applyShotHandlesNk()" )
m.addCommand( 'Apply Shot Framerate', "applyShotFramerateNk()" )
m.addCommand( 'Apply Shot Resolution', "applyShotResolutionNk()" )
m.addSeparator()
m.addCommand( 'Save WIP image', "savePypeWIPNk()" )
m.addCommand( 'Update Thumbnail', "saveThumbnailNk()" )
m.addCommand( 'Save Shot Cover', "saveCoverNk()" )
#m.addSeparator()
#m.addCommand( pypeMenu+'/Load Files', "print('Yeah!')" )
#m.addCommand( pypeMenu+'/Create Write Node', "print('Yeah!')" )
############################################################
class PypeShotOpenPanel( nukescripts.PythonPanel ):
def __init__( self ):
'''Lists all shots and their respective versions'''
nukescripts.PythonPanel.__init__( self, 'Pype Shot Open' )
# CREATE KNOBS
self.setMinimumSize(450, 100)
pypeInit()
getShotList()
shotList = getShotList()
seqList = getSeqList()
self.seqKnob = nuke.Enumeration_Knob( 'seq', 'Sequence ', seqList )
self.seqKnob.clearFlag( nuke.STARTLINE )
self.shotKnob = nuke.Enumeration_Knob( 'shot', ' Shot ', shotList )
self.shotKnob.clearFlag( nuke.STARTLINE )
self.versionKnob = nuke.Enumeration_Knob( 'version', ' Version ', [] )
self.versionKnob.clearFlag( nuke.STARTLINE )
self.descriptionKnob = nuke.Text_Knob('', '', ' ')
self.descriptionKnob2 = nuke.String_Knob('', '', ' ')
# ADD KNOBS
for k in ( self.seqKnob, self.shotKnob, self.versionKnob ):
self.addKnob( k )
# STORE DICTIONARY OF shotS PER TYPE
self.seqShotDict = {}
self.versionDict = {}
# FILL DICTIONARY
self.getSeqData( seqList )
self.getShotData( shotList )
def getSeqData( self, seqList ):
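		# Maps every sequence code to the list of shot folders that belong to it (stored in self.seqShotDict).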
seqList = getSeqList()
dict = {}
for seq in seqList:
shots = getSeqShotList( seq )
dict[seq] = shots
print ( str(seq) + ":" + str(shots) )
self.seqShotDict = dict
def getShotData( self, shotList ):
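		# Maps every shot folder to the list of comp versions found on disk (stored in self.versionDict).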
shotList = getShotList()
dict = {}
for shot in shotList:
versions = getVersionsList( shot )
dict[shot] = versions
print ( str(shot) + ":" + str(versions) )
self.versionDict = dict
def knobChanged( self, knob ):
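		# Keeps the shot and version dropdowns in sync with the currently selected sequence and shot.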
if knob is self.seqKnob or knob.name()=='showPanel':
self.shotKnob.setValues( self.seqShotDict[ self.seqKnob.value() ] )
self.shotKnob.setValue(0)
self.versionKnob.setValues( self.versionDict[ self.shotKnob.value() ] )
self.versionKnob.setValue(0)
if knob is self.shotKnob:
self.shotKnob.setValues( self.seqShotDict[ self.seqKnob.value() ] )
self.versionKnob.setValues( self.versionDict[ self.shotKnob.value() ] )
############################################################
def pypeShotOpenNk():
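	# Shows the shot-open panel and opens the chosen comp version from the project's CG/Shots folder.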
p = PypeShotOpenPanel()
if p.showModalDialog():
#print ( p.shotKnob.value(), p.versionKnob.value() )
file = ( projectPath + '/CG/Shots/' + p.shotKnob.value() + '/Comp/Nuke/' + p.shotKnob.value() + '_Comp_' + p.versionKnob.value() + '.nk' )
nuke.scriptOpen( file )
############################################################
def getShotNukePath( shotName ):
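	# Returns the Nuke comp folder of the given shot inside the current project.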
shotNukePath = ( projectPath + '/CG/Shots/' + shotName + '/Comp/Nuke')#For testing
return shotNukePath
############################################################
def getShotList():
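	# Lists every folder under CG/Shots that matches the Pype shot naming convention.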
#Listing folders
fileFolder =( projectPath + '/CG/Shots/' )
shotList = []
if os.path.exists( fileFolder ):
for file in os.listdir( fileFolder ):
#Nuke File Validation
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}$") )
if rex1.match(file):
shotList.extend([str(file)])
elif rex2.match(file):
shotList.extend([str(file)])
else:
pass
#print(file)
#shotList.extend([str(file)])
return shotList
############################################################
def getSeqShotList( seqName ):
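	# Lists the shots of one sequence, filtered by the Pype shot naming convention.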
#Listing folders
fileFolder =( projectPath + '/CG/Shots/' )
shotList = []
if os.path.exists( fileFolder ):
for file in os.listdir( fileFolder ):
if ('Shot_' + seqName ) in file:
#Nuke File Validation
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}$") )
if rex1.match(file):
shotList.extend([str(file)])
elif rex2.match(file):
shotList.extend([str(file)])
else:
pass
#print(file)
#shotList.extend([str(file)])
else:
pass
return shotList
############################################################
def getSeqList():
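	# Derives the sequence codes from the shot folder names (the characters before the shot number).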
#Listing folders
fileFolder =( projectPath + '/CG/Shots/' )
seqList = []
shotList = getShotList()
if os.path.exists( fileFolder ):
for item in shotList:
fileName = item
shotNumber = fileName[-3::]
seqNumber = fileName[-7:-4]
if seqNumber in seqList:
pass
else:
seqList.append(seqNumber)
return seqList
############################################################
def getVersionsList( shotName ):
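	# Collects the version suffixes (vXXX) of the shot's saved comp scripts, reversed so the latest is listed first.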
fileFolder = ( projectPath + '/CG/Shots/' + shotName + '/Comp/Nuke' )
shotVersionsList = []
if os.path.exists( fileFolder ):
for file in os.listdir( fileFolder ):
if file.endswith(".nk"):
fileName = file.split('/')[-1].partition(".")[0]
shotVersion = fileName[-4::]
#print(shotVersion)
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}.nk$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}.nk$") )
if rex1.match(file):
shotVersionsList.extend([str(shotVersion)])
elif rex2.match(file):
shotVersionsList.extend([str(shotVersion)])
else:
pass
shotVersionsList.reverse()
return shotVersionsList
############################################################
def pypeInit():
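	# Reads pypeSettings.xml from PYPE_PATH and sets the global projectPath/projectName for the current user.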
#Vars
global projectPath
global projectName
pypePath = os.environ['PYPE_PATH']
#Vars
fileXML = (pypePath + '/pypeSettings.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('item')
#Reading current project
projectPath = ''
for s in itemlist:
user = getpass.getuser()
#print ('User: ' + user)
if s.attributes['user'].value == user:
projectPath = s.attributes["project"].value
else:
pass
#print projectPath
projectName = projectPath.split('/')[-1].partition(".")[0]
projectName = os.path.basename(os.path.normpath( projectPath ))
print ('Project Name: ' + projectName)
############################################################
def getPypeProjectInfoNk():
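	# Reads the project's pypeProject.xml and displays its main settings in a message box.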
global projectPath
pypeProject = projectPath
fileXML = (pypeProject + '/_pype/pypeProject.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
projectName = s.attributes['name'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
type = s.attributes['type'].value
user = s.attributes['user'].value
nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
############################################################
# DEFINE SHOT'S NUKE DIR
def nukeDir( projectPath, shotName ):
nkDir = ( projectPath + '/CG/Shots/' + shotName + '/Comp/Nuke/' )
if not os.path.isdir( nkDir ):
		raise ValueError( 'NUKE directory does not exist' )
return nkDir
############################################################
def incrementalSaveNk():
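	# Saves the open comp as the next version, logs the comment in pypeHistory.xml (locally and over FTP) and refreshes the thumbnail.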
fileSaved = False
version = 1
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
while not fileSaved:
path = nuke.root().knob('name').value()
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version = fileName[-3::]
newVersion = "%03d" % (int(version) + 1)
newFileName = (fileNameShort + '_Comp_v' + newVersion + ext)
newFileNameLong = (fileFolder + "\\" + fileNameShort + '_Comp_v' + newVersion + ext)
newVersionExists = os.path.exists(newFileNameLong)
if os.path.isfile( newFileNameLong ):
print('File already exists!')
#versionTmp = newVersion
#newVersion = ( int(versionTmp) + 1 )
#newVersion += 1
#newFileNameLong = (fileFolder + "\\" + fileNameShort + '_v' + newVersion + ext)
#continue
break
comment = nuke.getInput( 'Comment', '' )
#XML-LOCAL
fileXMLFolder = ( fileFolder + '/_pype')
if not os.path.isdir(fileXMLFolder):
os.makedirs(fileXMLFolder)
fileXML = ( fileXMLFolder + '/pypeHistory.xml' )
user = getpass.getuser()
				# pypeHistory.xml is only created below when it does not exist yet,
				# so an existing version history is not overwritten on every save.
if not os.path.isfile( ( fileXMLFolder + '/pypeHistory.xml' ) ):
fileXML = ( fileXMLFolder + '/pypeHistory.xml' )
user = getpass.getuser()
doc = Document()
root_node = doc.createElement("history")
doc.appendChild(root_node)
object_node = doc.createElement("data")
root_node.appendChild(object_node)
# set attributes
object_node.setAttribute("version", "001")
object_node.setAttribute("user", user)
object_node.setAttribute("comment", "Initial save")
xml_file = open( fileXML, "w")
xml_file.write(doc.toprettyxml())
xml_file.close()
fileXML = ( fileXMLFolder + '/pypeHistory.xml' )
user = getpass.getuser()
doc = ET.parse( fileXML )
root = doc.getroot()
#Data to add
data1 = ET.Element("data", {"version": newVersion, "user": user, "comment": comment})
root.append(data1)
out = ET.tostring(root)
dom = minidom.parseString(out)
xml_file = open( fileXML, "w")
xml_file.write(dom.toprettyxml())
xml_file.close()
#
#FTP PYPE
ftp_send(fileXML, ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp"), ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp/pypeHistory.xml") )
#Saving
nuke.scriptSaveAs( newFileNameLong )
fileSaved = True
saveThumbnailNk()
break
else:
nuke.message("File not recognized by Pype.")
else:
nuke.message("File not recognized by Pype.")
else:
nuke.message("File not recognized by Pype.")
############################################################
def pypeFileCheckNk():
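	# Returns True when the open script follows the Pype comp naming convention.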
pypeFileCheck = False
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
pypeFileCheck = True
return pypeFileCheck
############################################################
def applyShotInOutFramesNk():
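	# Sets the script's frame range to the shot's in/out frames read from pypeShotInfo.xml.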
global projectPath
pypeFileCheck = pypeFileCheckNk()
if pypeFileCheck == True:
pypeProject = projectPath
projectFolder = projectPath
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
#fileXML = (pypeProject + '/_pype/pypeProject.xml' )
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
Handles = s.attributes['Handles'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
nuke.knob("root.first_frame", InFrame )
nuke.knob("root.last_frame", OutFrame )
else:
nuke.message("File not recognized by Pype.")
############################################################
def applyShotHandlesNk():
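	# Sets the script's frame range to the shot's in/out frames extended by the handles from pypeShotInfo.xml.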
global projectPath
pypeFileCheck = pypeFileCheckNk()
if pypeFileCheck == True:
pypeProject = projectPath
projectFolder = projectPath
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
#fileXML = (pypeProject + '/_pype/pypeProject.xml' )
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
Handles = s.attributes['Handles'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
InFrameHandle = ( float(InFrame) - float(Handles) )
OutFrameHandle = ( float(OutFrame) + float(Handles) )
nuke.knob("root.first_frame", str(InFrameHandle) )
nuke.knob("root.last_frame", str(OutFrameHandle) )
else:
nuke.message("File not recognized by Pype.")
############################################################
def applyShotFramerateNk():
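	# Sets the script's fps to the shot framerate stored in pypeShotInfo.xml.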
global projectPath
pypeFileCheck = pypeFileCheckNk()
if pypeFileCheck == True:
pypeProject = projectPath
projectFolder = projectPath
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
#fileXML = (pypeProject + '/_pype/pypeProject.xml' )
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
Handles = s.attributes['Handles'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
InFrameHandle = ( float(InFrame) - float(Handles) )
OutFrameHandle = ( float(OutFrame) + float(Handles) )
nuke.knob("root.fps", Framerate )
else:
nuke.message("File not recognized by Pype.")
############################################################
def applyShotResolutionNk():
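	# Creates a 'PypeRez' format from the shot resolution in pypeShotInfo.xml and sets it as the root format.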
global projectPath
pypeFileCheck = pypeFileCheckNk()
if pypeFileCheck == True:
pypeProject = projectPath
projectFolder = projectPath
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
#fileXML = (pypeProject + '/_pype/pypeProject.xml' )
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
Handles = s.attributes['Handles'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
InFrameHandle = ( float(InFrame) - float(Handles) )
OutFrameHandle = ( float(OutFrame) + float(Handles) )
#
pypeRez = ( str(resWidth) + ' ' + str(resHeight) + " PypeRez")
nuke.addFormat( pypeRez )
root = nuke.root()
root['format'].setValue( 'PypeRez' )
else:
nuke.message("File not recognized by Pype.")
############################################################
def easySave():
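	# Saves the script as the next free version inside the shot's Nuke folder.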
	global shotName
	nkDir = nukeDir( projectPath, shotName )
	# GET DESCRIPTION FROM USER BUT STRIP ALL WHITE SPACES
	#description = nuke.getInput( 'script description', 'bashComp' ).replace( ' ', '' )
	fileSaved = False
	version = 1
	while not fileSaved:
		# CONSTRUCT FILE NAME
		nkName = '%s_v%03d.nk' % ( shotName, version )
		# JOIN DIRECTORY AND NAME TO FORM FULL FILE PATH
		nkPath = os.path.join( nkDir, nkName )
		# IF FILE EXISTS VERSION UP
		if os.path.isfile( nkPath ):
			version += 1
			continue
		# SAVE NUKE SCRIPT
		nuke.scriptSaveAs( nkPath )
		fileSaved = True
	return nkPath
############################################################
def pypeSetProjectNk():
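	# Lets the user pick a project folder and stores it for the current user in pypeSettings.xml.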
pypePath = os.environ['PYPE_PATH']
projectPathPicker = nuke.getFilename('Select Project Source Folder', '')
print ( projectPathPicker )
global projectPath
global projectName
if projectPathPicker == None:
#print ('Cancelled' )
print('Cancelled' )
#projectPath = ''
#projectName = ''
else:
projectPath = projectPathPicker
projectName = os.path.basename(os.path.normpath(projectPathPicker))
#Vars
pypePath = os.environ['PYPE_PATH']
fileXML = (pypePath + '/pypeSettings.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('item')
user = getpass.getuser()
project = projectPath
users=[]
for s in itemlist:
users.append(s.attributes['user'].value)
if user in users:
for s in itemlist:
if s.attributes['user'].value == user:
#print 'Exists'
#We update
s.attributes['project'].value = project
#Writing file
f = open(fileXML, 'w')
parsedXML.writexml(f)
f.close()
break
else:
pass
else:
#print "Doesn't exist"
print('Need to add that guy')
fileXML = (pypePath + '/pypeSettings.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('item')
#Applying new project
doc = ET.parse( fileXML )
root = doc.getroot()
#Data to add
data1 = ET.Element('item', {'project': project, 'user': user})
root.append(data1)
out = ET.tostring(root)
dom = minidom.parseString(out)
xml_file = open( fileXML, 'w')
xml_file.write(dom.toprettyxml())
xml_file.close()
#FTP
#Ftp disabled for pypeSettings because it has no influence on the web interface
#ftp_send( fileXML , "www/pype/", "www/pype/pypeSettings.xml")
############################################################
def saveThumbnailNk():
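	# Renders a small snapshot of the selected node through a temporary Reformat/Write pair and uploads it as the version thumbnail.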
global projectName
global projectPath
try:
sel = nuke.selectedNode()
except:
return
#image size multiplier
	#this will be the thumbnail size (in percentage, 1 is 100%) compared to the original image
imageSize = 0.15
#thumbnail will be saved with this image format
fileType = "jpg"
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
path = nuke.root().knob('name').value()
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version= fileName[-3::]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
if sel.Class() != "Viewer":
#Disabling Proxy
proxy = nuke.root().knob('proxy').value()
nuke.root().knob('proxy').setValue(False)
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
thumbnailDir = scriptsPath + "/_pype"
if not os.path.isdir(thumbnailDir):
os.makedirs(thumbnailDir)
#full thumbnail path
#thumbnailName = ( scriptName + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
thumbnailName = ( fileNameShort + "_Comp_v" + version + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullThumbnailPath = ( thumbnailDir + '/' + fileNameShort + "_Comp_v" + version + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
#reformat node
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,sel)
r.setXYpos(sel.xpos(), sel.ypos()+50)
#r["type"].setValue("scale")
#r["scale"].setValue(imageSize)
r["type"].setValue("to box")
r["box_width"].setValue( 170 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(100)
r["black_outside"].setValue(True)
r["resize"].setValue("height")
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullThumbnailPath)
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(r)
nuke.delete(w)
nuke.root().knob('proxy').setValue(proxy)
#FTP
ftp_send(fullThumbnailPath, ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp"), ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp/" + thumbnailName) )
else:
nuke.message("Viewer not supported. Please choose another node.")
else:
		nuke.message("You haven't saved your Nuke script yet. Please save your Nuke script first.")
############################################################
def saveCoverNk():
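	# Renders a shot cover image plus a small thumbnail from the selected node and uploads the cover to the Pype server.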
global projectName
global projectPath
try:
sel = nuke.selectedNode()
except:
return
#image size multiplier
	#this will be the thumbnail size (in percentage, 1 is 100%) compared to the original image
imageSize = 1
#thumbnail will be saved with this image format
fileType = "jpg"
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
path = nuke.root().knob('name').value()
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version= fileName[-3::]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
if sel.Class() != "Viewer":
#Disabling Proxy
proxy = nuke.root().knob('proxy').value()
nuke.root().knob('proxy').setValue(False)
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
#thumbnailDir = scriptsPath + "/_pype"
thumbnailDir = (projectPath + "/_pype/Shots/" + fileNameShort)
if not os.path.isdir(thumbnailDir):
os.makedirs(thumbnailDir)
#full thumbnail path
coverName = ( fileNameShort + ".{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
thumbnailName = ( fileNameShort + "_thumbnail.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullCoverPath = ( thumbnailDir + '/' + fileNameShort + ".{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullThumbnailPath = ( thumbnailDir + '/' + fileNameShort + "_thumbnail.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
#reformat node
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,sel)
r.setXYpos(sel.xpos(), sel.ypos()+50)
r["type"].setValue("to box")
r["box_width"].setValue( 320 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(180)
r["black_outside"].setValue(True)
r["resize"].setValue("height")
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullCoverPath)
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(r)
nuke.delete(w)
#thumbnail
#Reformat for small thumbnail
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,sel)
r.setXYpos(sel.xpos(), sel.ypos()+50)
r["type"].setValue("to box")
r["box_width"].setValue( 170 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(100)
r["black_outside"].setValue(True)
r["resize"].setValue("height")
#Write Node for small thumbnail
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullThumbnailPath)
nuke.execute(w,nuke.frame(),nuke.frame())
nuke.delete(r)
nuke.delete(w)
nuke.root().knob('proxy').setValue(proxy)
#FTP
ftp_send(fullCoverPath, ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort), ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/" + coverName) )
else:
nuke.message("Viewer not supported. Please choose another node.")
else:
		nuke.message("You haven't saved your Nuke script yet. Please save your Nuke script first.")
############################################################
def savePypeWIPNk():
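	# Renders a full-size WIP image and a square thumbnail from the selected node and uploads both to the Pype server.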
global projectName
global projectPath
try:
sel = nuke.selectedNode()
except:
return
#image size multiplier
	#this will be the thumbnail size (in percentage, 1 is 100%) compared to the original image
imageSize = 0.15
#thumbnail will be saved with this image format
fileType = "jpg"
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
path = nuke.root().knob('name').value()
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version= fileName[-3::]
imageID = uuid.uuid4()
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
if sel.Class() != "Viewer":
#Disabling Proxy
proxy = nuke.root().knob('proxy').value()
nuke.root().knob('proxy').setValue(False)
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
imageFolder = (projectPath + '/_pype/Shots/' + fileNameShort + '/wip/')
imageFolderThumbnail = (projectPath + '/_pype/Shots/' + fileNameShort + '/wip/thumbnail/')
if not os.path.isdir(imageFolder):
os.makedirs(imageFolder)
if not os.path.isdir(imageFolderThumbnail):
os.makedirs(imageFolderThumbnail)
#full thumbnail path
#thumbnailName = ( scriptName + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
imageName = ( fileNameShort + "_Comp_v" + version + "_wip_" + str(imageID) + ".{ext}").format(imageFolder=imageFolder, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullImagePath = ( imageFolder + '/' + fileNameShort + "_Comp_v" + version + "_wip_" + str(imageID) + ".{ext}").format(imageFolder=imageFolder, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullThumbnailPath = ( imageFolder + '/thumbnail/' + fileNameShort + "_Comp_v" + version + "_wip_" + str(imageID) + ".{ext}").format(imageFolder=imageFolder, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,sel)
w.setXYpos(sel.xpos(), sel.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullImagePath)
w.knob('_jpeg_quality').setValue("1")
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(w)
#Thumbnail
#reformat node
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,sel)
r.setXYpos(sel.xpos(), sel.ypos()+50)
#r["type"].setValue("scale")
#r["scale"].setValue(imageSize)
r["type"].setValue("to box")
r["box_width"].setValue( 300 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(300)
r["resize"].setValue("fill")
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob('_jpeg_quality').setValue(1)
w.knob("file").setValue(fullThumbnailPath)
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(r)
nuke.delete(w)
nuke.root().knob('proxy').setValue(proxy)
#FTP
ftp_send(fullImagePath, ( "www/pype/projects/" + projectName + "/uploads/Shots/" + fileNameShort ), ( "www/pype/projects/" + projectName + "/uploads/Shots/" + fileNameShort + "/" + imageName) )
ftp_send(fullThumbnailPath, ( "www/pype/projects/" + projectName + "/uploads/Shots/" + fileNameShort + "/thumbnail"), ( "www/pype/projects/" + projectName + "/uploads/Shots/" + fileNameShort + "/thumbnail/" + imageName) )
nuke.message("WIP saved!")
else:
nuke.message("Viewer not supported. Please choose another node.")
else:
		nuke.message("You haven't saved your Nuke script yet. Please save your Nuke script first.")
############################################################
def createWriteNodeNk():
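	# Creates a 'PypeWrite' Write node under the selected node, preset to the shot's frame range and comp render path.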
global projectName
global projectPath
if nuke.exists( "PypeWrite" ):
w = nuke.toNode("PypeWrite")
nuke.delete(w)
try:
sel = nuke.selectedNode()
except:
return
#thumbnail will be saved with this image format
fileType = "exr"
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
projectFolder = projectPath
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
version= fileName[-3::]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
#
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#
if rex1.match(fileName) or rex2.match(fileName):
if sel.Class() != "Viewer":
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
renderDir = (projectPath + "/CG/Shots/" + fileNameShort + "/Renders/Comp_Renders/" + fileName)
if not os.path.isdir(renderDir):
os.makedirs(renderDir)
#full thumbnail path
#thumbnailName = ( scriptName + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullRenderPath = ( renderDir + '/' + fileNameShort + "_Comp_v" + version + ".####.{ext}").format(thumbnailDir=renderDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
#write node
#w = nuke.createNode("Write", inpanel = False)
w = nuke.nodes.Write (name="PypeWrite")
w.setInput(0,sel)
w.setXYpos(sel.xpos(), sel.ypos()+50)
w.knob("use_limit").setValue(True)
w.knob("first").setValue(float(InFrame))
w.knob("last").setValue(float(OutFrame))
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullRenderPath)
#FTP
#ftp_send(fullThumbnailPath, ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp"), ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp/" + thumbnailName) )
else:
nuke.message("Viewer not supported. Please choose another node.")
else:
		nuke.message("You haven't saved your nuke script yet. Please save your nuke script first.")
############################################################
def renderToPypeNk():
global projectName
global projectPath
scriptsRoot = nuke.root().name()
path = nuke.root().knob('name').value()
projectFolder = projectPath
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
version= fileName[-3::]
imageID = uuid.uuid4()
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
ext = "exr"
#
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
InFrame = s.attributes['InFrame'].value
InFrame = InFrame.zfill(4)
OutFrame = s.attributes['OutFrame'].value
OutFrame = OutFrame.zfill(4)
#
if rex1.match(fileName) or rex2.match(fileName):
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
renderDir = (projectPath + "/CG/Shots/" + fileNameShort + "/Renders/Comp_Renders/" + fileName)
fullRenderPath = ( renderDir + '/' + fileNameShort + "_Comp_v" + version + ".%4d." + ext)
fullRenderPath = fullRenderPath.replace("//", "/" )
firstRenderFrame = ( renderDir + '/' + fileNameShort + "_Comp_v" + version + "."+InFrame+"." + ext)
firstRenderFrame = firstRenderFrame.replace("//", "/" )
print firstRenderFrame
#import os.path
if os.path.isfile(firstRenderFrame):
print "File exists! Sending to Pype"
sendRenderToPype(fullRenderPath,InFrame,Framerate)
nuke.message("Sent to Pype!")
else:
print "File doesn't exist..."
############################################################
def sendRenderToPype(in_file,start_number,framerate):
global projectName
global projectPath
pypePath = os.environ['PYPE_PATH']
ffmpegBin = (pypePath + '/bin/ffmpeg-3.3.2-win64-static/bin/ffmpeg.exe')
path = nuke.root().knob('name').value()
filePath = in_file
#filePath = path.normpath(in_file)
fileFolder = os.path.abspath(os.path.join(filePath, os.pardir))
fileFolder = '/'.join(fileFolder.split('\\'))
#fileFolder = path.normpath(fileFolder)
ext = filePath[-3::]
fileName = filePath.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version= fileName[-3::]
imageID = uuid.uuid4()
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
imageFolder = (projectPath + '/_pype/Shots/' + fileNameShort + '/wip/')
if not os.path.exists(imageFolder):
os.makedirs(imageFolder)
imageFolderThumbnail = (projectPath + '/_pype/Shots/' + fileNameShort + '/wip/thumbnail/')
if not os.path.exists(imageFolderThumbnail):
os.makedirs(imageFolderThumbnail)
print ("Saving " + fileName + ".mp4" + " in " + fileFolder)
out_file = (imageFolder + "/" + fileName + str(imageID) + ".mp4")
out_thumb = (imageFolder + "/" + fileName + str(imageID) + ".jpg")
out_thumb_temp = (imageFolderThumbnail + "/" + fileName + str(imageID) + "_temp.jpg")
#out_thumb_temp = os.path.join(fileFolder, (fileName + "_temp.jpg"))
print out_file
if os.path.isfile(out_file) :
print "Exists!"
os.remove(out_file)
pass
else:
pass
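	#Two encode paths below (rough description): .mov sources are transcoded
	#straight to an H.264 mp4, while image sequences are assembled starting at
	#-start_number and tagged with an sRGB transfer curve (-apply_trc
	#iec61966_2_1) before encoding.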
if in_file.endswith(".mov"):
ffmpeg = sp.Popen([ffmpegBin, '-r', framerate, '-f', 'mp4', '-i', in_file, '-vcodec', 'libx264', '-f', 'mp4', '-r', framerate, '-pix_fmt', 'yuv420p', out_file], stdout = sp.PIPE, stderr = sp.STDOUT)
out, err = ffmpeg.communicate()
print out, err, ffmpeg.returncode
else:
ffmpeg = sp.Popen([ffmpegBin, '-r', framerate, '-apply_trc', 'iec61966_2_1', '-start_number', start_number, '-i', in_file, '-f', 'mp4', '-r', framerate, '-pix_fmt', 'yuv420p', out_file], stdout = sp.PIPE, stderr = sp.STDOUT)
out, err = ffmpeg.communicate()
print out, err, ffmpeg.returncode
#process_output = ffmpeg.communicate()
#for output in process_output:
# print output
#Creating Thumbnail
ffmpeg = sp.Popen([ffmpegBin, '-ss', '00:00:01', '-t', '00:00:00.04', '-i', out_file, '-r', framerate, out_thumb_temp], stdout = sp.PIPE, stderr = sp.STDOUT)
out, err = ffmpeg.communicate()
print out, err, ffmpeg.returncode
#process_output = ffmpeg.communicate()
#for output in process_output:
# print output
#Cropping thumbnail
#Getting size
thumbnailPath = out_thumb
#
proxy = nuke.root().knob('proxy').value()
nuke.root().knob('proxy').setValue(False)
#write node
rd = nuke.createNode("Read", inpanel = False)
rd.knob("name").setValue("capture1")
rd.knob("file").setValue(out_thumb_temp)
#delete nodes
#nuke.delete(w)
#Thumbnail
#reformat node
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,rd)
r.setXYpos(rd.xpos(), rd.ypos()+50)
#r["type"].setValue("scale")
#r["scale"].setValue(imageSize)
r["type"].setValue("to box")
r["box_width"].setValue( 300 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(300)
r["resize"].setValue("fill")
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue("jpg")
w.knob("file").setValue(out_thumb)
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(rd)
nuke.delete(r)
nuke.delete(w)
nuke.root().knob('proxy').setValue(proxy)
#
#PYPE FTP
imageName = ((out_file.split('/')[-1].partition(".")[0]) + ".mp4")
thumbName = ((out_thumb.split('/')[-1].partition(".")[0]) + ".jpg")
imagePype = ( '/projects/' + projectName + '/uploads/Shots/' + fileNameShort + '/' + imageName )
imagePypePath = ( '/projects/' + projectName + '/uploads/Shots/' + fileNameShort )
thumbnailPypePath = ( '/projects/' + projectName + '/uploads/Shots/' + fileNameShort + '/thumbnail' )
#
ftp_send( out_file, ( "www/pype" + imagePypePath), ("www/pype" + imagePypePath + '/' + imageName) )
ftp_send( out_thumb, ( "www/pype" + thumbnailPypePath), ("www/pype" + thumbnailPypePath + '/' + thumbName) )
############################################################
def chdir(ftp, directory):
ch_dir_rec(ftp,directory.split('/'))
def directory_exists(ftp, directory):
filelist = []
ftp.retrlines('LIST',filelist.append)
for f in filelist:
if f.split()[-1] == directory and f.upper().startswith('D'):
return True
return False
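#ch_dir_rec walks the remote path one segment at a time: create the directory
#with mkd if it is missing, cwd into it, then recurse on the remaining segments.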
def ch_dir_rec(ftp, descending_path_split):
if len(descending_path_split) == 0:
return
next_level_directory = descending_path_split.pop(0)
if not directory_exists(ftp,next_level_directory):
ftp.mkd(next_level_directory)
ftp.cwd(next_level_directory)
ch_dir_rec(ftp,descending_path_split)
def ftp_transfer(session, sourcefile, targetfile):
file = open(sourcefile,'rb') # file to send
session.storbinary(('STOR ' + targetfile), file) # send the file
file.close() # close file and FTP
session.quit()
def ftp_send(sourcefile, targetfolder, targetfile):
pypePath = os.environ['PYPE_PATH']
fileXML = (pypePath + '/pypeFTP.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('ftp')
#project = projectPath
for s in itemlist:
ftphome = s.attributes['ftphome'].value
log = s.attributes['log'].value
pw = s.attributes['pw'].value
try:
session = ftplib.FTP(ftphome,log, pw)
ftp = FTP(ftphome)
ftp.login(log, pw)
directory = targetfolder
chdir(ftp, directory)
#ftp_transfer(session, sourcefile, targetfile)
file = open(sourcefile,'rb') # file to send
session.storbinary(('STOR ' + targetfile), file) # send the file
file.close() # close file and FTP
session.quit()
except ftplib.all_errors:
print "Error during the FTP transfer!"
############################################################
pypeInit()
pypeMenuNk()
| mit | -3,904,076,662,467,990,500 | 42.763948 | 245 | 0.527651 | false |
bradkav/ATLASfits | DiphotonFits.py | 1 | 1961 | #--DiphotonFits.py - Version 1 - 04/02/2016
#--Author: Bradley J Kavanagh
#--Summary: Code for fitting the ATLAS diphoton data
#--and calculating the significance of the 750 GeV excess
#--Note: Requires emcee (http://dan.iel.fm/emcee/current/)
#--Please report any problems to: [email protected]
print "----Likelihood fits to ATLAS diphoton data---"
import numpy as np
from DiphotonFits_utils import getBestFit
#----Options----
#Print best-fit points to file (in fits folder)
saveResults = 1
#----Main procedure-----
BG_ID = ['k = 0 (fixed norm.)',
'k = 1 (fixed norm.)',
'k = 2 (fixed norm.)',
'k = 0 (free norm.)',
'k = 1 (free norm.)',
'k = 2 (free norm.)',]
SIGNAL_ID = ['Background-only',
'Signal+BG (NWA)',
'Signal+BG (Free-width)']
#Loop over possible background parametrisations
for i in range(6):
print "---------------------"
print "Background function:" ,BG_ID[i]
#Background-only fit
like_BG, bf_BG = getBestFit(i, 0)
print " Background-only"
print " lnL:", '{:.2f}'.format(like_BG)
if (saveResults):
np.savetxt('fits/Fits_BG=' + str(i) + '_BG-only.txt', bf_BG)
#Narrow width fit
like_NWA, bf_NWA = getBestFit(i, 1)
print " Signal+BG (NWA)"
print " lnL:", '{:.2f}'.format(like_NWA)
if (saveResults):
np.savetxt('fits/Fits_BG=' + str(i)+ '_NWA.txt', bf_NWA)
#Free width fit
like_wide, bf_wide = getBestFit(i, 2)
print " Signal+BG (Free width)"
print " lnL:", '{:.2f}'.format(like_wide)
if (saveResults):
np.savetxt('fits/Fits_BG=' + str(i) + '_wide.txt', bf_wide)
#Calculate significance
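    #Local significance from the likelihood-ratio approximation sqrt(2*delta lnL);
    #this is an estimate only (no look-elsewhere correction is applied here).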
sig_NWA = np.sqrt(2*(like_NWA - like_BG))
sig_wide = np.sqrt(2*(like_wide - like_BG))
print " "
print " Significance (NWA):", '{:.2f}'.format(sig_NWA)
print " Significance (wide):", '{:.2f}'.format(sig_wide)
| mit | -6,611,880,352,664,472,000 | 29.169231 | 68 | 0.570117 | false |
yangautumn/turing_pattern | amorphous_pattern/grid_graph.py | 1 | 1941 | """
Simulate how Droplets will perform with Young's Model on Grid Graphs with different density
Author: Yang Li
Date: July 14, 2017
# randomly generate N droplets with diameter = 4.5cm within a square
of length 60cm
"""
from AmorphousGraph import *
# 'factor' roughly controls how strongly each activator pushes against the others
def grid_Graph(pattern_def, path_to, dens):
(pattern_idx, hight_a, width_a, hight_i, width_i, factor) = pattern_def
lims = [20, 20]
step_size = 1.0/np.sqrt(dens)
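	#spacing of 1/sqrt(dens) gives roughly 'dens' grid points per unit area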
xs = []
ys = []
# randomly generate a set of coordination
# make sure there is no duplicated ones
for x in np.arange(0, lims[0]+1, step_size):
for y in np.arange(0, lims[1]+1, step_size):
xs.append(x)
ys.append(y)
print ("--> Coordination ready to go!")
# start of creating the instance of amorphous graph for pattern formation
num = len(xs)
ag = AmorphousGraph(num, hight_a, width_a, hight_i, width_i, pattern_idx, factor)
ag.initDroplets(xs, ys)
ag.initCircles()
print ("--> Amorphous Graph ready to go!")
for fi in np.arange(3, 10):
ag.resetColors()
ag.factor = (float)('%.2f' % (-0.1*fi))
counter = 0
while ag.oneLoop() > 0:
counter += 1
ag.drawPattern_Grid('dens{}'.format(dens), lims, path_to)
print ("--> Done with drawing with factor", ag.factor)
if __name__ == "__main__":
path_to = "D:/Github/Data/turing_pattern/Aug8_gridGraph_square"
path_to = "/home/yang/Dropbox/Data/turing_pattern/Aug8_gridGraph_square"
pattern_def_0 = (0, 2, 5, 6, 5, -0.50)
pattern_def_1 = (1, 5, 2, 5, 6, -0.50)
pattern_def_2 = (2, 3, 3, 5, 5, -0.55)
pattern_def_set = [pattern_def_0, pattern_def_1, pattern_def_2]
for pd in pattern_def_set:
for i in range(3, 10):
dens = i+1
print ("\n Start! -- [Pattern]:", pd, "[dens]:", dens)
t0 = time.time()
grid_Graph(pd, path_to, dens)
print ("\n Done! -- [Pattern]:", pd, "[dens]:", dens, "[Time used]:", time.time()-t0)
| gpl-3.0 | 1,260,177,515,249,739,000 | 25.958333 | 91 | 0.646574 | false |
rolandgeider/wger | wger/gym/tests/test_contract_options.py | 1 | 3231 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Django
from django.urls import reverse
# wger
from wger.core.tests.base_testcase import (
WgerAccessTestCase,
WgerAddTestCase,
WgerDeleteTestCase,
WgerEditTestCase,
delete_testcase_add_methods
)
from wger.gym.models import ContractOption
class AddContractOptionTestCase(WgerAddTestCase):
"""
Tests creating a new contract option
"""
object_class = ContractOption
url = reverse('gym:contract-option:add', kwargs={'gym_pk': 1})
data = {'name': 'Some name'}
user_success = ('manager1',
'manager2')
user_fail = ('admin',
'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5')
class EditContractOptionTestCase(WgerEditTestCase):
"""
Tests editing a contract option
"""
pk = 1
object_class = ContractOption
url = 'gym:contract-option:edit'
user_success = ('manager1',
'manager2')
user_fail = ('admin',
'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5')
data = {'name': 'Standard contract 16-Gj'}
class DeleteContractOptionTestCase(WgerDeleteTestCase):
"""
Tests deleting a contract option
"""
pk = 1
object_class = ContractOption
url = 'gym:contract-option:delete'
user_success = ('manager1',
'manager2')
user_fail = ('admin',
'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5')
delete_testcase_add_methods(DeleteContractOptionTestCase)
class AccessContractOptionOverviewTestCase(WgerAccessTestCase):
"""
Test accessing the contract option page
"""
url = reverse('gym:contract-option:list', kwargs={'gym_pk': 1})
user_success = ('manager1',
'manager2')
user_fail = ('admin',
'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5')
| agpl-3.0 | 1,165,271,795,223,467,500 | 27.095652 | 78 | 0.548128 | false |
jelly/calibre | src/calibre/devices/kobo/kobotouch_config.py | 1 | 19439 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import textwrap
from PyQt5.Qt import (QLabel, QGridLayout, QLineEdit, QVBoxLayout,
QDialog, QDialogButtonBox, QCheckBox)
from calibre.gui2.device_drivers.tabbed_device_config import TabbedDeviceConfig, DeviceConfigTab, DeviceOptionsGroupBox
from calibre.devices.usbms.driver import debug_print
def wrap_msg(msg):
return textwrap.fill(msg.strip(), 100)
def setToolTipFor(widget, tt):
widget.setToolTip(wrap_msg(tt))
def create_checkbox(title, tt, state):
cb = QCheckBox(title)
cb.setToolTip(wrap_msg(tt))
cb.setChecked(bool(state))
return cb
class KOBOTOUCHConfig(TabbedDeviceConfig):
def __init__(self, device_settings, all_formats, supports_subdirs,
must_read_metadata, supports_use_author_sort,
extra_customization_message, device, extra_customization_choices=None, parent=None):
super(KOBOTOUCHConfig, self).__init__(device_settings, all_formats, supports_subdirs,
must_read_metadata, supports_use_author_sort,
extra_customization_message, device, extra_customization_choices, parent)
self.device_settings = device_settings
self.all_formats = all_formats
self.supports_subdirs = supports_subdirs
self.must_read_metadata = must_read_metadata
self.supports_use_author_sort = supports_use_author_sort
self.extra_customization_message = extra_customization_message
self.extra_customization_choices = extra_customization_choices
self.tab1 = Tab1Config(self, self.device)
self.tab2 = Tab2Config(self, self.device)
self.addDeviceTab(self.tab1, _("Collections, covers && uploads"))
self.addDeviceTab(self.tab2, _('Metadata, on device && advanced'))
def get_pref(self, key):
return self.device.get_pref(key)
@property
def device(self):
return self._device()
def validate(self):
if hasattr(self, 'formats'):
if not self.formats.validate():
return False
if not self.template.validate():
return False
return True
@property
def book_uploads_options(self):
return self.tab1.book_uploads_options
@property
def collections_options(self):
return self.tab1.collections_options
@property
def cover_options(self):
return self.tab1.covers_options
@property
def device_list_options(self):
return self.tab2.device_list_options
@property
def advanced_options(self):
return self.tab2.advanced_options
@property
def metadata_options(self):
return self.tab2.metadata_options
def commit(self):
debug_print("KOBOTOUCHConfig::commit: start")
p = super(KOBOTOUCHConfig, self).commit()
p['manage_collections'] = self.manage_collections
p['create_collections'] = self.create_collections
p['collections_columns'] = self.collections_columns
p['ignore_collections_names'] = self.ignore_collections_names
p['delete_empty_collections'] = self.delete_empty_collections
p['upload_covers'] = self.upload_covers
p['keep_cover_aspect'] = self.keep_cover_aspect
p['upload_grayscale'] = self.upload_grayscale
p['show_recommendations'] = self.show_recommendations
p['show_previews'] = self.show_previews
p['show_archived_books'] = self.show_archived_books
p['update_series'] = self.update_series
p['modify_css'] = self.modify_css
p['support_newer_firmware'] = self.support_newer_firmware
p['debugging_title'] = self.debugging_title
p['driver_version'] = '.'.join([unicode(i) for i in self.device.version])
return p
class Tab1Config(DeviceConfigTab): # {{{
def __init__(self, parent, device):
super(Tab1Config, self).__init__(parent)
self.l = QVBoxLayout(self)
self.setLayout(self.l)
self.collections_options = CollectionsGroupBox(self, device)
self.l.addWidget(self.collections_options)
self.addDeviceWidget(self.collections_options)
self.covers_options = CoversGroupBox(self, device)
self.l.addWidget(self.covers_options)
self.addDeviceWidget(self.covers_options)
self.book_uploads_options = BookUploadsGroupBox(self, device)
self.l.addWidget(self.book_uploads_options)
self.addDeviceWidget(self.book_uploads_options)
# }}}
class Tab2Config(DeviceConfigTab): # {{{
def __init__(self, parent, device):
super(Tab2Config, self).__init__(parent)
self.l = QVBoxLayout(self)
self.setLayout(self.l)
self.metadata_options = MetadataGroupBox(self, device)
self.l.addWidget(self.metadata_options)
self.addDeviceWidget(self.metadata_options)
self.device_list_options = DeviceListGroupBox(self, device)
self.l.addWidget(self.device_list_options)
self.addDeviceWidget(self.device_list_options)
self.advanced_options = AdvancedGroupBox(self, device)
self.l.addWidget(self.advanced_options)
self.addDeviceWidget(self.advanced_options)
# }}}
class BookUploadsGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super(BookUploadsGroupBox, self).__init__(parent, device)
self.setTitle(_("Book uploading"))
self.options_layout = QGridLayout()
self.options_layout.setObjectName("options_layout")
self.setLayout(self.options_layout)
self.modify_css_checkbox = create_checkbox(
_("Modify CSS"),
_('This allows addition of user CSS rules and removal of some CSS. '
'When sending a book, the driver adds the contents of {0} to all stylesheets in the EPUB. '
'This file is searched for in the root directory of the main memory of the device. '
'As well as this, if the file contains settings for the "orphans" or "widows", '
'these are removed for all styles in the original stylesheet.').format(device.KOBO_EXTRA_CSSFILE),
device.get_pref('modify_css')
)
self.options_layout.addWidget(self.modify_css_checkbox, 0, 0, 1, 2)
self.options_layout.setRowStretch(1, 1)
@property
def modify_css(self):
return self.modify_css_checkbox.isChecked()
class CollectionsGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super(CollectionsGroupBox, self).__init__(parent, device)
self.setTitle(_("Collections"))
self.options_layout = QGridLayout()
self.options_layout.setObjectName("options_layout")
self.setLayout(self.options_layout)
self.setCheckable(True)
self.setChecked(device.get_pref('manage_collections'))
self.setToolTip(wrap_msg(_('Create new bookshelves on the Kobo if they do not exist. This is only for firmware V2.0.0 or later.')))
self.collections_columns_label = QLabel(_('Collections columns:'))
self.collections_columns_edit = QLineEdit(self)
self.collections_columns_edit.setToolTip(_('The Kobo from firmware V2.0.0 supports bookshelves.'
' These are created on the Kobo. ' +
'Specify a tags type column for automatic management.'))
self.collections_columns_edit.setText(device.get_pref('collections_columns'))
self.create_collections_checkbox = create_checkbox(
_("Create collections"),
_('Create new bookshelves on the Kobo if they do not exist. This is only for firmware V2.0.0 or later.'),
device.get_pref('create_collections')
)
self.delete_empty_collections_checkbox = create_checkbox(
_('Delete empty bookshelves'),
_('Delete any empty bookshelves from the Kobo when syncing is finished. This is only for firmware V2.0.0 or later.'),
device.get_pref('delete_empty_collections')
)
self.ignore_collections_names_label = QLabel(_('Ignore collections:'))
self.ignore_collections_names_edit = QLineEdit(self)
self.ignore_collections_names_edit.setToolTip(_('List the names of collections to be ignored by ' +
'the collection management. The collections listed ' +
'will not be changed. Names are separated by commas.'))
self.ignore_collections_names_edit.setText(device.get_pref('ignore_collections_names'))
self.options_layout.addWidget(self.collections_columns_label, 1, 0, 1, 1)
self.options_layout.addWidget(self.collections_columns_edit, 1, 1, 1, 1)
self.options_layout.addWidget(self.create_collections_checkbox, 2, 0, 1, 2)
self.options_layout.addWidget(self.delete_empty_collections_checkbox, 3, 0, 1, 2)
self.options_layout.addWidget(self.ignore_collections_names_label, 4, 0, 1, 1)
self.options_layout.addWidget(self.ignore_collections_names_edit, 4, 1, 1, 1)
self.options_layout.setRowStretch(4, 1)
@property
def manage_collections(self):
return self.isChecked()
@property
def collections_columns(self):
return self.collections_columns_edit.text().strip()
@property
def create_collections(self):
return self.create_collections_checkbox.isChecked()
@property
def delete_empty_collections(self):
return self.delete_empty_collections_checkbox.isChecked()
@property
def ignore_collections_names(self):
return self.ignore_collections_names_edit.text().strip()
class CoversGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super(CoversGroupBox, self).__init__(parent, device)
self.setTitle(_("Upload covers"))
self.options_layout = QGridLayout()
self.options_layout.setObjectName("options_layout")
self.setLayout(self.options_layout)
self.setCheckable(True)
self.setChecked(device.get_pref('upload_covers'))
self.setToolTip(wrap_msg(_('Upload cover images from the calibre library when sending books to the device.')))
self.upload_grayscale_checkbox = create_checkbox(
_('Upload black and white covers'),
_('Convert covers to black and white when uploading'),
device.get_pref('upload_grayscale')
)
self.keep_cover_aspect_checkbox = create_checkbox(
_('Keep cover aspect ratio'),
_('When uploading covers, do not change the aspect ratio when resizing for the device.'
' This is for firmware versions 2.3.1 and later.'),
device.get_pref('keep_cover_aspect'))
self.options_layout.addWidget(self.keep_cover_aspect_checkbox, 0, 0, 1, 1)
self.options_layout.addWidget(self.upload_grayscale_checkbox, 1, 0, 1, 1)
self.options_layout.setRowStretch(2, 1)
@property
def upload_covers(self):
return self.isChecked()
@property
def upload_grayscale(self):
return self.upload_grayscale_checkbox.isChecked()
@property
def keep_cover_aspect(self):
return self.keep_cover_aspect_checkbox.isChecked()
class DeviceListGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super(DeviceListGroupBox, self).__init__(parent, device)
self.setTitle(_("Show as on device"))
self.options_layout = QGridLayout()
self.options_layout.setObjectName("options_layout")
self.setLayout(self.options_layout)
self.show_recommendations_checkbox = create_checkbox(
_("Show recommendations"),
_('Kobo shows recommendations on the device. In some cases these have '
'files but in other cases they are just pointers to the web site to buy. '
'Enable if you wish to see/delete them.'),
device.get_pref('show_recommendations')
)
self.show_archived_books_checkbox = create_checkbox(
_("Show archived books"),
_('Archived books are listed on the device but need to be downloaded to read.'
' Use this option to show these books and match them with books in the calibre library.'),
device.get_pref('show_archived_books')
)
self.show_previews_checkbox = create_checkbox(
_('Show previews'),
            _('Kobo previews are included on the Touch and some other versions;'
' by default they are no longer displayed as there is no good reason to '
'see them. Enable if you wish to see/delete them.'),
device.get_pref('show_previews')
)
self.options_layout.addWidget(self.show_recommendations_checkbox, 0, 0, 1, 1)
self.options_layout.addWidget(self.show_archived_books_checkbox, 1, 0, 1, 1)
self.options_layout.addWidget(self.show_previews_checkbox, 2, 0, 1, 1)
self.options_layout.setRowStretch(3, 1)
@property
def show_recommendations(self):
return self.show_recommendations_checkbox.isChecked()
@property
def show_archived_books(self):
return self.show_archived_books_checkbox.isChecked()
@property
def show_previews(self):
return self.show_previews_checkbox.isChecked()
class AdvancedGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super(AdvancedGroupBox, self).__init__(parent, device, _("Advanced options"))
# self.setTitle(_("Advanced Options"))
self.options_layout = QGridLayout()
self.options_layout.setObjectName("options_layout")
self.setLayout(self.options_layout)
self.support_newer_firmware_checkbox = create_checkbox(
_("Attempt to support newer firmware"),
_('Kobo routinely updates the firmware and the '
'database version. With this option calibre will attempt '
'to perform full read-write functionality - Here be Dragons!! '
'Enable only if you are comfortable with restoring your kobo '
'to factory defaults and testing software. '
'This driver supports firmware V2.x.x and DBVersion up to ') + unicode(
device.supported_dbversion), device.get_pref('support_newer_firmware')
)
self.debugging_title_checkbox = create_checkbox(
_("Title to test when debugging"),
_('Part of title of a book that can be used when doing some tests for debugging. '
'The test is to see if the string is contained in the title of a book. '
'The better the match, the less extraneous output.'),
device.get_pref('debugging_title')
)
self.debugging_title_label = QLabel(_('Title to test when debugging:'))
self.debugging_title_edit = QLineEdit(self)
self.debugging_title_edit.setToolTip(_('Part of title of a book that can be used when doing some tests for debugging. '
'The test is to see if the string is contained in the title of a book. '
'The better the match, the less extraneous output.'))
self.debugging_title_edit.setText(device.get_pref('debugging_title'))
self.debugging_title_label.setBuddy(self.debugging_title_edit)
self.options_layout.addWidget(self.support_newer_firmware_checkbox, 0, 0, 1, 2)
self.options_layout.addWidget(self.debugging_title_label, 1, 0, 1, 1)
self.options_layout.addWidget(self.debugging_title_edit, 1, 1, 1, 1)
self.options_layout.setRowStretch(2, 2)
@property
def support_newer_firmware(self):
return self.support_newer_firmware_checkbox.isChecked()
@property
def debugging_title(self):
return self.debugging_title_edit.text().strip()
class MetadataGroupBox(DeviceOptionsGroupBox):
def __init__(self, parent, device):
super(MetadataGroupBox, self).__init__(parent, device)
self.setTitle(_("Update metadata on the device"))
self.options_layout = QGridLayout()
self.options_layout.setObjectName("options_layout")
self.setLayout(self.options_layout)
self.setCheckable(True)
self.setChecked(device.get_pref('update_device_metadata'))
self.setToolTip(wrap_msg(_('Update the metadata on the device when it is connected. '
'Be careful when doing this as it will take time and could make the initial connection take a long time.')))
self.update_series_checkbox = create_checkbox(
_("Set series information"),
_('The book lists on the Kobo devices can display series information. '
'This is not read by the device from the sideloaded books. '
'Series information can only be added to the device after the book has been processed by the device. '
'Enable if you wish to set series information.'),
device.get_pref('update_series')
)
self.options_layout.addWidget(self.update_series_checkbox, 0, 0, 1, 1)
self.options_layout.setRowStretch(1, 1)
@property
def update_series(self):
return self.update_series_checkbox.isChecked()
@property
def update_device_metadata(self):
return self.isChecked()
if __name__ == '__main__':
from calibre.gui2 import Application
from calibre.devices.kobo.driver import KOBOTOUCH
from calibre.devices.scanner import DeviceScanner
s = DeviceScanner()
s.scan()
app = Application([])
debug_print("KOBOTOUCH:", KOBOTOUCH)
dev = KOBOTOUCH(None)
# dev.startup()
# cd = dev.detect_managed_devices(s.devices)
# dev.open(cd, 'test')
cw = dev.config_widget()
d = QDialog()
d.l = QVBoxLayout()
d.setLayout(d.l)
d.l.addWidget(cw)
bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
d.l.addWidget(bb)
bb.accepted.connect(d.accept)
bb.rejected.connect(d.reject)
if d.exec_() == d.Accepted:
cw.commit()
dev.shutdown()
| gpl-3.0 | 7,033,228,520,682,415,000 | 40.625268 | 142 | 0.618705 | false |
AnalogJ/lexicon | lexicon/tests/providers/test_hover.py | 1 | 2518 | """Integration tests for Hover"""
import json
import re
from unittest import TestCase
from lexicon.tests.providers.integration_tests import IntegrationTestsV2
class HoverProviderTests(TestCase, IntegrationTestsV2):
"""TestCase for Hover"""
provider_name = "hover"
domain = "novuslex.com"
domain_id = "dom1127777"
hoverauth = "0123456789abcdef0123456789abcdef"
hover_session = "0123456789abcdef0123456789abcdef"
def _filter_post_data_parameters(self):
return ["username", "password"]
def _filter_headers(self):
return ["Cookie"]
def _filter_query_parameters(self):
return ["hover_session", "hoverauth"]
def _replace_auth(self, cookie):
cookie = re.sub(
"hover_session=.*;", f"hover_session={self.hover_session};", cookie
)
cookie = re.sub("hoverauth=.*;", f"hoverauth={self.hoverauth};", cookie)
return cookie
def _filter_response(self, response):
if "basestring" not in globals():
basestring = str
if "set-cookie" in response["headers"]:
if isinstance(response["headers"]["set-cookie"], basestring):
response["headers"]["set-cookie"] = self._replace_auth(
response["headers"]["set-cookie"]
)
else:
for i, cookie in enumerate(response["headers"]["set-cookie"]):
response["headers"]["set-cookie"][i] = self._replace_auth(cookie)
try:
filtered_body = json.loads(response["body"]["string"].decode("UTF-8"))
except ValueError:
# Body is not json during authentication, so we let it through.
# Helper function _request in hover.py will raise exception when
# response is not json and it should be.
pass
else:
# filter out my personal contact information
if "contact" in filtered_body:
del filtered_body["contact"]
# if the response is listing all my domains then return a mock response
if "domains" in filtered_body and len(filtered_body["domains"]) > 1:
filtered_body["domains"] = [
{
"id": self.domain_id,
"domain_name": self.domain,
"status": "active",
}
]
response["body"]["string"] = json.dumps(filtered_body).encode("UTF-8")
return response
| mit | -8,230,661,576,582,441,000 | 34.971429 | 85 | 0.571485 | false |
RENCI/xDCIShare | hs_core/tests/api/views/test_unshare_resource.py | 1 | 11042 | import json
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.exceptions import PermissionDenied
from hs_core import hydroshare
from hs_core.views import unshare_resource_with_user, unshare_resource_with_group
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.models import PrivilegeCodes
class TestUnshareResource(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(TestUnshareResource, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.owner = hydroshare.create_account(
'[email protected]',
username='john',
first_name='John',
last_name='Clarson',
superuser=False,
password='jhmypassword',
groups=[]
)
self.user = hydroshare.create_account(
'[email protected]',
username='lisaZ',
first_name='Lisa',
last_name='Ziggler',
superuser=False,
password='lzmypassword',
groups=[]
)
self.unauthorized_user = hydroshare.create_account(
'[email protected]',
username='garyB',
first_name='Gary',
last_name='Brandon',
superuser=False,
password='gbmypassword',
groups=[]
)
        # create a group for testing group access to resource
self.test_group = self.owner.uaccess.create_group(
title='Test Group',
description="This is to test group access to resource",
purpose="Testing group access to resource")
self.gen_res = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.owner,
title='Generic Resource Share Resource Testing'
)
self.factory = RequestFactory()
def test_unshare_resource_with_user(self):
# here we are testing the unshare_resource_with_user view function
# test unshare resource with self.user
# test self.user has no view permission
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
# grant view access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.VIEW)
# test self.user has now view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.view_users)
self._check_unshare_with_user()
# test self.user has no view permission
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
# grant edit access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.CHANGE)
# test self.user has now edit permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.edit_users)
self._check_unshare_with_user()
# test self.user has no edit permission
self.assertNotIn(self.user, self.gen_res.raccess.edit_users)
# grant owner access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.OWNER)
# test self.user has now owner permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.owners)
self._check_unshare_with_user()
# test self.user has no owner permission
self.assertNotIn(self.user, self.gen_res.raccess.owners)
# clean up
hydroshare.delete_resource(self.gen_res.short_id)
def test_unshare_resource_with_self(self):
# here we are testing the unshare_resource_with_user view function
# test unshare resource with self.user by self.user
# test self.user has no view permission
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
# grant view access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.VIEW)
# test self.user has now view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.view_users)
url_params = {'shortkey': self.gen_res.short_id, 'user_id': self.user.id}
url = reverse('unshare_resource_with_user', kwargs=url_params)
request = self.factory.post(url, data={})
# self unsharing
request.user = self.user
# make it a ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
response = unshare_resource_with_user(request, shortkey=self.gen_res.short_id,
user_id=self.user.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = json.loads(response.content)
self.assertEqual(response_data['status'], 'success')
self.assertEqual(response_data['redirect_to'], '/my-resources/')
self.gen_res.raccess.refresh_from_db()
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
def test_unshare_resource_with_user_bad_request(self):
# here we are testing the unshare_resource_with_user view function
# test unshare resource with self.user by unauthorized_user
# test self.user has no view permission
self.assertNotIn(self.user, self.gen_res.raccess.view_users)
# grant view access to self.user
self.owner.uaccess.share_resource_with_user(self.gen_res, self.user, PrivilegeCodes.VIEW)
# test self.user has now view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.view_users)
url_params = {'shortkey': self.gen_res.short_id, 'user_id': self.user.id}
url = reverse('unshare_resource_with_user', kwargs=url_params)
request = self.factory.post(url, data={})
# unauthorized user trying to remove access of self.user
request.user = self.unauthorized_user
# make it a ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
with self.assertRaises(PermissionDenied):
unshare_resource_with_user(request, shortkey=self.gen_res.short_id,
user_id=self.user.id)
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.user, self.gen_res.raccess.view_users)
def test_unshare_resource_with_group(self):
# here we are testing the unshare_resource_with_group view function
# test unshare resource with self.test_group
# test self.test_group has no view permission
self.assertNotIn(self.test_group, self.gen_res.raccess.view_groups)
# grant view access to self.test_group
self.owner.uaccess.share_resource_with_group(self.gen_res, self.test_group,
PrivilegeCodes.VIEW)
# test self.test_group has now view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.test_group, self.gen_res.raccess.view_groups)
self._check_unshare_with_group()
# test self.test_group has no view permission
self.assertNotIn(self.test_group, self.gen_res.raccess.view_groups)
# grant edit access to test_group
self.owner.uaccess.share_resource_with_group(self.gen_res, self.test_group,
PrivilegeCodes.CHANGE)
# test test_group has now edit permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.test_group, self.gen_res.raccess.edit_groups)
self._check_unshare_with_group()
# test test_group has no edit permission
self.assertNotIn(self.test_group, self.gen_res.raccess.edit_groups)
# clean up
hydroshare.delete_resource(self.gen_res.short_id)
def test_unshare_resource_with_group_bad_request(self):
# here we are testing the unshare_resource_with_group view function
# test unshare resource with test_group by unauthorized_user
# test test_group has no view permission
self.assertNotIn(self.test_group, self.gen_res.raccess.view_groups)
# grant view access to test_group
self.owner.uaccess.share_resource_with_group(self.gen_res, self.test_group,
PrivilegeCodes.VIEW)
# test test_group has now view permission
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.test_group, self.gen_res.raccess.view_groups)
url_params = {'shortkey': self.gen_res.short_id, 'group_id': self.test_group.id}
url = reverse('unshare_resource_with_group', kwargs=url_params)
request = self.factory.post(url, data={})
# unauthorized user trying to remove access of test_group
request.user = self.unauthorized_user
# make it a ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
with self.assertRaises(PermissionDenied):
unshare_resource_with_group(request, shortkey=self.gen_res.short_id,
group_id=self.test_group.id)
self.gen_res.raccess.refresh_from_db()
self.assertIn(self.test_group, self.gen_res.raccess.view_groups)
def _check_unshare_with_user(self):
url_params = {'shortkey': self.gen_res.short_id, 'user_id': self.user.id}
url = reverse('unshare_resource_with_user', kwargs=url_params)
request = self.factory.post(url, data={})
request.user = self.owner
# make it a ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
response = unshare_resource_with_user(request, shortkey=self.gen_res.short_id,
user_id=self.user.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = json.loads(response.content)
self.assertEqual(response_data['status'], 'success')
self.gen_res.raccess.refresh_from_db()
def _check_unshare_with_group(self):
url_params = {'shortkey': self.gen_res.short_id, 'group_id': self.test_group.id}
url = reverse('unshare_resource_with_group', kwargs=url_params)
request = self.factory.post(url, data={})
request.user = self.owner
# make it a ajax request
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
response = unshare_resource_with_group(request, shortkey=self.gen_res.short_id,
group_id=self.test_group.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = json.loads(response.content)
self.assertEqual(response_data['status'], 'success')
self.gen_res.raccess.refresh_from_db()
| bsd-3-clause | 5,025,365,695,839,599,000 | 43.524194 | 99 | 0.641369 | false |
DeflatedPickle/Colony | colony/time.py | 1 | 1986 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""""""
from tkinter import IntVar
__title__ = "Time"
__author__ = "DeflatedPickle"
__version__ = "1.3.1"
class Time(object):
def __init__(self, hours: int = 0, minutes: int = 0, seconds: int = 0):
self._hours = IntVar(value=hours)
self._minutes = IntVar(value=minutes)
self._seconds = IntVar(value=seconds)
self.check_time()
def get_time(self):
"""Returns the current time."""
return int("".join(map(str, [self._hours.get(), self._minutes.get(), self._seconds.get()])))
def get_time_formatted(self):
"""Returns the time formatted for readability."""
return "{}:{}:{}".format(self._hours.get(), self._minutes.get(), self._seconds.get())
def get_hours(self):
"""Returns the hours."""
return self._hours
def get_minutes(self):
"""Returns the minutes."""
return self._minutes
def get_seconds(self):
"""Returns the seconds."""
return self._seconds
def set_time(self, hours, minutes, seconds):
"""Sets the time to an amount."""
if hours > 0:
self._hours.set(hours)
if minutes > 0:
self._minutes.set(minutes)
if seconds > 0:
self._seconds.set(seconds)
self.check_time()
def increase_time(self, hours, minutes, seconds):
"""Increases the time by an amount."""
self.set_time(self._hours.get() + hours, self._minutes.get() + minutes, self._seconds.get() + seconds)
self.check_time()
def check_time(self):
"""Checks the time and increments it if it's over."""
if self._seconds.get() >= 60:
self._seconds.set(0)
self._minutes.set(self._minutes.get() + 1)
if self._minutes.get() >= 60:
self._minutes.set(0)
self._hours.set(self._hours.get() + 1)
if self._hours.get() >= 24:
self._hours.set(0)
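

# Minimal usage sketch: Time stores its fields in tkinter IntVars, so a Tk root
# must exist before constructing it; the values below are illustrative only.
if __name__ == "__main__":
    import tkinter

    root = tkinter.Tk()  # IntVar needs a default root window
    clock = Time(hours=1, minutes=30, seconds=50)
    clock.increase_time(0, 0, 20)  # 70 seconds rolls over: seconds reset to 0, minutes +1
    print(clock.get_time_formatted())  # -> "1:31:0"
    root.destroy()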
| mit | -9,133,802,816,021,608,000 | 27.371429 | 110 | 0.552367 | false |
Sinar/telus | lib/pymg3.py | 1 | 7566 | #!/usr/bin/python
"""
This is a module for dealing with MongoDB via PyMongo.
Use this module to manage databases and collections in MongoDB using
the Python driver, PyMongo. The API operation commands have slight
differences between `mongo` shell and `pymongo` in Python scripts.
MongoDB manual (https://docs.mongodb.com/manual/) has notable links
to Getting Started Guide. For writing codes in Python scripts, look
into "Python Edition" instead of "mongo Shell Edition".
For full reference, see MongoDB Ecosystem - Python MongoDB drivers
(https://docs.mongodb.com/ecosystem/drivers/python/) that provides
links to API documentation and other resources.
This module was written with API operation commands that are valid
for PyMongo 3.0 and newer. Avoid deprecated API mentioned by docs.
"""
from __future__ import print_function
import json
import pymongo
from bson.objectid import ObjectId
from bson import json_util
def test_conn(host, port):
"""Test connection to MongoDB server."""
try:
client = pymongo.MongoClient(
host,
port,
connectTimeoutMS=2000,
serverSelectionTimeoutMS=3000)
client.admin.command("ismaster")
except pymongo.errors.ConnectionFailure:
print('Failed to connect')
raise RuntimeError('Server not available', host, port)
else:
print('Connected to server')
return client
def get_conn(host, port):
"""Return versions of MongoDB and PyMongo when available."""
client = test_conn(host, port)
server_version = client.server_info()['version']
driver_version = pymongo.version
print('Using MongoDB {0} with PyMongo {1}'.format(
server_version, driver_version))
return server_version, driver_version
def use_conn(host, port):
"""Return client for a MongoDB instance."""
client = test_conn(host, port)
return client
def set_database(client, dbname):
"""Return database with specified name on MongoDB."""
database = client[dbname]
print('Setup database: {}'.format(database.name))
return database
def set_collection(client, dbname, ccname):
"""Return collection with specified name on MongoDB."""
collection = client[dbname][ccname]
print('Setup collection: {}'.format(collection.name))
return collection
def use_setup(client, dbname, ccname):
"""Return database and collection that were setup on MongoDB."""
database = set_database(client, dbname)
collection = set_collection(client, dbname, ccname)
return database, collection
def list_objects(fpath):
"""Return list of objects from specified file."""
obj_ls = []
for each in open(fpath, 'r'):
obj_ls.append(each)
return obj_ls
def drop_objects(collection):
"""Remove all objects from specified collection if not empty."""
if collection.count() != 0:
print('{} was not empty, drop first'.format(collection.name))
collection.drop()
def find_object(collection):
"""
Return one JSON object from specified collection.
"""
obj = collection.find_one()
parse = json.dumps(obj, default=json_util.default, sort_keys=True)
return parse
def find_objects(collection, args):
"""Return JSON objects from specified collection if any."""
print('Query argument: {}'.format(args))
obj_ls = []
if type(args) is type({}):
obj = collection.find(args)
obj = list(obj)
count = 0
for each in obj:
count = count + 1
parse = json.dumps(each, default=json_util.default,
sort_keys=True)
obj_ls.append(parse)
if count == 0:
print('Not found')
return None
elif count == 1:
print('Found one object')
return obj_ls[0]
else:
print('Found {} objects in a list'.format(count))
return obj_ls
else:
print('Did not find')
raise TypeError('Unexpected type of argument', type(args))
def show_object(collection):
"""
Show one JSON object from specified collection in MongoDB. This
depends on find_object function that return an object.
"""
obj = find_object(collection)
print('Show first object: {}'.format(obj))
def show_objects(collection, args):
"""Show JSON objects from specified collection in MongoDB."""
obj = find_objects(collection, args)
if type(obj) is type(''):
print('Show target object: {}'.format(obj))
elif type(obj) is type([]):
print('Show only first 3 objects:')
num = 0
for each in obj:
print(each)
num = num + 1
if num == 3:
break
else:
raise TypeError('Unexpected type of object', type(obj))
def scan_field(obj, string):
"""Match non-empty value for specified string in JSON object."""
value = obj[string]
ismatch = False
if value != "":
ismatch = True
return ismatch
def copy_field(obj, string):
"""Return standalone object of specified string in JSON object."""
value = obj[string]
new_obj = {string: value}
return new_obj
def store_objects(collection, fpath):
"""Store objects from JSONL into MongoDB."""
print('Store objects into {}'.format(collection.name))
obj_ls = list_objects(fpath)
for each in obj_ls:
obj = json.loads(each)
collection.insert_one(obj)
print('Inserted objects: {}'.format(collection.count()))
def store_nested(client, collection, fpath):
"""
Store objects and the contained nested objects from JSONL into
MongoDB. The nested objects are expected to be found in objects
from JSONL file and have been predefined (buyer, seller).
"""
print('Store source objects and nested objects')
buyers = set_collection(client, 'telus', 'buyers')
drop_objects(buyers)
sellers = set_collection(client, 'telus', 'sellers')
drop_objects(sellers)
obj_ls = list_objects(fpath)
for each in obj_ls:
obj = json.loads(each)
buyer_string = 'offering_office' # non-OCDS
if scan_field(obj, buyer_string):
buyers.insert_one(copy_field(obj, buyer_string))
seller_string = 'contractor' # non-OCDS
if scan_field(obj, seller_string):
sellers.insert_one(copy_field(obj, seller_string))
collection.insert_one(obj)
print('Inserted buyers: {}'.format(buyers.count()))
print('Inserted sellers: {}'.format(sellers.count()))
print('Inserted source objects: {}'.format(collection.count()))
def show_nested(client, collection):
"""
Show object and the contained nested objects that have been stored
in respective collections in MongoDB. The nested objects have been
predefined (buyer, seller).
"""
print('Show source object and nested objects')
target = find_object(collection)
print('Source object: {}'.format(target))
buyers = set_collection(client, 'telus', 'buyers')
sellers = set_collection(client, 'telus', 'sellers')
print('Nested objects:')
target = json.loads(target)
buyer_string = 'offering_office' # non-OCDS
if scan_field(target, buyer_string):
show_objects(buyers, copy_field(target, buyer_string))
else:
print('No available buyer')
seller_string = 'contractor' # non-OCDS
if scan_field(target, seller_string):
show_objects(sellers, copy_field(target, seller_string))
else:
print('No available seller')
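

# Minimal usage sketch, assuming a MongoDB instance on localhost:27017; the
# database/collection names and the throw-away JSONL file are illustrative only.
if __name__ == '__main__':
    client = use_conn('localhost', 27017)
    db, coll = use_setup(client, 'telus', 'demo')
    drop_objects(coll)
    # give store_objects a tiny JSONL file to load
    with open('demo.jsonl', 'w') as fh:
        fh.write('{"contract": "example", "amount": 1}\n')
    store_objects(coll, 'demo.jsonl')
    show_objects(coll, {"contract": "example"})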
| gpl-3.0 | -5,252,623,188,029,648,000 | 33.866359 | 70 | 0.649749 | false |
aerospace-notebook/aerospace-notebook | nbimport.py | 1 | 2719 | """
Module.
"""
import io, os, sys, types
from IPython import nbformat
from IPython.core.interactiveshell import InteractiveShell
def find_notebook(fullname, path=None):
"""find a notebook, given its fully qualified name and an optional path
This turns "foo.bar" into "foo/bar.ipynb"
and tries turning "Foo_Bar" into "Foo Bar" if Foo_Bar
does not exist.
"""
name = fullname.rsplit('.', 1)[-1]
if not path:
path = ['']
for d in path:
nb_path = os.path.join(d, name + ".ipynb")
if os.path.isfile(nb_path):
return nb_path
# let import Notebook_Name find "Notebook Name.ipynb"
nb_path = nb_path.replace("_", " ")
if os.path.isfile(nb_path):
return nb_path
class NotebookLoader(object):
"""Module Loader for IPython Notebooks"""
def __init__(self, path=None):
self.shell = InteractiveShell.instance()
self.path = path
def load_module(self, fullname):
"""import a notebook as a module"""
path = find_notebook(fullname, self.path)
print ("importing IPython notebook from %s" % path)
# load the notebook object
with io.open(path, 'r', encoding='utf-8') as f:
nb = nbformat.read(f, 4)
# create the module and add it to sys.modules
# if name in sys.modules:
# return sys.modules[name]
mod = types.ModuleType(fullname)
mod.__file__ = path
mod.__loader__ = self
sys.modules[fullname] = mod
# extra work to ensure that magics that would affect the user_ns
# actually affect the notebook module's ns
save_user_ns = self.shell.user_ns
self.shell.user_ns = mod.__dict__
try:
for cell in nb['cells']:
if cell['cell_type'] == 'code':
# transform the input to executable Python
code = self.shell.input_transformer_manager.transform_cell(cell['source'])
# run the code in themodule
exec(code, mod.__dict__)
finally:
self.shell.user_ns = save_user_ns
return mod
class NotebookFinder(object):
"""Module finder that locates IPython Notebooks"""
def __init__(self):
self.loaders = {}
def find_module(self, fullname, path=None):
nb_path = find_notebook(fullname, path)
if not nb_path:
return
key = path
if path:
# lists aren't hashable
key = os.path.sep.join(path)
if key not in self.loaders:
self.loaders[key] = NotebookLoader(path)
return self.loaders[key]
sys.meta_path.append(NotebookFinder())
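

# Minimal usage sketch: importing this module registers NotebookFinder on
# sys.meta_path, after which a notebook such as "My_Notebook.ipynb" (a
# hypothetical name) can be imported with `import My_Notebook` like any module.
if __name__ == "__main__":
    # harmless self-check: locate a hypothetical notebook on the current path
    print("Notebook found: %s" % find_notebook("My_Notebook"))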
| bsd-3-clause | -4,277,971,977,055,841,000 | 30.988235 | 90 | 0.579257 | false |
vallemrv/tpvB3 | tpv_for_eetop/tpv/controllers/arqueo.py | 1 | 6717 | # -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 10-May-2017
# @Email: [email protected]
# @Last modified by: valle
# @Last modified time: 17-Mar-2018
# @License: Apache license vesion 2.0
from kivy.uix.anchorlayout import AnchorLayout
from kivy.properties import ObjectProperty, StringProperty
from kivy.lang import Builder
from kivy.storage.jsonstore import JsonStore
from kivy.clock import Clock
from kivy.core import Logger
from kivy.network.urlrequest import UrlRequest
from controllers.lineaarqueo import LineaArqueo
from valle_libs.tpv.impresora import DocPrint
from valle_libs.utils import parse_float
from models.db import QSon, VentasSender
from config import config
from modals import Aceptar
from glob import glob
from os import rename
from datetime import datetime
from time import strftime
import urllib
import threading
import json
Builder.load_file("view/arqueo.kv")
class Arqueo(AnchorLayout):
tpv = ObjectProperty(None)
text_cambio = StringProperty("300")
url = config.URL_SERVER+"/ventas/arquear/"
def __on_success__(self, req, result):
self.tpv.hide_spin()
if result["success"] == True:
desglose = result["desglose"]
self.tpv.mostrar_inicio()
printDoc = DocPrint()
printDoc.printDesglose("caja", self.fecha, desglose)
def __got_error__(self, req, *args):
req._resp_status = "Error"
Logger.debug("got error {0}".format(req.url))
self.tpv.hide_spin()
def __got_fail__(self, req, *args):
req._resp_status = "Fail"
Logger.debug("got fail {0}".format(req.url))
self.tpv.hide_spin()
def __got_redirect__(self, req, *args):
req._resp_status = "Redirect"
Logger.debug("got redirect {0}".format(req.url))
self.tpv.hide_spin()
def send(self, data):
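        # Posts the arqueo as form-encoded data with the whole JSON document under
        # a single 'data' key (presumably the field name the server-side view reads).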
SEND_DATA = {'data':json.dumps(data)}
data = urllib.urlencode(SEND_DATA)
headers = {'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/json'}
r = UrlRequest(self.url, on_success=self.__on_success__, req_body=data,
req_headers=headers, method="POST",
on_failure=self.__got_fail__,
on_error=self.__got_error__,
on_redirect=self.__got_redirect__)
def nuevo_arqueo(self):
self.lista_conteo = []
self.lista_gastos = []
self.lista_ticket = []
self.lista_ingresos = []
self.fecha = ""
self.caja_dia = 0.0
self.efectivo = 0.0
self.tarjeta = 0.0
self.total_gastos = 0.0
self.conteo.rm_all_widgets()
self.gastos.rm_all_widgets()
self.ingresos.rm_all_widgets()
sender = VentasSender()
sender.filter(QSon("Pedidos", estado__contains="NPG"))
sender.send(self.comprobar_npg, wait=False)
def comprobar_npg(self, req, r):
if r["success"] == True:
if len(r["get"]['pedidos']) > 0:
self.aceptar = Aceptar(onExit=self.salir_arqueo)
self.aceptar.open()
def salir_arqueo(self):
if self.aceptar != None:
self.aceptar.dismiss()
self.tpv.mostrar_inicio()
def arquear(self):
self.fecha = str(datetime.now())
if self.cambio == "":
self.cambio = 300.00
self.lista_conteo = sorted(self.lista_conteo, key=lambda k: k["tipo"],
reverse=True)
self.run_arqueo()
def run_arqueo(self):
arqueo = {'caja_dia': self.caja_dia,
'efectivo':self.efectivo,
'cambio':self.cambio,
'total_gastos':self.total_gastos,
'tarjeta':self.tarjeta,
'descuadre':0,
'conteo':[],
'gastos':[],
'extras': []}
for conteo in self.lista_conteo:
arqueo['conteo'].append(conteo)
for gasto in self.lista_gastos:
arqueo['gastos'].append(gasto)
for ing in self.lista_ingresos:
arqueo['extras'].append(ing)
self.send(arqueo)
self.tpv.show_spin()
def add_conteo(self, _can, _tipo):
can = _can.text
tipo = parse_float(_tipo.text)
_can.text = _tipo.text = ""
linea = LineaArqueo(borrar=self.borrar_conteo)
texto_tipo = "Monedas" if tipo < 5 else "Billetes"
linea.text = u"{0: >5} {1} de {2}".format(can, texto_tipo, tipo)
linea.total = parse_float(can) * tipo
linea.tag = {"can": can, "tipo": tipo,
"texto_tipo": texto_tipo,
"total": linea.total}
self.efectivo += linea.total
self.lista_conteo.append(linea.tag)
self.conteo.add_linea(linea)
def borrar_conteo(self, linea):
self.efectivo -= linea.total
self.lista_conteo.remove(linea.tag)
self.conteo.rm_linea(linea)
def add_gasto(self, _des, _gasto):
des = _des.text
gasto = _gasto.text
_des.text = _gasto.text = ""
linea = LineaArqueo(borrar=self.borrar_gasto)
linea.text = u"{0} ".format(des)
linea.total = parse_float(gasto)
linea.tag = {"des": des, "gasto": gasto}
self.total_gastos += linea.total
self.lista_gastos.append(linea.tag)
self.gastos.add_linea(linea)
def borrar_gasto(self, linea):
self.total_gastos -= linea.total
self.lista_gastos.remove(linea.tag)
self.gastos.rm_linea(linea)
def add_ingreso(self, num_pd, importe, modo_pago):
_num_pd = num_pd.text
_importe = importe.text
linea = LineaArqueo(borrar=self.borrar_ingreso)
_modo_pago = "Efectivo" if not modo_pago.active else "Tarjeta"
linea.text = u"Peddos {0} modo pago {1} ".format(_num_pd, _modo_pago)
linea.total = parse_float(_importe)
linea.tag = {"numero_pedido": _num_pd, "importe": _importe,
"modo_pago": _modo_pago, "estado": "arqueado"}
if _modo_pago == "Tarjeta":
self.tarjeta += linea.total
else:
self.caja_dia += linea.total
num_pd.text = importe.text = ""
modo_pago.active = False
self.lista_ingresos.append(linea.tag)
self.ingresos.add_linea(linea)
def borrar_ingreso(self, linea):
modo_pago = linea.tag.get("modo_pago")
if modo_pago == "Tarjeta":
self.tarjeta -= linea.total
else:
self.caja_dia -= linea.total
self.lista_ingresos.remove(linea.tag)
self.ingresos.rm_linea(linea)
| apache-2.0 | -6,670,678,691,411,380,000 | 31.765854 | 79 | 0.57749 | false |
kingname/Bi_BiBi | jikexueyuan/webControlWithDataBase.py | 1 | 1508 | #--coding:utf8--
from flask.ext.bootstrap import Bootstrap
from flask import Flask, render_template, redirect
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField, TextAreaField
from util.DataBaseManager import DataBaseManager
app = Flask(__name__)
bootstrap = Bootstrap(app)
app.config['SECRET_KEY'] = 'youcouldneverknowhis-name'
app.config.from_object(__name__)
class contentForm(Form):
commandInConfig = StringField(u'')
commandInWrite = TextAreaField(u'', default="")
    sendCommand = SubmitField(u'发送命令')   # "Send command"
    clearCommand = SubmitField(u'清空命令')  # "Clear command"
@app.route('/', methods=['GET', 'POST'])
def index():
form = contentForm()
dataBaseManager = DataBaseManager()
if form.validate_on_submit():
innerCommand = form.commandInConfig.data
writeCommand = form.commandInWrite.data
if not (innerCommand or writeCommand):
            errorinfo = u'内置命令和自定义代码至少要写一个!'  # "At least one of the built-in command or custom code is required!"
form.commandInWrite.data = ''
form.commandInConfig.data = ''
return render_template('index.html', form=form, errorinfo=errorinfo)
else:
info = {'innerCommand': innerCommand, 'writeCommand': writeCommand, 'run': False}
dataBaseManager.insert(info)
return redirect('/')
return render_template('index.html', form=form, errorinfo='')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, threaded=True, debug=True)
app.run(processes=10)
| gpl-3.0 | -7,558,096,615,025,651,000 | 34.560976 | 93 | 0.674897 | false |
Arello-Mobile/sphinx-confluence | sphinx_confluence/__init__.py | 1 | 20132 | # -*- coding: utf-8 -*-
"""
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format
"""
from distutils.version import LooseVersion
import os
from docutils import nodes
from docutils.parsers.rst import directives, Directive, roles
from docutils.parsers.rst.directives import images
from docutils.parsers.rst.roles import set_classes
import sphinx
try:
from sphinx.builders.html import JSONHTMLBuilder
except ImportError:
from sphinxcontrib.serializinghtml import JSONHTMLBuilder
from sphinx.directives.code import CodeBlock
from sphinx.locale import _
from sphinx.writers.html import HTMLTranslator
def true_false(argument):
return directives.choice(argument, ('true', 'false'))
def static_dynamic(argument):
return directives.choice(argument, ('static', 'dynamic'))
class TitlesCache(object):
titles = {}
@staticmethod
def _document_key(document):
return hash(document)
@classmethod
def set_title(cls, document, title):
cls.titles[cls._document_key(document)] = title
@classmethod
def get_title(cls, document):
return cls.titles.get(cls._document_key(document), None)
@classmethod
def has_title(cls, document):
return cls._document_key(document) in cls.titles
class JSONConfluenceBuilder(JSONHTMLBuilder):
"""For backward compatibility"""
name = 'json_conf'
def __init__(self, app):
super(JSONConfluenceBuilder, self).__init__(app)
if LooseVersion(sphinx.__version__) >= LooseVersion("1.4"):
self.translator_class = HTMLConfluenceTranslator
self.warn('json_conf builder is deprecated and will be removed in future releases')
class HTMLConfluenceTranslator(HTMLTranslator):
def unimplemented_visit(self, node):
self.builder.warn('Unimplemented visit is not implemented for node: {}'.format(node))
def unknown_visit(self, node):
self.builder.warn('Unknown visit is not implemented for node: {}'.format(node))
def visit_admonition(self, node, name=''):
"""
Info, Tip, Note, and Warning Macros
https://confluence.atlassian.com/conf58/info-tip-note-and-warning-macros-771892344.html
<ac:structured-macro ac:name="info">
<ac:parameter ac:name="icon">false</ac:parameter>
<ac:parameter ac:name="title">This is my title</ac:parameter>
<ac:rich-text-body>
<p>
This is important information.
</p>
</ac:rich-text-body>
</ac:structured-macro>
"""
confluence_admonition_map = {
'note': 'info',
'warning': 'note',
'attention': 'note',
'hint': 'tip',
'tip': 'tip',
'important': 'warning',
'error': 'warning',
'danger': 'warning',
}
admonition_type = confluence_admonition_map.get(name, 'info')
macro = """\
<ac:structured-macro ac:name="{admonition_type}">
<ac:parameter ac:name="icon">true</ac:parameter>
<ac:parameter ac:name="title"></ac:parameter>
<ac:rich-text-body>
"""
self.body.append(macro.format(admonition_type=admonition_type))
def depart_admonition(self, node=None):
macro = """
</ac:rich-text-body>
</ac:structured-macro>\n
"""
self.body.append(macro)
def imgtag(self, filename, suffix='\n', **attributes):
"""
Attached image
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format#ConfluenceStorageFormat-Images
<ac:image>
<ri:attachment ri:filename="atlassian_logo.gif" />
</ac:image>
Supported image attributes (some of these attributes mirror the equivalent HTML 4 IMG element):
Name Description
---- -----------
ac:align image alignment
ac:border Set to "true" to set a border
ac:class css class attribute.
ac:title image tool tip.
ac:style css style
ac:thumbnail Set to "true" to designate this image as a thumbnail.
ac:alt alt text
ac:height image height
ac:width image width
"""
prefix = []
atts = {}
for (name, value) in attributes.items():
atts[name.lower()] = value
attlist = atts.items()
attlist = sorted(attlist)
parts = []
src_part = '<ri:attachment ri:filename="%s" />' % filename
for name, value in attlist:
# value=None was used for boolean attributes without
# value, but this isn't supported by XHTML.
assert value is not None
            # `unicode` only exists on Python 2; fall back to `str` on Python 3.
            try:
                text_type = unicode
            except NameError:
                text_type = str
            if isinstance(value, list):
                value = u' '.join(map(text_type, value))
            else:
                value = text_type(value)
parts.append('ac:%s="%s"' % (name.lower(), self.attval(value)))
infix = '</ac:image>'
return ''.join(prefix) + '<ac:image %s>%s%s' % (' '.join(parts), src_part, infix) + suffix
def visit_image(self, node):
atts = {}
uri = node['uri']
filename = os.path.basename(uri)
atts['alt'] = node.get('alt', uri)
atts['thumbnail'] = 'true'
if 'width' in node:
atts['width'] = node['width']
if 'name' in node:
atts['title'] = node['name']
if (isinstance(node.parent, nodes.TextElement) or
(isinstance(node.parent, nodes.reference) and
not isinstance(node.parent.parent, nodes.TextElement))):
# Inline context or surrounded by <a>...</a>.
suffix = ''
else:
suffix = '\n'
self.context.append('')
self.body.append(self.imgtag(filename, suffix, **atts))
def visit_title(self, node):
if isinstance(node.parent, nodes.section) and not TitlesCache.has_title(self.document):
h_level = self.section_level + self.initial_header_level - 1
if h_level == 1:
# Confluence take first title for page title from rst
# It use for making internal links
TitlesCache.set_title(self.document, node.children[0])
# ignore first header; document must have title header
raise nodes.SkipNode
HTMLTranslator.visit_title(self, node)
def visit_target(self, node):
"""
Anchor Macro
https://confluence.atlassian.com/display/DOC/Anchor+Macro
<ac:structured-macro ac:name="anchor">
<ac:parameter ac:name="">here</ac:parameter>
</ac:structured-macro>
"""
# Anchor confluence macros
anchor_macros = """
<ac:structured-macro ac:name="anchor">
<ac:parameter ac:name="">%s</ac:parameter>
</ac:structured-macro>
"""
if 'refid' in node or 'refname' in node:
if 'refuri' in node:
link = node['refuri']
elif 'refid' in node:
link = node['refid']
else:
link = node['refname']
self.body.append(anchor_macros % link)
def depart_target(self, node):
pass
def visit_literal_block(self, node):
"""
Code Block Macro
https://confluence.atlassian.com/display/DOC/Code+Block+Macro
<ac:structured-macro ac:name="code">
<ac:parameter ac:name="title">This is my title</ac:parameter>
<ac:parameter ac:name="theme">FadeToGrey</ac:parameter>
<ac:parameter ac:name="linenumbers">true</ac:parameter>
<ac:parameter ac:name="language">xml</ac:parameter>
<ac:parameter ac:name="firstline">0001</ac:parameter>
<ac:parameter ac:name="collapse">true</ac:parameter>
<ac:plain-text-body><![CDATA[<b>This is my code</b>]]></ac:plain-text-body>
</ac:structured-macro>
"""
parts = ['<ac:structured-macro ac:name="code">']
if 'language' in node:
# Collapsible argument
if node['language'] == 'collapse':
parts.append('<ac:parameter ac:name="collapse">true</ac:parameter>')
valid = ['actionscript3', 'bash', 'csharp', 'coldfusion', 'cpp', 'css', 'delphi', 'diff', 'erlang',
'groovy', 'html/xml', 'java', 'javafx', 'javascript', 'none', 'perl', 'php', 'powershell',
'python', 'ruby', 'scala', 'sql', 'vb']
if node['language'] not in valid:
node['language'] = 'none'
parts.append('<ac:parameter ac:name="language">%s</ac:parameter>' % node['language'])
if 'linenos' in node and node['linenos']:
parts.append('<ac:parameter ac:name="linenumbers">true</ac:parameter>')
if 'caption' in node and node['caption']:
parts.append('<ac:parameter ac:name="title">%s</ac:parameter>' % node['caption'])
parts.append('<ac:plain-text-body><![CDATA[%s]]></ac:plain-text-body>' % node.rawsource)
parts.append('</ac:structured-macro>')
self.body.append(''.join(parts))
raise nodes.SkipNode
def visit_download_reference(self, node):
"""
Link to an attachment
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format#ConfluenceStorageFormat-Links
<ac:link>
<ri:attachment ri:filename="atlassian_logo.gif" />
<ac:plain-text-link-body><![CDATA[Link to a Confluence Attachment]]></ac:plain-text-link-body>
</ac:link>
"""
if 'filename' not in node:
self.context.append('')
return
text = None
if len(node.children) > 0 and len(node.children[0].children) > 0:
text = node.children[0].children[0]
parts = [
'<ac:link>',
'<ri:attachment ri:filename="%s" />' % node['filename'],
'<ac:plain-text-link-body>',
'<![CDATA[%s]]>' % text if text else '',
'</ac:plain-text-link-body>',
'</ac:link>',
]
self.body.append(''.join(parts))
raise nodes.SkipNode
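    # For illustration: this handles the nodes produced by Sphinx's
    # ``:download:`` role, e.g. ``:download:`Report <files/report.pdf>```,
    # and emits the <ac:link>/<ri:attachment> markup shown in the docstring.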
def visit_section(self, node):
# removed section open tag
self.section_level += 1
def depart_section(self, node):
# removed section close tag
self.section_level -= 1
def visit_reference(self, node):
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
else:
atts['class'] += ' external'
if 'refuri' in node:
atts['href'] = ''
# Confluence makes internal links with prefix from page title
if node.get('internal') and TitlesCache.has_title(self.document):
atts['href'] += '#%s-' % TitlesCache.get_title(self.document).replace(' ', '')
atts['href'] += node['refuri']
if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = 1
else:
assert 'refid' in node, 'References must have "refuri" or "refid" attribute.'
atts['href'] = ''
# Confluence makes internal links with prefix from page title
if node.get('internal') and TitlesCache.has_title(self.document):
atts['href'] += '#%s-' % TitlesCache.get_title(self.document).replace(' ', '')
atts['href'] += node['refid']
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
if 'reftitle' in node:
atts['title'] = node['reftitle']
self.body.append(self.starttag(node, 'a', '', **atts))
if node.get('secnumber'):
self.body.append(('%s' + self.secnumber_suffix) % '.'.join(map(str, node['secnumber'])))
def visit_desc(self, node):
""" Replace <dl> """
self.body.append(self.starttag(node, 'div', style="margin-top: 10px"))
def depart_desc(self, node):
self.body.append('</div>\n\n')
def visit_desc_signature(self, node):
""" Replace <dt> """
# the id is set automatically
self.body.append(self.starttag(
node, 'div', style='margin-left: 20px; font-weight: bold;'))
# anchor for per-desc interactive data
if node.parent['objtype'] != 'describe' and node['ids'] and node['first']:
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
""" Copy-paste from original method """
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</div>')
def visit_desc_content(self, node):
""" Replace <dd> """
self.body.append(self.starttag(
node, 'div', '', style='margin-left: 40px;'))
def depart_desc_content(self, node):
self.body.append('</div>')
def visit_table(self, node):
""" Fix ugly table border
"""
self.context.append(self.compact_p)
self.compact_p = True
classes = ' '.join(['docutils', self.settings.table_style]).strip()
self.body.append(
self.starttag(node, 'table', CLASS=classes, border="0"))
def write_colspecs(self):
""" Fix ugly column width
"""
pass
class ImageConf(images.Image):
"""
Image confluence directive
"""
def run(self):
# remove 'align' processing
# remove 'target' processing
self.options.pop('align', None)
reference = directives.uri(self.arguments[0])
self.options['uri'] = reference
set_classes(self.options)
image_node = nodes.image(self.block_text, **self.options)
self.add_name(image_node)
return [image_node]
class TocTree(Directive):
"""
Replace sphinx "toctree" directive to confluence macro
Table of Contents Macro
https://confluence.atlassian.com/display/DOC/Table+of+Contents+Macro
<ac:structured-macro ac:name="toc">
<ac:parameter ac:name="style">square</ac:parameter>
<ac:parameter ac:name="minLevel">1</ac:parameter>
<ac:parameter ac:name="maxLevel">3</ac:parameter>
<ac:parameter ac:name="type">list</ac:parameter>
</ac:structured-macro>
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'maxdepth': int,
'name': directives.unchanged,
'caption': directives.unchanged_required,
'glob': directives.flag,
'hidden': directives.flag,
'includehidden': directives.flag,
'titlesonly': directives.flag,
}
def run(self):
macro = """
<ac:structured-macro ac:name="toc">
<ac:parameter ac:name="style">square</ac:parameter>
<ac:parameter ac:name="minLevel">1</ac:parameter>
<ac:parameter ac:name="maxLevel">3</ac:parameter>
<ac:parameter ac:name="type">list</ac:parameter>
</ac:structured-macro>\n
"""
attributes = {'format': 'html'}
raw_node = nodes.raw('', macro, **attributes)
return [raw_node]
class JiraIssuesDirective(Directive):
"""
JIRA Issues Macro
https://confluence.atlassian.com/doc/jira-issues-macro-139380.html
<ac:structured-macro ac:name="jira" ac:schema-version="1" ac:macro-id="da6b6413-0b93-4052-af90-dbb252175860">
<ac:parameter ac:name="server">Atlassian JIRA (JAC)</ac:parameter>
<ac:parameter ac:name="columns">key,summary,created</ac:parameter>
<ac:parameter ac:name="maximumIssues">20</ac:parameter>
<ac:parameter ac:name="jqlQuery">project = CONF AND FixVersion=5.8 </ac:parameter>
<ac:parameter ac:name="serverId">146780e9-1234-312f-1243-ed0555666fa</ac:parameter>
</ac:structured-macro>
"""
required_arguments = 1
has_content = False
final_argument_whitespace = True
option_spec = {
"anonymous": true_false,
"server_id": directives.unchanged,
"baseurl": directives.unchanged,
"columns": directives.unchanged,
"count": true_false,
"height": directives.positive_int,
"title": directives.unchanged,
"render_mode": static_dynamic,
"url": directives.unchanged,
"width": directives.unchanged,
"maximum_issues": directives.positive_int
}
def run(self):
result = ['<ac:structured-macro ac:name="jira" ac:schema-version="1">']
param_macro = '<ac:parameter ac:name="{name}">{value}</ac:parameter>'
for name, value in self.options.items():
result.append(param_macro.format(name=underscore_to_camelcase(name), value=value))
jql_query = self.arguments[0]
result.append(param_macro.format(name='jqlQuery', value=jql_query))
result.append('</ac:structured-macro>')
attributes = {'format': 'html'}
raw_node = nodes.raw('', '\n'.join(result), **attributes)
return [raw_node]
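# Hypothetical reStructuredText usage of the directive above (the argument
# becomes the jqlQuery parameter; options mirror option_spec and are
# camel-cased before being written as macro parameters):
#
#     .. jira_issues:: project = CONF AND fixVersion = 5.8
#        :columns: key,summary,created
#        :maximum_issues: 20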
class JiraIssueRole(roles.GenericRole):
def __call__(self, role, rawtext, text, *args, **kwargs):
macro = """\
<ac:structured-macro ac:name="jira" ac:schema-version="1">
<ac:parameter ac:name="key">{key}</ac:parameter>
<ac:parameter ac:name="showSummary">false</ac:parameter>
</ac:structured-macro>
"""
attributes = {'format': 'html'}
return [nodes.raw('', macro.format(key=text), **attributes)], []
class JiraUserRole(roles.GenericRole):
def __call__(self, role, rawtext, text, *args, **kwargs):
macro = """\
<ac:link>
<ri:user ri:username="{username}"/>
</ac:link>
"""
attributes = {'format': 'html'}
return [nodes.raw('', macro.format(username=text), **attributes)], []
class CaptionedCodeBlock(CodeBlock):
def run(self):
ret = super(CaptionedCodeBlock, self).run()
caption = self.options.get('caption')
if caption and isinstance(ret[0], nodes.container):
container_node = ret[0]
if isinstance(container_node[0], nodes.caption):
container_node[1]['caption'] = caption
return [container_node[1]]
return ret
def underscore_to_camelcase(text):
return ''.join(word.title() if i else word for i, word in enumerate(text.split('_')))
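# For example: underscore_to_camelcase('maximum_issues') -> 'maximumIssues',
# which matches the parameter names Confluence expects in its macros.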
def get_path():
from os import path
package_dir = path.abspath(path.dirname(__file__))
template_path = path.join(package_dir, 'themes')
return template_path
def setup(app):
"""
:type app: sphinx.application.Sphinx
"""
app.config.html_theme_path = [get_path()]
app.config.html_theme = 'confluence'
app.config.html_scaled_image_link = False
if LooseVersion(sphinx.__version__) >= LooseVersion("1.4"):
app.set_translator("html", HTMLConfluenceTranslator)
app.set_translator("json", HTMLConfluenceTranslator)
else:
app.config.html_translator_class = 'sphinx_confluence.HTMLConfluenceTranslator'
app.config.html_add_permalinks = ''
jira_issue = JiraIssueRole('jira_issue', nodes.Inline)
app.add_role(jira_issue.name, jira_issue)
jira_user = JiraUserRole('jira_user', nodes.Inline)
app.add_role(jira_user.name, jira_user)
app.add_directive('image', ImageConf)
app.add_directive('toctree', TocTree)
app.add_directive('jira_issues', JiraIssuesDirective)
app.add_directive('code-block', CaptionedCodeBlock)
app.add_builder(JSONConfluenceBuilder)
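# Minimal usage sketch (assumption, not from the original sources): enable the
# extension in a Sphinx project's conf.py and build as usual; setup() above
# then installs the Confluence translator, theme, roles and directives.
#
#     # conf.py
#     extensions = ['sphinx_confluence']
#
#     $ sphinx-build -b html docs/ build/html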
| mit | 2,380,515,453,393,359,000 | 33.179966 | 113 | 0.582853 | false |
gsantovena/marathon | tests/system/test_marathon_root.py | 1 | 28234 | """ Test using root marathon.
This test suite imports all common tests found in marathon_common.py which are
to be tested on root marathon and MoM.
In addition it contains tests which are specific to root marathon, specifically
tests around dcos services registration and control, and security.
"""
import apps
import common
import json
import os
import pytest
import requests
import retrying
import uuid
from datetime import timedelta
import dcos_service_marathon_tests
import marathon_auth_common_tests
import marathon_common_tests
import marathon_pods_tests
from shakedown.clients import marathon
from shakedown.dcos import marathon_leader_ip
from shakedown.dcos.agent import get_private_agents, get_public_agents, public_agents, required_public_agents # NOQA F401
from shakedown.dcos.cluster import dcos_1_9, dcos_version_less_than, ee_version, is_strict # NOQA F401
from shakedown.dcos.command import run_command, run_command_on_agent, run_command_on_master
from shakedown.dcos.marathon import deployment_wait, marathon_version_less_than # NOQA F401
from shakedown.dcos.master import get_all_master_ips, masters, is_multi_master, required_masters # NOQA F401
from shakedown.dcos.service import wait_for_service_endpoint
from fixtures import sse_events, wait_for_marathon_and_cleanup, user_billy, docker_ipv6_network_fixture, archive_sandboxes, install_enterprise_cli # NOQA F401
# the following lines essentially do:
# from dcos_service_marathon_tests import test_*
for attribute in dir(dcos_service_marathon_tests):
if attribute.startswith('test_'):
exec("from dcos_service_marathon_tests import {}".format(attribute))
# the following lines essentially do:
# from marathon_auth_common_tests import test_*
for attribute in dir(marathon_auth_common_tests):
if attribute.startswith('test_'):
exec("from marathon_auth_common_tests import {}".format(attribute))
# the following lines essentially do:
# from marathon_common_tests import test_*
for attribute in dir(marathon_common_tests):
if attribute.startswith('test_'):
exec("from marathon_common_tests import {}".format(attribute))
# the following lines essentially do:
# from marathon_pods_tests import test_*
for attribute in dir(marathon_pods_tests):
if attribute.startswith('test_'):
exec("from marathon_pods_tests import {}".format(attribute))
pytestmark = [pytest.mark.usefixtures('wait_for_marathon_and_cleanup')]
@pytest.fixture(scope="function")
def marathon_service_name():
return "marathon"
def setup_module(module):
# When the cluster is starting, it might happen that there is some delay in:
# - marathon leader registration with mesos
# - admin router refreshing cache (every 30s)
# We should not start our tests before marathon is accessible through service endpoint.
wait_for_service_endpoint('marathon', timedelta(minutes=5).total_seconds(), path="ping")
common.cluster_info()
common.clean_up_marathon()
def teardown_module(module):
common.clean_up_marathon()
#################################################
# Root Marathon specific tests
#################################################
@masters(3)
def test_marathon_delete_leader(marathon_service_name):
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
common.abdicate_marathon_leader()
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
common.assert_marathon_leadership_changed(original_leader)
@masters(3)
def test_marathon_delete_leader_and_check_apps(marathon_service_name):
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
app_def = apps.sleep_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, "The number of running tasks is {}, but 1 was expected".format(app["tasksRunning"])
# abdicate leader after app was started successfully
common.abdicate_marathon_leader()
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
# wait until leader changed
common.assert_marathon_leadership_changed(original_leader)
original_leader = marathon_leader_ip()
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_app_existence(expected_instances):
app = client.get_app(app_id)
        assert app['tasksRunning'] == expected_instances, \
            "The number of running tasks is {}, but {} was expected".format(app["tasksRunning"], expected_instances)
# check if app definition is still there and one instance is still running after new leader was elected
check_app_existence(1)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def remove_app(app_id):
client.remove_app(app_id)
remove_app(app_id)
deployment_wait(service_id=app_id)
try:
client.get_app(app_id)
except Exception:
pass
else:
assert False, "The application resurrected"
# abdicate leader after app was started successfully
common.abdicate_marathon_leader()
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
# wait until leader changed
common.assert_marathon_leadership_changed(original_leader)
# check if app definition is still not there
try:
client.get_app(app_id)
except Exception:
pass
else:
assert False, "The application resurrected"
@masters(3)
def test_marathon_zk_partition_leader_change(marathon_service_name):
original_leader = common.get_marathon_leader_not_on_master_leader_node()
common.block_iptable_rules_for_seconds(original_leader, 2181, sleep_seconds=30)
common.assert_marathon_leadership_changed(original_leader)
@masters(3)
def test_marathon_master_partition_leader_change(marathon_service_name):
original_leader = common.get_marathon_leader_not_on_master_leader_node()
# blocking outbound connection to mesos master
# Marathon has a Mesos heartbeat interval of 15 seconds. If 5 are missed it
# disconnects. Thus we should wait more than 75 seconds.
common.block_iptable_rules_for_seconds(original_leader, 5050, sleep_seconds=100,
block_input=False, block_output=True)
common.assert_marathon_leadership_changed(original_leader)
@public_agents(1)
def test_launch_app_on_public_agent():
""" Test the successful launch of a mesos container on public agent.
MoMs by default do not have slave_public access.
"""
client = marathon.create_client()
app_def = common.add_role_constraint_to_app_def(apps.mesos_app(), ['slave_public'])
app_id = app_def["id"]
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
task_ip = tasks[0]['host']
assert task_ip in get_public_agents(), "The application task got started on a private agent"
@pytest.mark.skipif("is_strict()") # NOQA F811
@pytest.mark.skipif('marathon_version_less_than("1.3.9")')
@pytest.mark.usefixtures("wait_for_marathon_and_cleanup")
@pytest.mark.asyncio
async def test_event_channel(sse_events):
""" Tests the event channel. The way events are verified is by converting
    the parsed events to an iterator and asserting the right order of certain
events. Unknown events are skipped.
"""
await common.assert_event('event_stream_attached', sse_events)
app_def = apps.mesos_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
await common.assert_event('deployment_info', sse_events)
await common.assert_event('deployment_step_success', sse_events)
client.remove_app(app_id, True)
deployment_wait(service_id=app_id)
await common.assert_event('app_terminated_event', sse_events)
@dcos_1_9
@pytest.mark.skipif("is_strict()")
def test_external_volume():
volume_name = "marathon-si-test-vol-{}".format(uuid.uuid4().hex)
app_def = apps.external_volume_mesos_app()
app_def["container"]["volumes"][0]["external"]["name"] = volume_name
app_id = app_def['id']
# Tested with root marathon since MoM doesn't have
# --enable_features external_volumes option activated.
# First deployment should create the volume since it has a unique name
try:
print('INFO: Deploying {} with external volume {}'.format(app_id, volume_name))
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
# Create the app: the volume should be successfully created
common.assert_app_tasks_running(client, app_def)
common.assert_app_tasks_healthy(client, app_def)
# Scale down to 0
print('INFO: Scaling {} to 0 instances'.format(app_id))
client.stop_app(app_id)
deployment_wait(service_id=app_id)
# Scale up again: the volume should be successfully reused
print('INFO: Scaling {} back to 1 instance'.format(app_id))
client.scale_app(app_id, 1)
deployment_wait(service_id=app_id)
common.assert_app_tasks_running(client, app_def)
common.assert_app_tasks_healthy(client, app_def)
# Remove the app to be able to remove the volume
print('INFO: Finally removing {}'.format(app_id))
client.remove_app(app_id)
deployment_wait(service_id=app_id)
except Exception as e:
print('Fail to test external volumes: {}'.format(e))
raise e
finally:
# Clean up after the test: external volumes are not destroyed by marathon or dcos
# and have to be cleaned manually.
cmd = 'sudo /opt/mesosphere/bin/dvdcli remove --volumedriver=rexray --volumename={}'.format(volume_name)
removed = False
for agent in get_private_agents():
            status, output = run_command_on_agent(agent, cmd)  # NOQA
            if status:
                removed = True
            else:
                print('DEBUG: Failed to remove external volume with name={} on agent={}: {}'.format(
                    volume_name, agent, output))
# Note: Removing the volume might fail sometimes because EC2 takes some time (~10min) to recognize that
# the volume is not in use anymore hence preventing it's removal. This is a known pitfall: we log the error
# and the volume should be cleaned up manually later.
if not removed:
print('WARNING: Failed to remove external volume with name={}'.format(volume_name))
else:
print('DEBUG: External volume with name={} successfully removed'.format(volume_name))
@pytest.mark.skipif('is_multi_master() or marathon_version_less_than("1.5")')
def test_marathon_backup_and_restore_leader(marathon_service_name):
"""Backup and restore meeting is done with only one master since new master has to be able
to read the backup file that was created by the previous master and the easiest way to
test it is when there is 1 master
"""
backup_file = 'backup.tar'
backup_dir = '/tmp'
backup_url = 'file://{}/{}'.format(backup_dir, backup_file)
# Deploy a simple test app. It is expected to be there after leader reelection
app_def = apps.sleep_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, "The number of running tasks is {}, but 1 was expected".format(app["tasksRunning"])
task_id = app['tasks'][0]['id']
# Abdicate the leader with backup and restore
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
params = '?backup={}&restore={}'.format(backup_url, backup_url)
print('DELETE /v2/leader{}'.format(params))
common.abdicate_marathon_leader(params)
# Wait for new leader (but same master server) to be up and ready
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, "The number of running tasks is {}, but 1 was expected".format(app["tasksRunning"])
assert task_id == app['tasks'][0]['id'], "Task has a different ID after restore"
# Check if the backup file exits and is valid
cmd = 'tar -tf {}/{} | wc -l'.format(backup_dir, backup_file)
status, data = run_command_on_master(cmd)
assert status, 'Failed to validate backup file {}'.format(backup_url)
assert int(data.rstrip()) > 0, "Backup file is empty"
# Regression for MARATHON-7525, introduced in MARATHON-7538
@masters(3)
@pytest.mark.skipif('marathon_version_less_than("1.5")')
def test_marathon_backup_and_check_apps(marathon_service_name):
backup_file1 = 'backup1.tar'
backup_file2 = 'backup2.tar'
backup_dir = '/tmp'
for master_ip in get_all_master_ips():
run_command(master_ip, "rm {}/{}".format(backup_dir, backup_file1))
run_command(master_ip, "rm {}/{}".format(backup_dir, backup_file2))
backup_url1 = 'file://{}/{}'.format(backup_dir, backup_file1)
backup_url2 = 'file://{}/{}'.format(backup_dir, backup_file2)
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
app_def = apps.sleep_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, "The number of running tasks is {}, but 1 was expected".format(app["tasksRunning"])
# Abdicate the leader with backup
original_leader = marathon_leader_ip()
params = '?backup={}'.format(backup_url1)
common.abdicate_marathon_leader(params)
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
# wait until leader changed
common.assert_marathon_leadership_changed(original_leader)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_app_existence(expected_instances):
try:
app = client.get_app(app_id)
except Exception as e:
if expected_instances != 0:
raise e
else:
if expected_instances == 0:
assert False, "The application resurrected"
else:
                assert app['tasksRunning'] == expected_instances, \
                    "The number of running tasks is {}, but {} was expected".format(
                        app["tasksRunning"], expected_instances)
# check if app definition is still there and one instance is still running after new leader was elected
check_app_existence(1)
# then remove
client.remove_app(app_id)
deployment_wait(service_id=app_id)
check_app_existence(0)
    # Do a second backup. Before MARATHON-7525 doing a backup after an app was deleted left
    # Marathon unable to restart, because the second backup failed constantly.
# Abdicate the leader with backup
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
params = '?backup={}'.format(backup_url2)
print('DELETE /v2/leader{}'.format(params))
common.abdicate_marathon_leader(params)
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
# wait until leader changed
# if leader changed, this means that marathon was able to start again, which is great :-).
common.assert_marathon_leadership_changed(original_leader)
# check if app definition is still not there and no instance is running after new leader was elected
check_app_existence(0)
@common.marathon_1_5
@pytest.mark.skipif("ee_version() is None")
@pytest.mark.skipif("common.docker_env_not_set()")
def test_private_repository_mesos_app():
"""Deploys an app with a private Docker image, using Mesos containerizer.
It relies on the global `install_enterprise_cli` fixture to install the
enterprise-cli-package.
"""
username = os.environ['DOCKER_HUB_USERNAME']
password = os.environ['DOCKER_HUB_PASSWORD']
secret_name = "pullconfig"
secret_value_json = common.create_docker_pull_config_json(username, password)
secret_value = json.dumps(secret_value_json)
app_def = apps.private_ucr_docker_app()
app_id = app_def["id"]
# In strict mode all tasks are started as user `nobody` by default and `nobody`
# doesn't have permissions to write to /var/log within the container.
if is_strict():
app_def['user'] = 'root'
common.add_dcos_marathon_user_acls()
common.create_secret(secret_name, secret_value)
client = marathon.create_client()
try:
client.add_app(app_def)
deployment_wait(service_id=app_id)
common.assert_app_tasks_running(client, app_def)
finally:
common.delete_secret(secret_name)
@pytest.mark.skipif('marathon_version_less_than("1.5")')
@pytest.mark.skipif("ee_version() is None")
def test_app_file_based_secret(secret_fixture):
secret_name, secret_value = secret_fixture
secret_container_path = 'mysecretpath'
app_id = '/app-fbs-{}'.format(uuid.uuid4().hex)
# In case you're wondering about the `cmd`: secrets are mounted via tmpfs inside
# the container and are not visible outside, hence the intermediate file
app_def = {
"id": app_id,
"instances": 1,
"cpus": 0.5,
"mem": 64,
"cmd": "cat {} >> {}_file && /opt/mesosphere/bin/python -m http.server $PORT_API".format(
secret_container_path, secret_container_path),
"container": {
"type": "MESOS",
"volumes": [{
"containerPath": secret_container_path,
"secret": "secret1"
}]
},
"portDefinitions": [{
"port": 0,
"protocol": "tcp",
"name": "api",
"labels": {}
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, 'Failed to start the file based secret app'
port = tasks[0]['ports'][0]
host = tasks[0]['host']
# The secret by default is saved in $MESOS_SANDBOX/.secrets/path/to/secret
cmd = "curl {}:{}/{}_file".format(host, port, secret_container_path)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def value_check():
status, data = run_command_on_master(cmd)
assert status, "{} did not succeed. status = {}, data = {}".format(cmd, status, data)
assert data.rstrip() == secret_value, "Got an unexpected secret data"
value_check()
@dcos_1_9
@pytest.mark.skipif("ee_version() is None")
def test_app_secret_env_var(secret_fixture):
secret_name, secret_value = secret_fixture
app_id = '/app-secret-env-var-{}'.format(uuid.uuid4().hex)
app_def = {
"id": app_id,
"instances": 1,
"cpus": 0.5,
"mem": 64,
"cmd": "echo $SECRET_ENV >> $MESOS_SANDBOX/secret-env && /opt/mesosphere/bin/python -m http.server $PORT_API",
"env": {
"SECRET_ENV": {
"secret": "secret1"
}
},
"portDefinitions": [{
"port": 0,
"protocol": "tcp",
"name": "api",
"labels": {}
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, 'Failed to start the secret environment variable app'
port = tasks[0]['ports'][0]
host = tasks[0]['host']
cmd = "curl {}:{}/secret-env".format(host, port)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def value_check():
status, data = run_command_on_master(cmd)
assert status, "{} did not succeed".format(cmd)
assert data.rstrip() == secret_value
value_check()
@dcos_1_9
@pytest.mark.skipif("ee_version() is None")
def test_app_inaccessible_secret_env_var():
secret_name = '/some/secret' # Secret in an inaccessible namespace
app_id = '/app-inaccessible-secret-env-var-{}'.format(uuid.uuid4().hex)
app_def = {
"id": app_id,
"instances": 1,
"cpus": 0.1,
"mem": 64,
"cmd": "echo \"shouldn't be called anyway\"",
"env": {
"SECRET_ENV": {
"secret": "secret1"
}
},
"portDefinitions": [{
"port": 0,
"protocol": "tcp",
"name": "api",
"labels": {}
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
with pytest.raises(requests.HTTPError) as excinfo:
client.add_app(app_def)
print('An app with an inaccessible secret could not be deployed because: {}'.format(excinfo.value))
assert excinfo.value.response.status_code == 422
assert 'Secret {} is not accessible'.format(secret_name) in excinfo.value.response.text
@dcos_1_9
@pytest.mark.skipif("ee_version() is None")
def test_pod_inaccessible_secret_env_var():
secret_name = '/some/secret' # Secret in an inaccessible namespace
pod_id = '/pod-inaccessible-secret-env-var-{}'.format(uuid.uuid4().hex)
pod_def = {
"id": pod_id,
"containers": [{
"name": "container-1",
"resources": {
"cpus": 0.1,
"mem": 64
},
"exec": {
"command": {
"shell": "echo \"shouldn't be called anyway\""
}
}
}],
"environment": {
"SECRET_ENV": {
"secret": "secret1"
}
},
"networks": [{
"mode": "host"
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
with pytest.raises(requests.HTTPError) as excinfo:
client.add_pod(pod_def)
print('A pod with an inaccessible secret could not be deployed because: {}'.format(excinfo.value))
assert excinfo.value.response.status_code == 422
assert 'Secret {} is not accessible'.format(secret_name) in excinfo.value.response.text
@dcos_1_9
@pytest.mark.skipif("ee_version() is None")
def test_pod_secret_env_var(secret_fixture):
secret_name, secret_value = secret_fixture
pod_id = '/pod-secret-env-var-{}'.format(uuid.uuid4().hex)
pod_def = {
"id": pod_id,
"containers": [{
"name": "container-1",
"resources": {
"cpus": 0.5,
"mem": 64
},
"endpoints": [{
"name": "http",
"hostPort": 0,
"protocol": [
"tcp"
]}
],
"exec": {
"command": {
"shell": "echo $SECRET_ENV && "
"echo $SECRET_ENV >> $MESOS_SANDBOX/secret-env && "
"/opt/mesosphere/bin/python -m http.server $ENDPOINT_HTTP"
}
}
}],
"environment": {
"SECRET_ENV": {
"secret": "secret1"
}
},
"networks": [{
"mode": "host"
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
client.add_pod(pod_def)
deployment_wait(service_id=pod_id)
instances = client.show_pod(pod_id)['instances']
assert len(instances) == 1, 'Failed to start the secret environment variable pod'
port = instances[0]['containers'][0]['endpoints'][0]['allocatedHostPort']
host = instances[0]['networks'][0]['addresses'][0]
cmd = "curl {}:{}/secret-env".format(host, port)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def value_check():
status, data = run_command_on_master(cmd)
assert status, "{} did not succeed. status = {}, data = {}".format(cmd, status, data)
assert data.rstrip() == secret_value, "Got an unexpected secret data"
value_check()
@pytest.mark.skipif('marathon_version_less_than("1.5")')
@pytest.mark.skipif("ee_version() is None")
def test_pod_file_based_secret(secret_fixture):
secret_name, secret_value = secret_fixture
secret_normalized_name = secret_name.replace('/', '')
pod_id = '/pod-fbs-{}'.format(uuid.uuid4().hex)
pod_def = {
"id": pod_id,
"containers": [{
"name": "container-1",
"resources": {
"cpus": 0.5,
"mem": 64
},
"endpoints": [{
"name": "http",
"hostPort": 0,
"protocol": [
"tcp"
]}
],
"exec": {
"command": {
"shell": "cat {} >> {}_file && /opt/mesosphere/bin/python -m http.server $ENDPOINT_HTTP".format(
secret_normalized_name, secret_normalized_name),
}
},
"volumeMounts": [{
"name": "vol",
"mountPath": './{}'.format(secret_name)
}],
}],
"networks": [{
"mode": "host"
}],
"volumes": [{
"name": "vol",
"secret": "secret1"
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
client.add_pod(pod_def)
deployment_wait(service_id=pod_id)
instances = client.show_pod(pod_id)['instances']
assert len(instances) == 1, 'Failed to start the file based secret pod'
port = instances[0]['containers'][0]['endpoints'][0]['allocatedHostPort']
host = instances[0]['networks'][0]['addresses'][0]
cmd = "curl {}:{}/{}_file".format(host, port, secret_normalized_name)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def value_check():
status, data = run_command_on_master(cmd)
assert status, "{} did not succeed. status = {}, data = {}".format(cmd, status, data)
assert data.rstrip() == secret_value, "Got an unexpected secret data"
value_check()
# Uncomment to run a quick and sure-to-pass SI test on any cluster. Useful for running SI tests locally
# from fixtures import parent_group
# def test_foo(parent_group):
# client = marathon.create_client()
# app_def = apps.sleep_app(parent_group=parent_group)
# app_id = app_def['id']
# client.add_app(app_def)
# deployment_wait(service_id=app_id)
#
# tasks = client.get_tasks(app_id)
# assert len(tasks) == 1, 'Failed to start a simple sleep app'
@pytest.fixture(scope="function")
def secret_fixture():
secret_name = '/mysecret'
secret_value = 'super_secret_password'
common.create_secret(secret_name, secret_value)
yield secret_name, secret_value
common.delete_secret(secret_name)
| apache-2.0 | -217,581,934,169,658,050 | 34.073292 | 158 | 0.624495 | false |
kubevirt/client-python | kubevirt/models/k8s_io_apimachinery_pkg_apis_meta_v1_time.py | 1 | 2430 | # coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class K8sIoApimachineryPkgApisMetaV1Time(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
K8sIoApimachineryPkgApisMetaV1Time - a model defined in Swagger
"""
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, K8sIoApimachineryPkgApisMetaV1Time):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
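# Illustrative sketch (not part of the generated file): the model declares no
# properties, so serialization round-trips through an empty dict.
#
#     t = K8sIoApimachineryPkgApisMetaV1Time()
#     t.to_dict()                                  # -> {}
#     t == K8sIoApimachineryPkgApisMetaV1Time()    # -> True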
| apache-2.0 | 7,947,710,263,294,770,000 | 23.545455 | 77 | 0.518519 | false |
Yusuke1987/openstack_template | openapp/openstack/common/report/generators/process.py | 1 | 1221 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides process-data generators
This module defines a class for generating
process data by way of the psutil package.
"""
import os
import psutil
from openapp.openstack.common.report.models import process as pm
class ProcessReportGenerator(object):
"""A Process Data Generator
This generator returns a
:class:`openstack.common.report.models.process.ProcessModel`
based on the current process (which will also include
all subprocesses, recursively) using the :class:`psutil.Process` class`.
"""
def __call__(self):
return pm.ProcessModel(psutil.Process(os.getpid()))
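# Usage sketch (illustrative): report generators are plain callables, so the
# model for the current process (and, recursively, its children) can be
# produced directly:
#
#     model = ProcessReportGenerator()()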
| apache-2.0 | 4,941,720,029,709,224,000 | 31.131579 | 78 | 0.730549 | false |
chrisvans/roasttron | docs/conf.py | 1 | 7821 | # -*- coding: utf-8 -*-
#
# roasttron documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'roasttron'
copyright = u"2015, Chris VanSchyndel"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'roasttrondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'roasttron.tex',
u'roasttron Documentation',
u"Chris VanSchyndel", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'roasttron', u'roasttron Documentation',
[u"Chris VanSchyndel"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'roasttron', u'roasttron Documentation',
u"Chris VanSchyndel", 'roasttron',
'Roast Profiling Application', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| mit | 6,534,412,259,365,037,000 | 30.922449 | 80 | 0.697737 | false |
untitaker/python-webuntis | webuntis/objects.py | 1 | 29012 | """
This file is part of python-webuntis
:copyright: (c) 2013 by Markus Unterwaditzer.
:license: BSD, see LICENSE for more details.
"""
import datetime
from webuntis.utils import datetime_utils, lazyproperty, \
timetable_utils
class Result(object):
"""Base class used to represent most API objects.
:param data: Usually JSON data that should be represented.
In the case of :py:class:`ListResult`, however, it might also
be a list of JSON mixed with :py:class:`ListItem` objects.
:param parent: (optional) A result object this result should be the child
of. If given, the session will be inherited.
:param session: Mandatory if ``parent`` is not supplied. Overrides the
parent's inherited session.
"""
def __init__(self, data, parent=None, session=None):
if bool(parent is None) == bool(session is None):
raise TypeError('Either parent or session has to be provided.')
if parent is not None and not hasattr(parent, '_session'):
raise TypeError('Parent must have a _session attribute.')
self._session = session or parent._session
self._parent = parent
self._data = data
@lazyproperty
def id(self):
"""The ID of this element.
An ID is needed for the object to be hashable. Therefore a result
may bring its own implementation of this method even though the
original API response didn't contain any ID."""
return self._data[u'id'] if 'id' in self._data else None
def __int__(self):
"""This is useful if the users pass a ListItem when a numerical ID
is expected, so we just can put the thing through int(), regardless of
what type it is."""
assert self.id is not None
return self.id
def __hash__(self):
assert self.id is not None
return hash(self.__class__.__name__) * 101 + self.id
def __eq__(self, other):
return type(self) is type(other) and hash(self) == hash(other)
def __getstate__(self):
return self._data
def __setstate__(self, data):
self._data = data
def __str__(self):
"""a simple to string function: just the name or the full info -- debug only"""
try:
return self._data[u'name']
except KeyError:
try:
return self.name
except AttributeError:
return str(self._data)
except TypeError:
return str(self._data)
def __repr__(self):
try:
return self.__class__.__qualname__ + "(" + str(self._data) + ")"
except AttributeError:
return self.__class__.__name__ + "(" + str(self._data) + ")"
class ListItem(Result):
"""ListItems represent an item in a
:py:class:`Result`. They don\'t contain methods to
retrieve data."""
class ListResult(Result):
"""A list-like version of :py:class:`Result` that takes a list and returns
a list of objects, containing a list value each.
"""
# When the Result returns an array, this is very useful. Every item of that
# array will be fed to an instance of self._itemclass, with the session and
# the array item as initialization arguments.
#: the class which should be used to instantiate an array item.
_itemclass = ListItem
def filter(self, **criterions):
"""
Return a list of all objects, filtered by attributes::
foo = s.klassen().filter(id=1) # is kind-of the same as
foo = [kl for kl in s.klassen() if kl.id == 1]
# We can also use sets to match multiple values.
bar = s.klassen().filter(name={'1A', '2A', '3A', '4A'})
# is kind-of the same as
bar = [kl for kl in s.klassen()
if kl.id in {'1A', '2A', '3A', '4A'}]
# Or you can use a list: this keeps the order: the first element
# of the result corresponds to the first element in the filter
# Important after using combine()
bar = s.klassen().filter(name=['1A', '2A', '3A', '4A'])
# --> bar[0].name == '1A'
# Since ``filter`` returns a ListResult itself too, we can chain
# multiple calls together:
bar = s.klassen().filter(id=4, name='7A') # is the same as
bar = s.klassen().filter(id=4).filter(name='7A')
:py:meth:`filter` is also used when using the ``in`` operator on a
:py:class:`ListResult`::
we_have_it = {'name': '6A'} in s.klassen() # same as
we_have_it = bool(s.klassen().filter(name='6A'))
.. note::
This is only available because it looks nicer than list
            comprehensions or generator expressions. Depending on your use
            case, alternatives to this method may be faster.
"""
criterions = list(criterions.items())
def meets_criterions(item):
"""Returns true if the item meets the criterions"""
for key, value in criterions:
# if the attribute value isn't one we're looking for
attribute = getattr(item, key)
if attribute == value:
continue
elif isinstance(value, set) and attribute in value:
continue
else:
return False
return True
if isinstance(criterions[0][1], list):
return type(self)(
parent=self,
data=[self.filter(**{key: v})[0]
for key, values in criterions
for v in values
])
return type(self)(
parent=self,
data=[item for item in self if meets_criterions(item)]
)
def __contains__(self, criterion):
if isinstance(criterion, self._itemclass):
return any(item is criterion for item in self)
return bool(self.filter(**criterion))
def __getitem__(self, i):
"""Makes the object iterable and behave like a list"""
data = self._data[i] # fails if there is no such item
if type(data) is not self._itemclass:
data = self._data[i] = self._itemclass(
parent=self,
data=data
)
return data
def __len__(self):
"""Return the length of the items"""
return len(self._data)
def __hash__(self):
raise NotImplementedError()
def __eq__(self, other):
return type(other) is type(self) and other._data == self._data
def __str__(self):
"""a simple to string function: a list of results -- debug only"""
return "[" + ", ".join(str(d) for d in self._data) + "]"
def __repr__(self):
"""a simple to string function: a list of results -- debug only"""
try:
return self.__class__.__qualname__ + "[" + ", ".join(repr(d) for d in self._data) + "]"
except AttributeError:
return self.__class__.__name__ + "[" + ", ".join(repr(d) for d in self._data) + "]"
class DepartmentObject(ListItem):
"""Represents a department"""
@lazyproperty
def name(self):
"""short name such as *R1A*"""
return self._data[u'name']
@lazyproperty
def long_name(self):
"""Long name, such as *Raum Erste A*. Not predictable."""
return self._data[u'longName']
class DepartmentList(ListResult):
"""A list of departments, in form of :py:class:`DepartmentObject`
instances."""
_itemclass = DepartmentObject
class HolidayObject(ListItem):
"""Represents a single holiday."""
@lazyproperty
def start(self):
"""The start date of the holiday, as a datetime object."""
return datetime_utils.parse_date(self._data[u'startDate'])
@lazyproperty
def end(self):
"""The end of the holiday"""
return datetime_utils.parse_date(self._data[u'endDate'])
@lazyproperty
def name(self):
"""Name, such as *Nationalfeiertag*."""
return self._data[u'longName']
@lazyproperty
def short_name(self):
"""Abbreviated form of the name"""
return self._data[u'name']
class HolidayList(ListResult):
"""A list of holidays, in form of :py:class:`HolidayObject`
instances."""
_itemclass = HolidayObject
class ColorMixin:
"""Interface support fore/back color"""
@lazyproperty
def forecolor(self):
"""The foreground color used in the web interface and elsewhere"""
return self._data[self.name][u'foreColor']
@lazyproperty
def backcolor(self):
"""The background color used in the web interface and elsewhere"""
return self._data[self.name][u'backColor']
class KlassenObject(ListItem, ColorMixin):
"""Represents a school class."""
@lazyproperty
def name(self):
"""Name of class"""
return self._data[u'name']
@lazyproperty
def long_name(self):
"""Long name of class"""
return self._data[u'longName']
class KlassenList(ListResult):
"""A list of school classes, in form of :py:class:`KlassenObject`
instances."""
_itemclass = KlassenObject
class PeriodObject(ListItem):
"""Represents a time range, where lessons/subjects may be held."""
@lazyproperty
def start(self):
"""The start date/time of the period, as datetime object."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'startTime']
)
@lazyproperty
def end(self):
"""The end date/time of the period."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'endTime']
)
@lazyproperty
def klassen(self):
"""A :py:class:`KlassenList` containing the classes which are attending
this period."""
return self._session.klassen(from_cache=True).filter(
id=[kl[u'id'] for kl in self._data[u'kl']]
)
@lazyproperty
def teachers(self):
"""A list of :py:class:`TeacherObject` instances,
which are attending this period."""
return self._session.teachers(from_cache=True).filter(
id=[te[u'id'] for te in self._data[u'te']]
)
@lazyproperty
def subjects(self):
"""A :py:class:`SubjectList` containing the subjects which are topic of
this period. This is not used for things like multiple language lessons
(*e.g.* Latin, Spanish, French) -- each of those will get placed in
their own period."""
return self._session.subjects(from_cache=True).filter(
id=[su[u'id'] for su in self._data[u'su']]
)
@lazyproperty
def rooms(self):
"""The rooms (:py:class:`RoomList`) where this period is taking place
at. This also is not used for multiple lessons, but rather for a single
        lesson that is actually occurring at multiple locations (?)."""
return self._session.rooms(from_cache=True).filter(
id=[ro[u'id'] for ro in self._data[u'ro']]
)
@lazyproperty
def code(self):
"""May be:
- ``None`` -- There's nothing special about this period.
- ``"cancelled"`` -- Cancelled
- ``"irregular"`` -- Substitution/"Supplierung"/Not planned event
"""
code = self._data.get(u'code', None)
if code in (None, u'cancelled', u'irregular'):
return code
return None
@lazyproperty
def original_teachers(self):
""" Support for original teachers """
try:
return self._session.teachers(from_cache=True).filter(id=[te[u'orgid'] for te in self._data[u'te']])
except KeyError:
pass
return []
@lazyproperty
def original_rooms(self):
""" Support for original rooms """
try:
return self._session.rooms(from_cache=True).filter(id=[ro[u'orgid'] for ro in self._data[u'ro']])
except KeyError:
pass
return []
@lazyproperty
def type(self):
"""May be:
- ``"ls"`` -- Normal lesson
- ``"oh"`` -- Office hour
- ``"sb"`` -- Standby
- ``"bs"`` -- Break Supervision
- ``"ex"`` -- Examination
"""
return self._data.get(u'lstype', u'ls')
class PeriodList(ListResult):
"""Aka timetable, a list of periods, in form of :py:class:`PeriodObject`
instances."""
_itemclass = PeriodObject
def to_table(self, dates=None, times=None):
"""
Creates a table-like structure out of the periods. Useful for rendering
timetables in HTML and other markup languages.
Check out the example from the repository for clarification.
:param dates: An iterable of :py:class:`datetime.date` objects that
            definitely should be included in the table. If this parameter is
``None``, the timetable is just as wide as it has to be, leaving
out days without periods.
:param times: An iterable of :py:class:`datetime.time` objects that
            definitely should be included in the table. If this parameter is
``None``, the timetable is just as tall as it has to be, leaving
out hours without periods.
:returns: A list containing "rows", which in turn contain "hours",
which contain :py:class:`webuntis.objects.PeriodObject` instances
which are happening at the same time.
"""
return timetable_utils.table(self, dates=dates, times=times)
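    # A minimal, illustrative sketch (not part of this library's test suite):
    # assuming a logged-in ``webuntis.Session`` named ``s`` and that each row
    # iterates as ``(time, [(date, periods), ...])`` -- an assumption taken
    # from the docstring above::
    #
    #     table = s.timetable(klasse=kl, start=start, end=end).to_table()
    #     for time, row in table:
    #         for date, periods in row:
    #             names = [su.name for p in periods for su in p.subjects]
    #             print(time, date, names)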
def combine(self, combine_breaks=True):
"""
Combine consecutive entries
        :param combine_breaks: whether breaks should be combined as well
:return:
"""
return timetable_utils.combine(self, {'date', 'activityType', 'su', 'kl'}, combine_breaks)
class RoomObject(ListItem, ColorMixin):
"""Represents a physical room. Such as a classroom, but also the physics
lab or whatever.
"""
@lazyproperty
def name(self):
"""The short name of the room. Such as PHY."""
return self._data[u'name']
@lazyproperty
def long_name(self):
"""The long name of the room. Such as "Physics lab"."""
return self._data[u'longName']
class RoomList(ListResult):
"""A list of rooms, in form of :py:class:`RoomObject` instances."""
_itemclass = RoomObject
class SchoolyearObject(ListItem):
"""Represents a schoolyear."""
@lazyproperty
def name(self):
""""2010/2011\""""
return self._data[u'name']
@lazyproperty
def start(self):
"""The start date of the schoolyear, as datetime object"""
return datetime_utils.parse_date(self._data[u'startDate'])
@lazyproperty
def end(self):
"""The end date"""
return datetime_utils.parse_date(self._data[u'endDate'])
@lazyproperty
def is_current(self):
"""
Boolean, check if this is the current schoolyear::
>>> import webuntis
>>> s = webuntis.Session(...).login()
>>> y = s.schoolyears()
>>> y.current.id
7
>>> y.current.is_current
True
>>> y.filter(id=y.current.id).is_current
True
"""
return self == self._parent.current
class SchoolyearList(ListResult):
"""A list of schoolyears, in form of :py:class:`SchoolyearObject`
instances."""
_itemclass = SchoolyearObject
@lazyproperty
def current(self):
"""Returns the current schoolyear in form of a
:py:class:`SchoolyearObject`"""
current_data = self._session._request(u'getCurrentSchoolyear')
current = self.filter(id=current_data[u'id'])[0]
return current
class SubjectObject(ListItem, ColorMixin):
"""Represents a subject."""
@lazyproperty
def name(self):
"""Short name of subject, such as *PHY*"""
return self._data[u'name']
@lazyproperty
def long_name(self):
"""Long name of subject, such as *Physics*"""
return self._data[u'longName']
class SubjectList(ListResult):
"""A list of subjects, in form of :py:class:`SubjectObject` instances."""
_itemclass = SubjectObject
class PersonObject(ListItem):
"""Represents a person (teacher or student)."""
@lazyproperty
def fore_name(self):
"""fore name of the person"""
return self._data[u'foreName']
@lazyproperty
def long_name(self):
"""surname of person"""
return self._data[u'longName']
surname = long_name
@lazyproperty
def name(self):
"""full name of the person"""
return self._data[u'name']
class TeacherObject(PersonObject):
"""Represents a teacher."""
@lazyproperty
def title(self):
"""title of the teacher"""
return self._data[u'title']
@lazyproperty
def full_name(self):
"""full name of teacher (title, forname, longname"""
return " ".join((self.title, self.fore_name, self.long_name)).strip()
class TeacherList(ListResult):
"""A list of teachers, in form of :py:class:`TeacherObject` instances."""
_itemclass = TeacherObject
class ColorInfo(Result, ColorMixin):
"""
An object containing information about a lesson type or a period code::
>>> import webuntis
>>> s = webuntis.Session(...).login()
>>> lstype = s.statusdata().lesson_types[0]
>>> lstype.name
'ls'
>>> lstype.forecolor
'000000'
>>> lstype.backcolor
'ee7f00'
::
>>> pcode = s.statusdata().period_codes[0]
>>> pcode.name
'cancelled'
>>> pcode.forecolor
'FFFFFF'
>>> pcode.backcolor
'FF0000'
"""
@lazyproperty
def id(self):
return hash(self.__class__.__name__ + self.name)
@lazyproperty
def name(self):
"""The name of the LessonType or PeriodCode"""
return list(self._data.items())[0][0]
class StatusData(Result):
"""Information about lesson types and period codes and their colors."""
@lazyproperty
def lesson_types(self):
"""A list of :py:class:`ColorInfo` objects, containing
information about all lesson types defined
:rtype: `list` [ColorInfo]
"""
return [
ColorInfo(parent=self, data=data)
for data in self._data[u'lstypes']
]
@lazyproperty
def period_codes(self):
"""A list of :py:class:`ColorInfo` objects, containing
information about all period codes defined
:rtype: `list` [ColorInfo]
"""
return [
ColorInfo(parent=self, data=data)
for data in self._data[u'codes']
]
class TimeStampObject(Result):
"""Information about last change of data -- timestamp (given in milliseconds)"""
@lazyproperty
def date(self):
"""
get timestamp as python datetime object
:return: datetime.datetime
"""
return datetime.datetime.fromtimestamp(self._data / 1000)
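    # Worked example (illustrative; assumes a UTC system clock, since
    # ``fromtimestamp`` converts to local time):
    #     1420113600000 ms -> datetime.datetime(2015, 1, 1, 12, 0)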
class SubstitutionObject(PeriodObject):
"""Information about substitution."""
@lazyproperty
def type(self):
"""type of substitution
cancel cancellation
subst teacher substitution
add additional period
shift shifted period
rmchg room change
:rtype: str
"""
return self._data[u'type']
@lazyproperty
def reschedule_start(self):
"""The start of the rescheduled substitution (or None)
:return: datetime.datetime
"""
try:
return datetime_utils.parse_datetime(self._data[u'reschedule'][u'date'],
self._data[u'reschedule'][u'startTime'])
except KeyError:
return None
@lazyproperty
def reschedule_end(self):
"""The end of the rescheduled substitution (or None)
:return: datetime.datetime
"""
try:
return datetime_utils.parse_datetime(self._data[u'reschedule'][u'date'],
self._data[u'reschedule'][u'endTime'])
except KeyError:
return None
class SubstitutionList(ListResult):
"""A list of substitutions in form of :py:class:`SubstitutionObject` instances."""
_itemclass = SubstitutionObject
def combine(self, combine_breaks=True):
"""
Combine consecutive entries
:param combine_breaks: combine of breaks
:return:
"""
return timetable_utils.combine(self,
{'date', 'type', 'kl', 'su'},
combine_breaks,
lambda p: (p['type'],
str(p[u'te'] and p[u'te'][0][u'name']),
p[u'date'],
p[u'startTime']))
class TimeUnitObject(Result):
"""Information about the time grid"""
@lazyproperty
def name(self):
"""Name of Timeunit"""
return self._data[u'name']
@lazyproperty
def start(self):
return datetime_utils.parse_time(
self._data[u'startTime']
).time()
@lazyproperty
def end(self):
return datetime_utils.parse_time(
self._data[u'endTime']
).time()
class TimegridDayObject(Result):
"""Information about one day in the time grid"""
@lazyproperty
def day(self):
return self._data[u'day']
@lazyproperty
def dayname(self):
names = {1: "sunday", 2: "monday", 3: "tuesday", 4: "wednesday", 5: "thursday", 6: "friday", 7: "saturday"}
return names[self._data[u'day']]
@lazyproperty
def time_units(self):
return [
TimeUnitObject(parent=self, data=data)
for data in self._data[u'timeUnits']
]
class TimegridObject(ListResult):
"""A list of TimegridDayObjects"""
_itemclass = TimegridDayObject
class StudentObject(PersonObject):
"""Represents a student."""
@lazyproperty
def full_name(self):
"""full name of student (forname, longname)"""
return " ".join((self.fore_name, self.long_name)).strip()
@lazyproperty
def gender(self):
return self._data[u'gender']
@lazyproperty
def key(self):
return self._data[u'key']
class StudentsList(ListResult):
"""A list of students"""
_itemclass = StudentObject
class ExamTypeObject(Result):
"""Represents an Exam Type."""
@lazyproperty
def long_name(self):
"""Long name"""
return self._data[u'longName']
@lazyproperty
def name(self):
"""name"""
return self._data[u'name']
@lazyproperty
def show_in_timetable(self):
"""show this exam type in the timetable"""
return self._data[u'showInTimetable']
class ExamTypeList(ListResult):
"""A list of exam types"""
_itemclass = ExamTypeObject
class ExamObject(Result):
"""Represents an Exam."""
# classes list of classes
# teachers list of teachers
# students list of students
# subject
@lazyproperty
def start(self):
"""The start date/time of the period, as datetime object."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'startTime']
)
@lazyproperty
def end(self):
"""The end date/time of the period."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'endTime']
)
@lazyproperty
def klassen(self):
"""A :py:class:`KlassenList` containing the classes which are attending
this period."""
return self._session.klassen(from_cache=True).filter(
id=set(self._data[u'classes'])
)
@lazyproperty
def teachers(self):
"""A list of :py:class:`TeacherObject` instances,
which are attending this period."""
return self._session.teachers(from_cache=True).filter(
id=set(self._data[u'teachers'])
)
@lazyproperty
def subject(self):
"""A :py:class:`SubjectObject` with the subject which are topic of
this period."""
return self._session.subjects(from_cache=True).filter(id=self._data[u'subject'])[0]
@lazyproperty
def students(self):
"""A list of :py:class:`StudentObject` instances,
which are attending this period."""
return self._session.students(from_cache=True).filter(
id=set(self._data[u'students'])
)
class ExamsList(ListResult):
"""A list of exams."""
_itemclass = ExamObject
class AbsenceObject(Result):
"""Represents an absence.
    Attention: if there are multiple teachers/groups at the same time, there
    will be multiple entries for the same student, but the absentTime is only
    set for one (the first?) entry.
"""
@lazyproperty
def student(self):
"""
        The docs say student ID, but it is actually the student's KEY.
:return:
"""
return self._session.students(from_cache=True).filter(key=self._data[u'studentId'])[0]
@lazyproperty
def subject(self):
"""@TODO: untested - always empty"""
try:
sid = int(self._data[u'subjectId'])
except ValueError:
return ""
return self._session.subjects(from_cache=True).filter(id=sid)[0]
@lazyproperty
def teachers(self):
"""@TODO: untested - always empty"""
try:
tes = list(int(te) for te in self._data[u'teacherIds'] if te)
except ValueError:
return []
return self._session.teachers(from_cache=True).filter(id=tes)
@lazyproperty
def student_group(self):
try:
return self._data[u'studentGroup']
except KeyError:
return ''
@lazyproperty
def checked(self):
return self._data[u'checked']
@lazyproperty
def name(self):
"""Name of absent student"""
return self.student.full_name
@lazyproperty
def start(self):
"""The start date/time of the period, as datetime object."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'startTime']
)
@lazyproperty
def end(self):
"""The end date/time of the period."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'endTime']
)
@lazyproperty
def reason(self):
try:
return self._data[u'absenceReason']
except KeyError:
return ''
@lazyproperty
def time(self):
try:
return int(self._data[u'absentTime'])
except KeyError:
return 0
@lazyproperty
def status(self):
try:
return self._data[u'excuseStatus']
except KeyError:
return ''
class AbsencesList(ListResult):
"""A list of absences."""
_itemclass = AbsenceObject
def __init__(self, data, parent=None, session=None):
# the data is a dict() with just one key
data = data[u'periodsWithAbsences']
Result.__init__(self, data, parent, session)
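    # For illustration only: the raw payload handled above is assumed to look
    # roughly like ``{'periodsWithAbsences': [{...}, {...}]}``, so only the
    # list behind that single key is kept as the result data.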
class ClassRegEvent(Result):
"""Represents an ClassRegEvent."""
@lazyproperty
def student(self):
"""
        The docs say student ID, but it is actually the student's KEY.
:return:
"""
return self._session.students(from_cache=True).filter(key=self._data[u'studentid'])[0]
@lazyproperty
def sur_name(self):
"""sur name of the person"""
return self._data[u'surname']
@lazyproperty
def fore_name(self):
"""fore name of the person"""
return self._data[u'forname']
@lazyproperty
def name(self):
"""fore name of the person"""
return " ".join((self.sur_name, self.fore_name))
@lazyproperty
def reason(self):
"""reason of the classregevent"""
return self._data[u'reason']
@lazyproperty
def text(self):
"""text of the classregevent"""
return self._data[u'text']
@lazyproperty
def date(self):
"""the date of the classregevent."""
return datetime_utils.parse_date(self._data[u'date'])
@lazyproperty
def subject(self):
"""the subject of the classregevent."""
return self._data[u'subject']
class ClassRegEventList(ListResult):
"""A list of ClassRegEvents."""
_itemclass = ClassRegEvent
| bsd-3-clause | 4,595,428,751,574,091,300 | 27.555118 | 115 | 0.576106 | false |
dims/cinder | cinder/volume/drivers/dell/dell_storagecenter_common.py | 1 | 55203 | # Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder import objects
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.san.san import san_opts
from cinder.volume import volume_types
common_opts = [
cfg.IntOpt('dell_sc_ssn',
default=64702,
help='Storage Center System Serial Number'),
cfg.PortOpt('dell_sc_api_port',
default=3033,
help='Dell API port'),
cfg.StrOpt('dell_sc_server_folder',
default='openstack',
help='Name of the server folder to use on the Storage Center'),
cfg.StrOpt('dell_sc_volume_folder',
default='openstack',
help='Name of the volume folder to use on the Storage Center'),
cfg.BoolOpt('dell_sc_verify_cert',
default=False,
help='Enable HTTPS SC certificate verification.')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(common_opts)
class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
driver.ExtendVD, driver.ReplicaV2VD,
driver.SnapshotVD, driver.BaseVD):
def __init__(self, *args, **kwargs):
super(DellCommonDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common_opts)
self.configuration.append_config_values(san_opts)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'Dell'
self.backends = self.configuration.safe_get('replication_device')
self.replication_enabled = True if self.backends else False
self.is_direct_connect = False
def _bytes_to_gb(self, spacestring):
"""Space is returned in a string like ...
7.38197504E8 Bytes
Need to split that apart and convert to GB.
        :returns: space in GB as an int
"""
try:
n = spacestring.split(' ', 1)
fgbs = float(n[0]) / 1073741824.0
igbs = int(fgbs)
return igbs
except Exception:
# If any of that blew up it isn't in the format we
# thought so eat our error and return None
return None
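    # Worked example of the conversion above (using the sample string from
    # the docstring): '7.38197504E8 Bytes' -> 738197504 / 1073741824.0
    # ~= 0.687, which truncates to 0 GB.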
def do_setup(self, context):
"""One time driver setup.
Called once by the manager after the driver is loaded.
Sets up clients, check licenses, sets up protocol
specific helpers.
"""
self._client = dell_storagecenter_api.StorageCenterApiHelper(
self.configuration)
def check_for_setup_error(self):
"""Validates the configuration information."""
with self._client.open_connection() as api:
api.find_sc()
self.is_direct_connect = api.is_direct_connect
if self.is_direct_connect and self.replication_enabled:
msg = _('Dell Cinder driver configuration error replication '
'not supported with direct connect.')
raise exception.InvalidHost(reason=msg)
if self.replication_enabled:
# Check that our replication destinations are available.
# TODO(tswanson): Check if we need a diskfolder. (Or not.)
# TODO(tswanson): Can we check that the backend specifies
# TODO(tswanson): the same ssn as target_device_id.
for backend in self.backends:
replssn = backend['target_device_id']
try:
# Just do a find_sc on it. If it raises we catch
# that and raise with a correct exception.
api.find_sc(int(replssn))
except exception.VolumeBackendAPIException:
msg = _('Dell Cinder driver configuration error '
'replication_device %s not found') % replssn
raise exception.InvalidHost(reason=msg)
def _get_volume_extra_specs(self, volume):
"""Gets extra specs for the given volume."""
type_id = volume.get('volume_type_id')
if type_id:
return volume_types.get_volume_type_extra_specs(type_id)
return {}
def _add_volume_to_consistency_group(self, api, scvolume, volume):
"""Just a helper to add a volume to a consistency group.
        :param api: Dell SC API object.
:param scvolume: Dell SC Volume object.
:param volume: Cinder Volume object.
:returns: Nothing.
"""
if scvolume and volume.get('consistencygroup_id'):
profile = api.find_replay_profile(
volume.get('consistencygroup_id'))
if profile:
api.update_cg_volumes(profile, [volume])
def _do_repl(self, api, volume):
"""Checks if we can do replication.
Need the extra spec set and we have to be talking to EM.
:param api: Dell REST API object.
:param volume: Cinder Volume object.
        :return: Boolean (True if replication is enabled), Boolean (True if
            the replication type is sync).
"""
do_repl = False
sync = False
if not self.is_direct_connect:
specs = self._get_volume_extra_specs(volume)
do_repl = specs.get('replication_enabled') == '<is> True'
sync = specs.get('replication_type') == '<in> sync'
return do_repl, sync
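    # For reference (illustrative values): the volume-type extra specs
    # checked above are expected to look like
    #     replication_enabled='<is> True'
    #     replication_type='<in> sync'
    # anything else leaves do_repl/sync False.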
def _create_replications(self, api, volume, scvolume):
"""Creates any appropriate replications for a given volume.
:param api: Dell REST API object.
:param volume: Cinder volume object.
:param scvolume: Dell Storage Center Volume object.
:return: model_update
"""
# Replication V2
# for now we assume we have an array named backends.
replication_driver_data = None
# Replicate if we are supposed to.
do_repl, sync = self._do_repl(api, volume)
if do_repl:
for backend in self.backends:
# Check if we are to replicate the active replay or not.
specs = self._get_volume_extra_specs(volume)
replact = specs.get('replication:activereplay') == '<is> True'
if not api.create_replication(scvolume,
backend['target_device_id'],
backend.get('qosnode',
'cinderqos'),
sync,
backend.get('diskfolder', None),
replact):
# Create replication will have printed a better error.
msg = _('Replication %(name)s to %(ssn)s failed.') % {
'name': volume['id'],
'ssn': backend['target_device_id']}
raise exception.VolumeBackendAPIException(data=msg)
if not replication_driver_data:
replication_driver_data = backend['target_device_id']
else:
replication_driver_data += ','
replication_driver_data += backend['target_device_id']
# If we did something return model update.
model_update = {}
if replication_driver_data:
model_update = {'replication_status': 'enabled',
'replication_driver_data': replication_driver_data}
return model_update
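    # Illustrative sketch of a matching ``replication_device`` entry in
    # cinder.conf; the keys below are the ones read from the backend dict in
    # this driver, but the concrete values are invented for the example:
    #
    #     replication_device = target_device_id:65495,
    #                          managed_backend_name:host@dell2#dell2,
    #                          qosnode:cinderqos,
    #                          diskfolder:openstack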
@staticmethod
def _cleanup_failed_create_volume(api, volumename):
try:
api.delete_volume(volumename)
except exception.VolumeBackendAPIException as ex:
LOG.info(_LI('Non fatal cleanup error: %s.'), ex.msg)
def create_volume(self, volume):
"""Create a volume."""
model_update = {}
# We use id as our name as it is unique.
volume_name = volume.get('id')
# Look for our volume
volume_size = volume.get('size')
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
storage_profile = specs.get('storagetype:storageprofile')
replay_profile_string = specs.get('storagetype:replayprofiles')
LOG.debug('Creating volume %(name)s of size %(size)s',
{'name': volume_name,
'size': volume_size})
scvolume = None
with self._client.open_connection() as api:
try:
if api.find_sc():
scvolume = api.create_volume(volume_name,
volume_size,
storage_profile,
replay_profile_string)
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume %s') %
volume_name)
# Update Consistency Group
self._add_volume_to_consistency_group(api, scvolume, volume)
# Create replications. (Or not. It checks.)
model_update = self._create_replications(api, volume, scvolume)
except Exception:
# if we actually created a volume but failed elsewhere
# clean up the volume now.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is None:
raise exception.VolumeBackendAPIException(
data=_('Unable to create volume. Backend down.'))
return model_update
def _split(self, replication_driver_data):
ssnstrings = []
if replication_driver_data:
for str in replication_driver_data.split(','):
ssnstring = str.strip()
if ssnstring:
ssnstrings.append(ssnstring)
return ssnstrings
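    # e.g. (illustrative values) a replication_driver_data string of
    # '64702, 64703' splits into ['64702', '64703'].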
def _delete_replications(self, api, volume):
"""Delete replications associated with a given volume.
We should be able to roll through the replication_driver_data list
of SSNs and delete replication objects between them and the source
volume.
:param api: Dell REST API object.
:param volume: Cinder Volume object
:return:
"""
do_repl, sync = self._do_repl(api, volume)
if do_repl:
volume_name = volume.get('id')
scvol = api.find_volume(volume_name)
replication_driver_data = volume.get('replication_driver_data')
# This is just a string of ssns separated by commas.
ssnstrings = self._split(replication_driver_data)
# Trundle through these and delete them all.
for ssnstring in ssnstrings:
ssn = int(ssnstring)
if not api.delete_replication(scvol, ssn):
LOG.warning(_LW('Unable to delete replication of '
'Volume %(vname)s to Storage Center '
'%(sc)s.'),
{'vname': volume_name,
'sc': ssnstring})
        # If none of that worked, or there was nothing to do, it does not
        # matter.  Just move on.
def delete_volume(self, volume):
deleted = False
# We use id as our name as it is unique.
volume_name = volume.get('id')
LOG.debug('Deleting volume %s', volume_name)
with self._client.open_connection() as api:
try:
if api.find_sc():
self._delete_replications(api, volume)
deleted = api.delete_volume(volume_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete volume %s'),
volume_name)
# if there was an error we will have raised an
# exception. If it failed to delete it is because
# the conditions to delete a volume were not met.
if deleted is False:
raise exception.VolumeIsBusy(volume_name=volume_name)
def create_snapshot(self, snapshot):
"""Create snapshot"""
# our volume name is the volume id
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
LOG.debug('Creating snapshot %(snap)s on volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
if scvolume is not None:
if api.create_replay(scvolume,
snapshot_id,
0) is not None:
snapshot['status'] = 'available'
return
else:
LOG.warning(_LW('Unable to locate volume:%s'),
volume_name)
snapshot['status'] = 'error_creating'
msg = _('Failed to create snapshot %s') % snapshot_id
raise exception.VolumeBackendAPIException(data=msg)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other volume's snapshot on appliance."""
model_update = {}
scvolume = None
src_volume_name = snapshot.get('volume_id')
# This snapshot could have been created on its own or as part of a
# cgsnapshot. If it was a cgsnapshot it will be identified on the Dell
# backend under cgsnapshot_id. Given the volume ID and the
# cgsnapshot_id we can find the appropriate snapshot.
# So first we look for cgsnapshot_id. If that is blank then it must
# have been a normal snapshot which will be found under snapshot_id.
snapshot_id = snapshot.get('cgsnapshot_id')
if not snapshot_id:
snapshot_id = snapshot.get('id')
volume_name = volume.get('id')
LOG.debug(
'Creating new volume %(vol)s from snapshot %(snap)s '
'from vol %(src)s',
{'vol': volume_name,
'snap': snapshot_id,
'src': src_volume_name})
with self._client.open_connection() as api:
try:
if api.find_sc():
srcvol = api.find_volume(src_volume_name)
if srcvol is not None:
replay = api.find_replay(srcvol,
snapshot_id)
if replay is not None:
volume_name = volume.get('id')
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
replay_profile_string = specs.get(
'storagetype:replayprofiles')
scvolume = api.create_view_volume(
volume_name, replay, replay_profile_string)
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume '
'%(name)s from %(snap)s.') %
{'name': volume_name,
'snap': snapshot_id})
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
# Replicate if we are supposed to.
model_update = self._create_replications(api,
volume,
scvolume)
except Exception:
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s created from %(snap)s',
{'vol': volume_name,
'snap': snapshot_id})
else:
msg = _('Failed to create volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
model_update = {}
scvolume = None
src_volume_name = src_vref.get('id')
volume_name = volume.get('id')
LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s',
{'clone': volume_name,
'vol': src_volume_name})
with self._client.open_connection() as api:
try:
if api.find_sc():
srcvol = api.find_volume(src_volume_name)
if srcvol is not None:
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
replay_profile_string = specs.get(
'storagetype:replayprofiles')
# Create our volume
scvolume = api.create_cloned_volume(
volume_name, srcvol, replay_profile_string)
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume '
'%(name)s from %(vol)s.') %
{'name': volume_name,
'vol': src_volume_name})
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
# Replicate if we are supposed to.
model_update = self._create_replications(api,
volume,
scvolume)
except Exception:
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s cloned from %(src)s',
{'vol': volume_name,
'src': src_volume_name})
else:
msg = _('Failed to create volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def delete_snapshot(self, snapshot):
"""delete_snapshot"""
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
if scvolume is not None:
if api.delete_replay(scvolume,
snapshot_id):
return
# if we are here things went poorly.
snapshot['status'] = 'error_deleting'
msg = _('Failed to delete snapshot %s') % snapshot_id
raise exception.VolumeBackendAPIException(data=msg)
def create_export(self, context, volume, connector):
"""Create an export of a volume.
The volume exists on creation and will be visible on
initialize connection. So nothing to do here.
"""
# TODO(tswanson): Move mapping code here.
pass
def ensure_export(self, context, volume):
"""Ensure an export of a volume.
Per the eqlx driver we just make sure that the volume actually
exists where we think it does.
"""
scvolume = None
volume_name = volume.get('id')
LOG.debug('Checking existence of volume %s', volume_name)
with self._client.open_connection() as api:
try:
if api.find_sc():
scvolume = api.find_volume(volume_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume %s'),
volume_name)
if scvolume is None:
msg = _('Unable to find volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
def remove_export(self, context, volume):
"""Remove an export of a volume.
We do nothing here to match the nothing we do in create export. Again
we do everything in initialize and terminate connection.
"""
pass
def extend_volume(self, volume, new_size):
"""Extend the size of the volume."""
volume_name = volume.get('id')
LOG.debug('Extending volume %(vol)s to %(size)s',
{'vol': volume_name,
'size': new_size})
if volume is not None:
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
if api.expand_volume(scvolume, new_size) is not None:
return
# If we are here nothing good happened.
msg = _('Unable to extend volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
with self._client.open_connection() as api:
storageusage = api.get_storage_usage() if api.find_sc() else None
# all of this is basically static for now
data = {}
data['volume_backend_name'] = self.backend_name
data['vendor_name'] = 'Dell'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
data['reserved_percentage'] = 0
data['free_capacity_gb'] = 'unavailable'
data['total_capacity_gb'] = 'unavailable'
data['consistencygroup_support'] = True
# In theory if storageusage is None then we should have
# blown up getting it. If not just report unavailable.
if storageusage is not None:
totalcapacity = storageusage.get('availableSpace')
totalcapacitygb = self._bytes_to_gb(totalcapacity)
data['total_capacity_gb'] = totalcapacitygb
freespace = storageusage.get('freeSpace')
freespacegb = self._bytes_to_gb(freespace)
data['free_capacity_gb'] = freespacegb
data['QoS_support'] = False
data['replication_enabled'] = self.replication_enabled
if self.replication_enabled:
data['replication_type'] = ['async', 'sync']
data['replication_count'] = len(self.backends)
self._stats = data
LOG.debug('Total cap %(total)s Free cap %(free)s',
{'total': data['total_capacity_gb'],
'free': data['free_capacity_gb']})
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
# We use id as our volume name so we need to rename the backend
# volume to the original volume name.
original_volume_name = volume.get('id')
current_name = new_volume.get('id')
LOG.debug('update_migrated_volume: %(current)s to %(original)s',
{'current': current_name,
'original': original_volume_name})
if original_volume_name:
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(current_name)
if (scvolume and
api.rename_volume(scvolume, original_volume_name)):
# Replicate if we are supposed to.
model_update = self._create_replications(api,
new_volume,
scvolume)
model_update['_name_id'] = None
return model_update
# The world was horrible to us so we should error and leave.
LOG.error(_LE('Unable to rename the logical volume for volume: %s'),
original_volume_name)
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
def create_consistencygroup(self, context, group):
"""This creates a replay profile on the storage backend.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:returns: Nothing on success.
:raises: VolumeBackendAPIException
"""
gid = group['id']
with self._client.open_connection() as api:
cgroup = api.create_replay_profile(gid)
if cgroup:
LOG.info(_LI('Created Consistency Group %s'), gid)
return
msg = _('Unable to create consistency group %s') % gid
raise exception.VolumeBackendAPIException(data=msg)
def delete_consistencygroup(self, context, group, volumes):
"""Delete the Dell SC profile associated with this consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:returns: Updated model_update, volumes.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if profile:
api.delete_replay_profile(profile)
# If we are here because we found no profile that should be fine
# as we are trying to delete it anyway.
# Now whack the volumes. So get our list.
volumes = self.db.volume_get_all_by_group(context, gid)
# Trundle through the list deleting the volumes.
for volume in volumes:
self.delete_volume(volume)
volume['status'] = 'deleted'
model_update = {'status': group['status']}
return model_update, volumes
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be updated.
:param add_volumes: a list of volume dictionaries to be added.
:param remove_volumes: a list of volume dictionaries to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
model_update is a dictionary that the driver wants the manager
to update upon a successful return. If None is returned, the manager
will set the status to 'available'.
add_volumes_update and remove_volumes_update are lists of dictionaries
that the driver wants the manager to update upon a successful return.
Note that each entry requires a {'id': xxx} so that the correct
volume entry can be updated. If None is returned, the volume will
remain its original status. Also note that you cannot directly
assign add_volumes to add_volumes_update as add_volumes is a list of
cinder.db.sqlalchemy.models.Volume objects and cannot be used for
db update directly. Same with remove_volumes.
If the driver throws an exception, the status of the group as well as
those of the volumes to be added/removed will be set to 'error'.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if not profile:
LOG.error(_LE('Cannot find Consistency Group %s'), gid)
elif api.update_cg_volumes(profile,
add_volumes,
remove_volumes):
LOG.info(_LI('Updated Consistency Group %s'), gid)
# we need nothing updated above us so just return None.
return None, None, None
# Things did not go well so throw.
msg = _('Unable to update consistency group %s') % gid
raise exception.VolumeBackendAPIException(data=msg)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Takes a snapshot of the consistency group.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to take.
:returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.debug('profile %s replayid %s', profile, snapshotid)
if api.snap_cg_replay(profile, snapshotid, 0):
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, snapshotid)
for snapshot in snapshots:
snapshot.status = 'available'
model_update = {'status': 'available'}
return model_update, snapshots
# That didn't go well. Tell them why. Then bomb out.
LOG.error(_LE('Failed to snap Consistency Group %s'), cgid)
else:
LOG.error(_LE('Cannot find Consistency Group %s'), cgid)
msg = _('Unable to snap Consistency Group %s') % cgid
raise exception.VolumeBackendAPIException(data=msg)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot.
If profile isn't found return success. If failed to delete the
replay (the snapshot) then raise an exception.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to delete.
:returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'),
{'ss': snapshotid,
'pro': profile})
if not api.delete_cg_replay(profile, snapshotid):
msg = (_('Unable to delete Consistency Group snapshot %s')
% snapshotid)
raise exception.VolumeBackendAPIException(data=msg)
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, snapshotid)
for snapshot in snapshots:
snapshot.status = 'deleted'
model_update = {'status': 'deleted'}
return model_update, snapshots
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
There are two ways to do this:
1. Rename the backend storage object so that it matches the,
volume['name'] which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
api.manage_existing(volume['id'], existing_ref)
# Replicate if we are supposed to.
scvolume = api.find_volume(volume['id'])
model_update = self._create_replications(api, volume, scvolume)
if model_update:
return model_update
else:
msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
# Only return a model_update if we have replication info to add.
return None
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
return api.get_unmanaged_volume_size(existing_ref)
else:
msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
For most drivers, this will not need to do anything. However, some
drivers might use this call as an opportunity to clean up any
Cinder-specific configuration that they have associated with the
backend storage object.
:param volume: Cinder volume to unmanage
"""
with self._client.open_connection() as api:
scvolume = api.find_volume(volume['id'])
if scvolume:
api.unmanage(scvolume)
def _get_retype_spec(self, diff, volume_name, specname, spectype):
"""Helper function to get current and requested spec.
:param diff: A difference dictionary.
:param volume_name: The volume name we are working with.
:param specname: The pretty name of the parameter.
:param spectype: The actual spec string.
:return: current, requested spec.
:raises: VolumeBackendAPIException
"""
spec = (diff['extra_specs'].get(spectype))
if spec:
if len(spec) != 2:
msg = _('Unable to retype %(specname)s, expected to receive '
'current and requested %(spectype)s values. Value '
'received: %(spec)s') % {'specname': specname,
'spectype': spectype,
'spec': spec}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
current = spec[0]
requested = spec[1]
if current != requested:
LOG.debug('Retyping volume %(vol)s to use %(specname)s '
'%(spec)s.',
{'vol': volume_name,
'specname': specname,
'spec': requested})
return current, requested
else:
LOG.info(_LI('Retype was to same Storage Profile.'))
return None, None
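    # Illustrative shape of the ``diff`` argument consumed above (values
    # invented for the example); each changed spec maps to a
    # (current, requested) pair:
    #
    #     diff = {'extra_specs': {
    #         'storagetype:storageprofile': ('HighPriority', 'LowPriority')}}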
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
model_update = None
# Any spec changes?
if diff['extra_specs']:
volume_name = volume.get('id')
with self._client.open_connection() as api:
try:
# Get our volume
scvolume = api.find_volume(volume_name)
if scvolume is None:
LOG.error(_LE('Retype unable to find volume %s.'),
volume_name)
return False
# Check our specs.
# Storage profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Storage Profile',
'storagetype:storageprofile'))
# if there is a change and it didn't work fast fail.
if (current != requested and not
api.update_storage_profile(scvolume, requested)):
LOG.error(_LE('Failed to update storage profile'))
return False
# Replay profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Replay Profiles',
'storagetype:replayprofiles'))
# if there is a change and it didn't work fast fail.
if requested and not api.update_replay_profiles(scvolume,
requested):
LOG.error(_LE('Failed to update replay profiles'))
return False
# Replication_enabled.
current, requested = (
self._get_retype_spec(diff,
volume_name,
'replication_enabled',
'replication_enabled'))
# if there is a change and it didn't work fast fail.
if current != requested:
if requested:
model_update = self._create_replications(api,
volume,
scvolume)
else:
self._delete_replications(api, volume)
model_update = {'replication_status': 'disabled',
'replication_driver_data': ''}
# Active Replay
current, requested = (
self._get_retype_spec(diff, volume_name,
'Replicate Active Replay',
'replication:activereplay'))
if current != requested and not (
api.update_replicate_active_replay(
scvolume, requested == '<is> True')):
LOG.error(_LE('Failed to apply '
'replication:activereplay setting'))
return False
# TODO(tswanson): replaytype once it actually works.
except exception.VolumeBackendAPIException:
# We do nothing with this. We simply return failure.
return False
# If we have something to send down...
if model_update:
return model_update
return True
def replication_enable(self, context, vref):
"""Re-enable replication on vref.
:param context: NA
:param vref: Cinder volume reference.
:return: model_update.
"""
volumename = vref.get('id')
LOG.info(_LI('Enabling replication on %s'), volumename)
model_update = {}
with self._client.open_connection() as api:
replication_driver_data = vref.get('replication_driver_data')
destssns = self._split(replication_driver_data)
do_repl, sync = self._do_repl(api, vref)
if destssns and do_repl:
scvolume = api.find_volume(volumename)
if scvolume:
for destssn in destssns:
if not api.resume_replication(scvolume, int(destssn)):
LOG.error(_LE('Unable to resume replication on '
'volume %(vol)s to SC %(ssn)s'),
{'vol': volumename,
'ssn': destssn})
model_update['replication_status'] = 'error'
break
else:
LOG.error(_LE('Volume %s not found'), volumename)
else:
LOG.error(_LE('Replication not enabled or no replication '
'destinations found. %s'),
volumename)
return model_update
def replication_disable(self, context, vref):
"""Disable replication on vref.
:param context: NA
:param vref: Cinder volume reference.
:return: model_update.
"""
volumename = vref.get('id')
LOG.info(_LI('Disabling replication on %s'), volumename)
model_update = {}
with self._client.open_connection() as api:
replication_driver_data = vref.get('replication_driver_data')
destssns = self._split(replication_driver_data)
do_repl, sync = self._do_repl(api, vref)
if destssns and do_repl:
scvolume = api.find_volume(volumename)
if scvolume:
for destssn in destssns:
if not api.pause_replication(scvolume, int(destssn)):
LOG.error(_LE('Unable to pause replication on '
'volume %(vol)s to SC %(ssn)s'),
{'vol': volumename,
'ssn': destssn})
model_update['replication_status'] = 'error'
break
else:
LOG.error(_LE('Volume %s not found'), volumename)
else:
LOG.error(_LE('Replication not enabled or no replication '
'destinations found. %s'),
volumename)
return model_update
def _find_host(self, ssnstring):
"""Find the backend associated with this ssnstring.
:param ssnstring: The ssn of the storage center we are looking for.
:return: The managed_backend_name associated with said storage center.
"""
for backend in self.backends:
if ssnstring == backend['target_device_id']:
return backend['managed_backend_name']
return None
def _parse_secondary(self, api, vref, secondary):
"""Find the replication destination associated with secondary.
:param api: Dell StorageCenterApi
:param vref: Cinder Volume
:param secondary: String indicating the secondary to failover to.
:return: Destination SSN and the host string for the given secondary.
"""
LOG.debug('_parse_secondary. Looking for %s.', secondary)
replication_driver_data = vref['replication_driver_data']
destssn = None
host = None
ssnstrings = self._split(replication_driver_data)
# Trundle through these and delete them all.
for ssnstring in ssnstrings:
# If they list a secondary it has to match.
# If they do not list a secondary we return the first
# replication on a working system.
if not secondary or secondary == ssnstring:
# Is a string. Need an int.
ssn = int(ssnstring)
# Without the source being up we have no good
# way to pick a destination to failover to. So just
# look for one that is just up.
try:
# If the SC ssn exists check if we are configured to
# use it.
if api.find_sc(ssn):
host = self._find_host(ssnstring)
# If host then we are configured.
if host:
# Save our ssn and get out of here.
destssn = ssn
break
except exception.VolumeBackendAPIException:
LOG.warning(_LW('SSN %s appears to be down.'), ssn)
LOG.info(_LI('replication failover secondary is %(ssn)s %(host)s'),
{'ssn': destssn,
'host': host})
return destssn, host
def replication_failover(self, context, vref, secondary):
"""Failover to secondary.
The flow is as follows.
1.The user explicitly requests a failover of a replicated volume.
2.Driver breaks replication.
a. Neatly by deleting the SCReplication object if the
primary is still up.
b. Brutally by unmapping the replication volume if it isn't.
3.We rename the volume to "Cinder failover <Volume GUID>"
4.Change Cinder DB entry for which backend controls the volume
to the backend listed in the replication_device.
5.That's it.
Completion of the failover is done on first use on the new backend.
We do this by modifying the find_volume function.
Find volume searches the following places in order:
1. "<Volume GUID>" in the backend's volume folder.
2. "<Volume GUID>" outside of the volume folder.
3. "Cinder failover <Volume GUID>" anywhere on the system.
If "Cinder failover <Volume GUID>" was found:
1.Volume is renamed to "<Volume GUID>".
2.Volume is moved to the new backend's volume folder.
3.The volume is now available on the secondary backend.
        :param context: NA
:param vref: Cinder volume reference.
:param secondary: SSN of the destination Storage Center
:return: model_update on failover.
"""
LOG.info(_LI('Failing replication %(vol)s to %(sec)s'),
{'vol': vref.get('id'),
'sec': secondary})
# If we fall through this is our error.
msg = _('Unable to failover replication.')
with self._client.open_connection() as api:
# Basic check. We should never get here.
do_repl, sync = self._do_repl(api, vref)
if not do_repl:
# If we did get here then there is a disconnect. Set our
# message and raise (below).
msg = _('Unable to failover unreplicated volume.')
else:
# Look for the specified secondary.
destssn, host = self._parse_secondary(api, vref, secondary)
if destssn and host:
volumename = vref.get('id')
# This will break the replication on the SC side. At the
# conclusion of this the destination volume will be
# renamed to indicate failover is in progress. We will
# pick the volume up on the destination backend later.
if api.break_replication(volumename, destssn):
model_update = {}
model_update['host'] = host
model_update['replication_driver_data'] = None
return model_update
# We are here. Nothing went well.
LOG.error(_LE('Unable to break replication from '
'%(from)s to %(to)d.'),
{'from': volumename,
'to': destssn})
else:
LOG.error(_LE('Unable to find valid destination.'))
# We raise to indicate something bad happened.
raise exception.ReplicationError(volume_id=vref.get('id'),
reason=msg)
def list_replication_targets(self, context, vref):
"""Lists replication targets for the given vref.
We return targets the volume has been setup to replicate to and that
are configured on this backend.
:param context: NA
:param vref: Cinder volume object.
:return: A dict of the form {'volume_id': id,
'targets': [ {'type': xxx,
'target_device_id': xxx,
'backend_name': xxx}]}
"""
LOG.debug('list_replication_targets for volume %s', vref.get('id'))
targets = []
with self._client.open_connection() as api:
do_repl, sync = self._do_repl(api, vref)
# If we have no replication_driver_data then we have no replication
# targets
replication_driver_data = vref.get('replication_driver_data')
ssnstrings = self._split(replication_driver_data)
# If we have data.
if ssnstrings:
# Trundle through our backends.
for backend in self.backends:
# If we find a backend then we report it.
if ssnstrings.count(backend['target_device_id']):
target = {}
target['type'] = 'managed'
target['target_device_id'] = (
backend['target_device_id'])
target['backend_name'] = (
backend['managed_backend_name'])
targets.append(target)
else:
# We note if the source is not replicated to a
# configured destination for the backend.
LOG.info(_LI('Volume %(guid)s not replicated to '
'backend %(name)s'),
{'guid': vref['id'],
'name': backend['managed_backend_name']})
# At this point we note that what we found and what we
# expected to find were two different things.
if len(ssnstrings) != len(targets):
                LOG.warning(_LW('Expected replication count %(rdd)d does '
                                'not match configured replication count '
                                '%(tgt)d.'),
{'rdd': len(ssnstrings),
'tgt': len(targets)})
# Format response.
replication_targets = {'volume_id': vref.get('id'), 'targets': targets}
LOG.info(_LI('list_replication_targets: %s'), replication_targets)
return replication_targets
| apache-2.0 | 7,205,747,925,299,166,000 | 44.622314 | 79 | 0.531366 | false |
Southpaw-TACTIC/TACTIC | src/pyasm/search/mongodb_test.py | 1 | 4510 | ###########################################################
#
# Copyright (c) 2013, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import tacticenv
from pyasm.security import Batch
from pyasm.biz import Project
from pyasm.search import SearchType, SObject, Update, Insert, Search, DbContainer
from pyasm.unittest import UnittestEnvironment
import unittest
class SqlTest(unittest.TestCase):
def test_all(self):
#test_env = UnittestEnvironment()
#test_env.create()
try:
self._test_get_by_search_keys()
self._test_relationship()
self._create_table()
self._test_search()
finally:
#test_env.delete()
pass
def _test_get_by_search_keys(self):
search_keys = ['table/posts?project=mongodb&code=POSTS52086a28e138236a389e670e']
sobjects = Search.get_by_search_keys(search_keys, keep_order=True)
self.assertEquals( 1, len(sobjects) )
def _test_relationship(self):
search_type = "table/posts?project=mongodb"
search_type2 = "table/posts2?project=mongodb"
from pyasm.biz import Schema
schema = Schema.get()
relationship = schema.get_relationship(search_type, search_type2)
self.assertEquals(None, relationship)
search_type2 = "sthpw/task"
relationship = schema.get_relationship(search_type, search_type2)
self.assertEquals("search_id", relationship)
attrs = schema.get_relationship_attrs(search_type, search_type2)
self.assertEquals("*", attrs.get("to") )
self.assertEquals("search_type", attrs.get("relationship") )
def _create_table(self):
from pyasm.search import CreateTable
project = Project.get_by_code("mongodb")
#sql = project.get_sql()
db_resource = project.get_project_db_resource()
sql = db_resource.get_sql()
create = CreateTable()
create.set_table("whatever")
create.commit(sql)
def _test_search(self):
# test search
search = Search("table/posts?project=mongodb")
sobject = search.get_sobject()
self.assertNotEquals(None, sobject)
sobjects = search.get_sobjects()
#for sobject in sobjects:
# print sobject.get_value("_id"), sobject.get_value("author")
# test simple filter
search = Search("table/posts?project=mongodb")
search.add_filter("author", "Mike")
sobjects = search.get_sobjects()
for sobject in sobjects:
self.assertEquals("Mike", sobject.get_value("author") )
# do an update
sobject = sobjects[0]
sobject.set_value("text", "This is wonderful!!!")
sobject.commit()
# search by id
object_id = sobject.get_id()
updated_sobject = Search.get_by_id("table/posts?project=mongodb", object_id)
self.assertNotEquals(None, updated_sobject)
self.assertEquals("This is wonderful!!!", updated_sobject.get_value("text"))
# search by search_key
search_key = sobject.get_search_key()
test_sobject = Search.get_by_search_key(search_key)
print("search_key: ", search_key)
print("test: ", test_sobject.get_data())
self.assertNotEquals(None, test_sobject)
# create a new one
sobject = SearchType.create("table/posts?project=mongodb")
sobject.set_value("author", "Cindy")
sobject.set_value("text", "My second blog post!")
sobject.set_value("tags", ["mongodb", "python", "pymongo"])
sobject.commit()
return
print("---")
count = search.get_count()
print("count: ", count)
search.add_order_by("author")
sobjects = search.get_sobjects()
for sobject in sobjects:
print(sobject.get_value("_id"), sobject.get_value("author"))
print("---")
search = Search("table/posts?project=mongodb")
search.add_order_by("author")
search.add_filter("author", "Fred", op=">")
sobjects = search.get_sobjects()
for sobject in sobjects:
print(sobject.get_value("_id"), sobject.get_value("author"))
if __name__ == "__main__":
Batch()
unittest.main()
| epl-1.0 | 784,356,280,658,903,200 | 24.771429 | 88 | 0.602882 | false |
simoncozens/GlyphsPlugins | Comb.glyphsFilter/Contents/Resources/plugin.py | 1 | 3651 | # encoding: utf-8
from GlyphsApp.plugins import *
from math import cos, sin
from glyphmonkey import *
from itertools import izip
from GlyphsApp import LINE
class Comb(FilterWithDialog):
# Definitions of IBOutlets
# The NSView object from the User Interface. Keep this here!
dialog = objc.IBOutlet()
# Text field in dialog
myTextField = objc.IBOutlet()
def settings(self):
self.menuName = Glyphs.localize({'en': u'Comb Effect', 'de': u'Comb'})
# Load dialog from .nib (without .extension)
self.loadNib('IBdialog')
# On dialog show
def start(self):
# Set default setting if not present
if not Glyphs.defaults['org.simon-cozens.comb.teeth']:
Glyphs.defaults['org.simon-cozens.comb.teeth'] = "0,0.05,0.1,0.15,0.2,0.3,0.35,0.65,0.7,0.8,0.85,0.9,0.95,1"
self.myTextField.setStringValue_(Glyphs.defaults['org.simon-cozens.comb.teeth'])
self.myTextField.becomeFirstResponder()
# Action triggered by UI
@objc.IBAction
def setValue_( self, sender ):
# Store value coming in from dialog
Glyphs.defaults['org.simon-cozens.comb.teeth'] = sender.stringValue()
# Trigger redraw
self.update()
# Actual filter
def filter(self, layer, inEditView, customParameters):
# Called on font export, get value from customParameters
if customParameters.has_key('teeth'):
value = customParameters['teeth']
# Called through UI, use stored value
else:
value = Glyphs.defaults['org.simon-cozens.comb.teeth']
# Split teeth into array of arrays
t = map(float,value.split(","))
teeth = zip(t[::2], t[1::2])
self.combIt(layer, teeth)
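		# Illustrative example: a teeth string of "0,0.05,0.1,0.15" yields the
		# pairs [(0.0, 0.05), (0.1, 0.15)]; each pair gives the start/end
		# fraction of one tooth cut across the interpolated edges in combIt().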
def combIt(self, layer, teeth):
pathset = []
for a in layer.paths:
# Find the two smallest "ends"
l1, s1, l2, s2 = None, None, None, None
for i in range(0,len(a.segments)):
s = a.segments[i]
if type(s) is GSLineSegment and (not l1 or s.length < l1):
s1 = i
l1 = s.length
for i in range(0,len(a.segments)):
s = a.segments[i]
if type(s) is GSLineSegment and (s.length >= l1 and (not l2 or s.length < l2) and i != s1):
s2 = i
l2 = s.length
if s1 > s2: s1, s2 = s2, s1
print("Identified path end segments:")
print(a.segments[s1], a.segments[s2])
# Find two edges between segments
edge1 = [ a.segments[i] for i in range(s1+1, s2) ]
edge2 = [ a.segments[i] for i in range(s2+1, len(a.segments))]
edge2.extend([a.segments[i] for i in range(0, s1)])
for i in range(0, len(edge2)): edge2[i].reverse()
edge2.reverse()
print("\nIdentified edges")
print("Edge 1:", edge1)
print("Edge 2:", edge2)
print("Teeth ", teeth)
if len(edge1) != len(edge2):
print("Edges not compatible in " + str(layer) + " - differing number of points")
raise TypeError
for tooth in teeth:
start, end = tooth[0],tooth[1]
segs1 = []
segs2 = []
for i in range(0, len(edge1)):
segs1.append(edge1[i].interpolate(edge2[i],start))
segs2.append(edge1[i].interpolate(edge2[i],end))
for i in range(0, len(segs2)): segs2[i].reverse()
segs2.reverse()
segs1.append(GSLineSegment(tuple = (segs1[-1]._seg[-1],segs2[0]._seg[0])))
segs1.extend(segs2)
segs1.append(GSLineSegment(tuple = (segs2[-1]._seg[-1],segs1[0]._seg[0])))
segs = segs1
path = GSPath()
path.parent = a.parent
path.segments = segs
print("Adding ", path, " to ",pathset)
pathset.append(path)
path.closed = True
print(pathset)
layer.paths = pathset
def generateCustomParameter( self ):
return "%s; teeth:%s;" % (self.__class__.__name__, Glyphs.defaults['org.simon-cozens.comb.teeth'] )
| mit | -6,872,188,751,832,791,000 | 29.425 | 111 | 0.641468 | false |
bucko909/powerpod | powerpod/types.py | 1 | 25068 | from collections import namedtuple
import datetime
import calendar
import struct
import sys
class StructType(object):
"""
Automatically uses SHAPE to pack/unpack simple structs.
"""
@classmethod
def from_binary(cls, data):
try:
return cls(*cls._decode(*struct.unpack(cls.SHAPE, data)))
except:
sys.stderr.write("Error parsing {!r}\n".format(data))
raise
@staticmethod
def _decode(*args):
""" data from unpack -> data for __init__ """
return args
def to_binary(self):
return struct.pack(self.SHAPE, *self._encode())
def _encode(self):
""" data from self -> data for pack """
return self
@classmethod
def byte_size(cls):
return struct.Struct(cls.SHAPE).size
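# Illustrative sketch (not part of the original module): a minimal StructType
# subclass.  The _DemoPoint name and its two fields are invented purely for
# demonstration.
def _demo_struct_type_round_trip():
    class _DemoPoint(StructType, namedtuple('_DemoPoint', 'x y')):
        SHAPE = '<hh'  # two little-endian signed shorts
    point = _DemoPoint.from_binary(b'\x01\x00\x02\x00')  # _DemoPoint(x=1, y=2)
    assert point.to_binary() == b'\x01\x00\x02\x00'
    return point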
class StructListType(object):
"""
Automatically uses SHAPE to pack/unpack simple structs which are followed by lists of RECORD_TYPE records.
You must have 'size' in _fields, which must be the record count, and a 'records' field to hold the decoded records.
RECORD_TYPE must have a 'size', and a 'from_binary' function.
"""
@classmethod
def from_binary(cls, data):
encode = struct.Struct(cls.SHAPE)
header_size = cls.byte_size()
header = encode.unpack(data[:header_size])
record_size = cls.RECORD_TYPE.byte_size()
try:
# Specifies number of records
size_offset = cls._fields.index('size')
record_count = header[size_offset]
assert header_size + record_count * record_size == len(data), (header_size, record_count, record_size, len(data))
except ValueError:
# Specifies length of data
size_offset = cls._fields.index('data_size')
total_size = header[size_offset]
assert len(data) == header_size + total_size, (header_size, total_size, len(data))
assert total_size % record_size == 0, (total_size, record_size)
record_count = header[size_offset] / record_size
raw_records = [data[header_size + record_size * x:header_size + record_size * (x + 1)] for x in range(record_count)]
return cls(*(cls._decode(*header) + (map(cls.RECORD_TYPE.from_binary, raw_records),)))
@staticmethod
def _decode(*args):
""" data from unpack -> data for __init__ """
return args
def to_binary(self):
data_binary = ''.join(record.to_binary() for record in self.records)
if hasattr(self, 'size'):
assert self.size == len(self.records), (self.size, len(self.records))
else:
assert self.data_size == len(data_binary), (self.data_size, data_binary)
return struct.pack(self.SHAPE, *self._encode()) + data_binary
def _encode(self):
""" data from self -> data for pack """
record_offset = self._fields.index('records')
return self[:record_offset] + self[record_offset+1:]
@classmethod
def byte_size(cls):
return struct.Struct(cls.SHAPE).size
TIME_FIELDS = [
('secs', 'b'),
('mins', 'b'),
('hours', 'b'),
('day', 'b'),
('month', 'b'),
('month_length', 'b'),
('year', 'h'),
]
class NewtonTime(StructType, namedtuple('NewtonTime', zip(*TIME_FIELDS)[0])):
SHAPE = '<' + ''.join(zip(*TIME_FIELDS)[1])
def as_datetime(self):
return datetime.datetime(self.year, self.month, self.day, self.hours, self.mins, self.secs)
@classmethod
def from_datetime(cls, datetime):
days_in_month = calendar.monthrange(datetime.year, datetime.month)[1]
return cls(datetime.second, datetime.minute, datetime.hour, datetime.day, datetime.month, days_in_month, datetime.year)
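# Example round trip (illustrative):
#     >>> nt = NewtonTime.from_datetime(datetime.datetime(2016, 2, 29, 12, 30, 5))
#     >>> nt.month_length, nt.year
#     (29, 2016)
#     >>> nt.as_datetime()
#     datetime.datetime(2016, 2, 29, 12, 30, 5)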
PROFILE_FIELDS = [
('unknown_0', 'h'),
# Facts about sample_smoothing flags:
# If I send (in GetProfileData) 0x0000, I get (in SetProfileData) 0x0800.
# If I send 0xffff, I get 0xffdf.
# If I send 0x0539, I get 0x0d19.
# If I send 0x2ef0, I get 0x2ed0.
# Both of these are preserved.
# Conclusion: 0x0800 must be set, 0x0020 must be unset.
# Switching from 5s sample smoothing to 1s sets 0x0008. Setting back unsets it.
# Annoyingly, Isaac only resets to '1 sec' when you 'Get from iBike' -- it'll never reset to '5 sec', so I guess it just checks the flag.
# Conclusion: 0x0008 is the "don't smooth for 5s" flag.
# A reset profile gets 10251 (=0x280b)
('sample_smoothing', 'H', {14554: 1, 14546: 5}),
('unknown_1', 'h'),
('null_1', 'i'),
('null_2', 'h'),
# If I send 0x0000, I get 0x8009.
# If I send 0x8009, I get 0x8009.
# If I send 0xffff, I get 0x8009.
# If I then set the 'user-edited' flag by messing with stuff, I get 0x8005.
# Reset to factory default -> 0x800c
# Save/load profile 0x800c -> 0x8009
# Mess with settings 0x8009 -> 0x8005
# Save/load profile 0x8005 -> 0x8009
# Factory default is actually recognised by model (aero/fric etc) values.
# On a pristine profile, I see 0x800e or 0x800d and it's reset to 0x8009 with just a get/set. On an old recording, I saw it reset to 0x8005 on a user-edit.
# Resetting the profile gets 0x800c. Setting it once (or running through setup) gets 0x800d.
# bit 0 1 2 3
# reset 0 0 1 1
# user-edited 1 0 1 0
# save/load 1 0 0 1
# TODO TODO TODO
('user_edited', 'H', {0x8009: False, 0x8005: True}),
('total_mass_lb', 'h'),
('wheel_circumference_mm', 'h'),
('null_3', 'h'),
('unknown_3', 'h'),
('unknown_2', 'h'),
('unknown_4', 'H'),
('unknown_5', 'h'),
('aero', 'f'),
('fric', 'f'),
('unknown_6', 'f'),
('unknown_7', 'f'),
('unknown_8', 'i'),
('wind_scaling_sqrt', 'f'),
('tilt_cal', 'h'),
('cal_mass_lb', 'h'),
('rider_mass_lb', 'h'),
('unknown_9', 'h'),
# ftp_per_kilo_ish:
# Unaffected by bike weight/total weight. Just rider weight.
# rider(lb) FTP 20min value
# 100 38 40 1 # Min valid
# 100 85 91 1
# 100 86 92 2
# 100 105 111 2
# 100 106 112 3
# 100 120 126 3
# 100 121 127 4
# 100 149 157 4
# 100 150 158 5
# 100 163 172 5
# 100 164 173 6
# 100 183 193 6
# 100 184 194 7
# 100 207 218 7
# 100 208 219 8
# 100 227 239 8
# 100 228 240 9
# 100 247 260 9
# 100 248 261 10 # Stops increasing
# 80 200 211 10 # Stops increasing
# 81 200 211 9
# 88 200 211 9
# 89 200 211 8
# 96 200 211 8
# 97 200 211 7
# 109 200 211 7
# 110 200 211 6
# 122 200 211 6
# 123 200 211 5
# 134 200 211 5
# 135 200 211 4
# 165 200 211 4
# 166 200 211 3
# 189 200 211 3
# 190 200 211 2
# 232 200 211 2
# 233 200 211 1
# Roughly, this is (ftp_per_kilo-1.2)/0.454
# The values around 3 seem underestimated (formula underestimates).
# I think this is something related to the Coggan scale,
# which goes from 1.26 FTPW/kg to 6.6 FTPW/kg
('ftp_per_kilo_ish', 'h'),
('watts_20_min', 'h'), # = FTP / 0.95
('unknown_a', 'h'), # 0x0301 -> 0x0b01 (+0x0800) when sample rate changed to 1s. Never restored, though!
('speed_id', 'H'),
('cadence_id', 'H'),
('hr_id', 'H'),
('power_id', 'H'),
('speed_type', 'B'),
('cadence_type', 'B'),
('hr_type', 'B'),
('power_type', 'B'),
('power_smoothing_seconds', 'H'),
('unknown_c', 'h'), # 0x0032
]
class NewtonProfile(StructType, namedtuple('NewtonProfile', zip(*PROFILE_FIELDS)[0])):
SHAPE = '<' + ''.join(zip(*PROFILE_FIELDS)[1])
@classmethod
def _decode(cls, *args):
# Alert when any of these are interesting.
assert args[cls._fields.index('unknown_0')] == 0x5c16, args[cls._fields.index('unknown_0')]
assert args[cls._fields.index('sample_smoothing')] in (0x38d2, 0x38da, 0x380b, 0x38fb, 0x382b, 0x38db, 0x280b), args[cls._fields.index('sample_smoothing')]
assert args[cls._fields.index('unknown_1')] == 0x382b, args[cls._fields.index('unknown_1')]
assert args[cls._fields.index('null_1')] == 0, args[cls._fields.index('null_1')]
assert args[cls._fields.index('null_2')] == 0, args[cls._fields.index('null_2')]
assert args[cls._fields.index('user_edited')] in (0x8009, 0x8005, 0x800d, 0x800c, 0x19, 0x8008), args[cls._fields.index('user_edited')]
assert args[cls._fields.index('null_3')] == 0, args[cls._fields.index('null_3')]
assert args[cls._fields.index('unknown_2')] in (0, 2), args[cls._fields.index('unknown_2')]
assert args[cls._fields.index('unknown_3')] in (0, 0x1988, 0x5f5c), args[cls._fields.index('unknown_3')]
assert args[cls._fields.index('unknown_4')] in (0xbc00, 0xe766, 0, 0x20ff), args[cls._fields.index('unknown_4')]
assert args[cls._fields.index('unknown_5')] in (0, 1), args[cls._fields.index('unknown_5')]
assert args[cls._fields.index('unknown_6')] in (-38.0, -10.0, 0.0), args[cls._fields.index('unknown_6')]
assert args[cls._fields.index('unknown_7')] in (1.0, 0.0), args[cls._fields.index('unknown_7')]
assert args[cls._fields.index('unknown_8')] == 1670644000, args[cls._fields.index('unknown_8')]
assert args[cls._fields.index('unknown_9')] in (1850, 1803), args[cls._fields.index('unknown_9')]
assert args[cls._fields.index('unknown_a')] in (0x0301, 0x0b01, 0x351), args[cls._fields.index('unknown_a')]
assert args[cls._fields.index('unknown_c')] == 50, args[cls._fields.index('unknown_c')]
args = list(args)
args[cls._fields.index('tilt_cal')] = args[cls._fields.index('tilt_cal')] * 0.1
return args
def _encode(self):
return self._replace(tilt_cal=int(round(self.tilt_cal * 10)))
@classmethod
def default(cls):
return cls(
total_mass_lb=205,
user_edited=0x8008,
wheel_circumference_mm=2096,
sample_smoothing=10251,
aero=0.4899250099658966,
fric=11.310999870300293,
unknown_6=0.0,
unknown_7=0.0,
wind_scaling_sqrt=1.1510859727859497,
speed_id=0,
cadence_id=0,
hr_id=0,
power_id=0,
speed_type=0,
cadence_type=0,
hr_type=0,
power_type=0,
tilt_cal=-0.7,
cal_mass_lb=205,
rider_mass_lb=180,
unknown_9=1803,
ftp_per_kilo_ish=1,
watts_20_min=85,
unknown_a=769,
# ^^ SetProfileData
power_smoothing_seconds=1,
unknown_c=50,
# ^^ SetProfileData2
unknown_0=0x5c16,
unknown_1=0x382b,
null_1=0,
null_2=0,
null_3=0,
unknown_3=0,
unknown_2=0,
unknown_4=0,
unknown_5=0,
unknown_8=1670644000,
# ^^^ Complete unknowns
)
def swap_endian(x):
return (x >> 8) + ((x & ((1 << 8) - 1)) << 8)
def to_signed(x, bits):
if x & 1 << (bits - 1):
return x - (1 << bits)
else:
return x
def to_unsigned(x, bits):
if x < 0:
return x + (1 << bits)
else:
return x
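# Illustrative examples for the bit helpers above:
#     swap_endian(0x1234) == 0x3412
#     to_signed(0x3ff, 10) == -1
#     to_unsigned(-1, 10) == 0x3ff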
IDENTITY = lambda x: x
TO_TIMES_TEN_SIGNED = lambda base: lambda x: to_unsigned(int(x * 10), base)
FROM_TIMES_TEN_SIGNED = lambda base: lambda x: to_signed(x, base) * 0.1
FROM_TIMES_TEN = lambda x: x * 0.1
TO_TIMES_TEN = lambda x: int(x * 10)
RIDE_DATA_FIELDS = [
('elevation_feet', 16, lambda x: to_signed(swap_endian(x), 16), lambda x: swap_endian(to_unsigned(x, 16))),
('cadence', 8, IDENTITY, IDENTITY),
('heart_rate', 8, IDENTITY, IDENTITY),
('temperature_farenheit', 8, lambda x: x - 100, lambda x: x + 100),
('unknown_0', 9, lambda x: to_signed(x, 9), lambda x: to_unsigned(x, 9)),
('tilt', 10, FROM_TIMES_TEN_SIGNED(10), TO_TIMES_TEN_SIGNED(10)),
('speed_mph', 10, FROM_TIMES_TEN, TO_TIMES_TEN),
('wind_tube_pressure_difference', 10, IDENTITY, IDENTITY),
('power_watts', 11, IDENTITY, IDENTITY),
('dfpm_power_watts', 11, IDENTITY, IDENTITY),
('acceleration_maybe', 10, lambda x: to_signed(x, 10), lambda x: to_unsigned(x, 10)),
('stopped_flag_maybe', 1, IDENTITY, IDENTITY),
('unknown_3', 8, IDENTITY, IDENTITY), # if this is large, "drafting" becomes true
]
# unknown_0 seems to be highly correlated to altitude. It might be average or integrated tilt. It seems to affect the /first record/ of the ride in Isaac but not much else (small = high power, big = low power -- which supports it being some sort of tilt offset).
# acceleration_maybe seems negative when stopping, positive in general. My feeling is that it's forward acceleration. I can't get this to affect anything.
# Using 'set profile after the ride' seems to ignore both unknown_0 and acceleration_maybe. I guess they are internal values, but I can only guess what they might do.
assert sum(x[1] for x in RIDE_DATA_FIELDS) == 15 * 8
DECODE_FIFTEEN_BYTES = '{:08b}' * 15
ENCODE_FIFTEEN_BYTES = ''.join('{:0%sb}' % (fielddef[1],) for fielddef in RIDE_DATA_FIELDS)
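# Decoding sketch (illustrative, mirrors NewtonRideData.from_binary below): the
# fifteen record bytes become one 120-character bit string that is consumed
# field by field according to RIDE_DATA_FIELDS, e.g.
#     bits = DECODE_FIFTEEN_BYTES.format(*struct.unpack('15B', raw))
#     elevation_feet = to_signed(swap_endian(int(bits[0:16], 2)), 16)
#     cadence = int(bits[16:24], 2)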
class NewtonRideData(object):
SHAPE = '15s'
__slots__ = zip(*RIDE_DATA_FIELDS)[0]
def __init__(self, *args):
for name, value in zip(self.__slots__, args):
setattr(self, name, value)
@staticmethod
def byte_size():
# We are not a struct type, but we want to look like one.
return 15
@classmethod
def from_binary(cls, data):
if data.startswith('\xff\xff\xff\xff\xff\xff'):
return NewtonRideDataPaused.from_binary(data)
binary = DECODE_FIFTEEN_BYTES.format(*struct.unpack('15B', data))
vals = []
start = 0
for _name, size, decode, _encode in RIDE_DATA_FIELDS:
value = int(binary[start:start+size], 2)
start += size
vals.append(decode(value))
return cls(*vals)
def to_binary(self):
vals = []
for name, size, _decode, encode in RIDE_DATA_FIELDS:
value = getattr(self, name)
vals.append(encode(value))
binary = ENCODE_FIFTEEN_BYTES.format(*vals)
assert len(binary) == 15 * 8
chopped = [int(binary[x:x+8], 2) for x in range(0, 15*8, 8)]
return struct.pack('15B', *chopped)
@property
def elevation_metres(self):
return self.elevation_feet * 0.3048
def pressure_Pa(self, reference_pressure_Pa=101325, reference_temperature_kelvin=288.15):
return reference_pressure_Pa * (1 - (0.0065 * self.elevation_metres) / reference_temperature_kelvin) ** (9.80665 * 0.0289644 / 8.31447 / 0.0065)
@property
def temperature_kelvin(self):
return (self.temperature_farenheit + 459.67) * 5 / 9
def density(self, reference_pressure_Pa=101325, reference_temperature_kelvin=288.15):
# I say 0.8773 at 22.7778C/2516.7336m; they say 0.8768. Good enough...
# Constants from Wikipedia.
return self.pressure_Pa(reference_pressure_Pa, reference_temperature_kelvin) * 0.0289644 / 8.31447 / self.temperature_kelvin
def wind_speed_kph(self, offset=621, multiplier=13.6355, reference_pressure_Pa=101325, reference_temperature_kelvin=288.15, wind_scaling_sqrt=1.0):
# multiplier based on solving from CSV file
if self.wind_tube_pressure_difference < offset:
return 0.0
return ((self.wind_tube_pressure_difference - offset) / self.density(reference_pressure_Pa, reference_temperature_kelvin) * multiplier) ** 0.5 * wind_scaling_sqrt
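		# Illustrative numbers (sea-level defaults): a record at 0 ft and 59 F
		# has an air density of about 1.225 kg/m^3, so a
		# wind_tube_pressure_difference of 721 with the default offset of 621
		# gives roughly (100 / 1.225 * 13.6355) ** 0.5 ~= 33.4 km/h before
		# wind scaling.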
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, ', '.join(repr(getattr(self, name)) for name in self.__slots__))
class NewtonRideDataPaused(StructType, namedtuple('NewtonRideDataPaused', 'tag newton_time unknown_3')):
SHAPE = '<6s8sb'
@staticmethod
def _decode(tag, newton_time_raw, unknown_3):
return (tag, NewtonTime.from_binary(newton_time_raw), unknown_3)
def _encode(self):
return (self.tag, self.newton_time.to_binary(), self.unknown_3)
RIDE_FIELDS = [
('unknown_0', 'h', IDENTITY, IDENTITY, 17), # byte 0 -- 0x1100 observed
('size', 'i', IDENTITY, IDENTITY, 0), # byte 2
('total_mass_lb', 'f', IDENTITY, IDENTITY, 235), # byte 6, always integer?!, could be total mass
('energy_kJ', 'f', IDENTITY, IDENTITY, 0), # byte 10
('aero', 'f', IDENTITY, IDENTITY, 0.384), # byte 14
('fric', 'f', IDENTITY, IDENTITY, 12.0), # byte 18
('initial_elevation_feet', 'f', IDENTITY, IDENTITY, 0), # byte 22, always integer?!
('elevation_gain_feet', 'f', IDENTITY, IDENTITY, 0), # byte 26, always integer?!
('wheel_circumference_mm', 'f', IDENTITY, IDENTITY, 2136.0), # byte 30, always integer?!
('unknown_1', 'h', IDENTITY, IDENTITY, 15), # byte 34, 0x0f00 and 0x0e00 and 0x0e00 observed; multiplying by 10 does nothing observable. TODO is this ftp per kilo ish?
('unknown_2', 'h', IDENTITY, IDENTITY, 1), # byte 36, =1?
('start_time', '8s', NewtonTime.from_binary, NewtonTime.to_binary, NewtonTime(0, 0, 0, 1, 1, 31, 2000)), # byte 38
('pressure_Pa', 'i', IDENTITY, IDENTITY, 101325), # byte 46, appears to be pressure in Pa (observed range 100121-103175) # (setting, reported) = [(113175, 1113), (103175, 1014), (93175, 915), (203175, 1996), (1e9, 9825490), (2e9, 19650979), (-2e9, -19650979)]. Reported value in Isaac (hPa) is this divided by ~101.7761 or multiplied by 0.00982549. This isn't affected by truncating the ride at all. It /is/ affected by unknown_3; if I make unknown_3 -73 from 73, I get (-2e9, -19521083).
('Cm', 'f', IDENTITY, IDENTITY, 1.0204), # byte 50
# average_temperature_farenheit = Average of temperature records. Does not affect displayed temperature in Isaac. It affects displayed pressure in Isaac (bigger temp = closer to pressure_Pa).
# pressure_Pa = 103175
# average_temperature_farenheit = 1, pressure = 1011mbar
# average_temperature_farenheit = 100, pressure = 1015mbar
# average_temperature_farenheit = 10000, pressure = 1031mbar
# pressure_Pa = 1e9
# average_temperature_farenheit = 1, pressure = 9798543mbar
# average_temperature_farenheit = 100, pressure = 9833825mbar
# average_temperature_farenheit = 10000, pressure = 9991024mbar
('average_temperature_farenheit', 'h', IDENTITY, IDENTITY, 73), # byte 54.
('wind_scaling_sqrt', 'f', IDENTITY, IDENTITY, 1.0), # byte 56
('riding_tilt_times_10', 'h', IDENTITY, IDENTITY, 0.0), # byte 60
('cal_mass_lb', 'h', IDENTITY, IDENTITY, 235), # byte 62
('unknown_5', 'h', IDENTITY, IDENTITY, 88), # byte 64, 0x5800 and 0x6000 and 0x5c00 observed; multiplying by 10 doesn't affect: wind speed, pressure, temperature.
('wind_tube_pressure_offset', 'h', lambda x: x - 1024, lambda x: x + 1024, 620), # byte 66, this is a 10-bit signed negative number cast to unsigned and stored in a 16 bit int...
('unknown_7', 'i', IDENTITY, IDENTITY, 0), # byte 68, 0x00000000 observed
('reference_temperature_kelvin', 'h', IDENTITY, IDENTITY, 288), # byte 72, normally 288 (14.85C)
('reference_pressure_Pa', 'i', IDENTITY, IDENTITY, 101325), # byte 74
('unknown_9', 'h', IDENTITY, IDENTITY, 1), # byte 78 -- 0x0100 observed
('unknown_a', 'h', IDENTITY, IDENTITY, 50), # byte 80 -- 0x3200 observed
# byte 82
]
RIDE_DECODE = zip(*RIDE_FIELDS)[2]
RIDE_ENCODE = zip(*RIDE_FIELDS)[3]
RIDE_DEFAULTS = {key: value for key, _, _, _, value in RIDE_FIELDS}
class NewtonRide(StructListType, namedtuple('NewtonRide', zip(*RIDE_FIELDS)[0] + ('records',))):
SHAPE = '<' + ''.join(zip(*RIDE_FIELDS)[1])
RECORD_TYPE = NewtonRideData
@classmethod
def make(cls, data, **kwargs):
kwargs = {}
assert 'size' not in kwargs
assert 'records' not in kwargs
for name in cls._fields[:-1]:
kwargs[name] = RIDE_DEFAULTS[name]
kwargs['records'] = data
kwargs['size'] = len(data)
if data:
# TODO start_time, elevation gain
kwargs['average_temperature_farenheit'] = int(round(sum(x.temperature_farenheit for x in data if hasattr(x, 'temperature_farenheit')) / len(data)))
kwargs['initial_elevation_feet'] = [x.elevation_feet for x in data if hasattr(x, 'elevation_feet')][0]
kwargs['data_records'] = len(data)
kwargs['energy_kJ'] = int(round(sum(x.power_watts for x in data if hasattr(x, 'power_watts')) / 1000))
args = []
for name in cls._fields:
args.append(kwargs[name])
return cls(*args)
def _encode(self):
return tuple(encode(val) for val, encode in zip(self[:-1], RIDE_ENCODE))
@staticmethod
def _decode(*args):
return tuple(decode(val) for val, decode in zip(args, RIDE_DECODE))
def get_header(self):
return NewtonRideHeader(self.unknown_0, self.start_time, sum(x.speed_mph * 1602 / 3600. for x in self.records if isinstance(x, NewtonRideData)))
def fit_to(self, csv):
pure_records = [x for x in self.records if not hasattr(x, 'newton_time')]
csv_data = [float(x['Wind Speed (km/hr)']) for x in csv.data]
compare = [(x, y) for x, y in zip(pure_records, csv_data) if y > 0]
reference_pressure_kPa = self.reference_pressure_Pa / 1000.0
get_errors = lambda offset, multiplier: [pure_record.wind_speed_kph(offset, multiplier, reference_pressure_kPa, self.reference_temperature_kelvin, self.wind_scaling_sqrt) - csv_datum for pure_record, csv_datum in compare]
dirs = [(x, y) for x in range(-1, 2) for y in range(-1, 2) if x != 0 or y != 0]
print dirs
skip = 500
best = current = (500, 10)
best_error = float('inf')
while skip > 0.000001:
new_best = False
for x, y in dirs:
test = (current[0] + x * skip, current[1] + y * skip * 0.02)
if test[1] < 0:
continue
error = sum(map(abs, get_errors(*test)))
#print test, error
if error < best_error:
best = test
best_error = error
new_best = True
if new_best:
current = best
else:
skip *= 0.5
#print best, skip, best_error
errors = get_errors(*best)
return best, best_error, max(map(abs, errors)), ["%0.4f" % (x,) for x in errors]
def fit_elevation(self, csv):
pure_records = [x for x in self.records if not hasattr(x, 'newton_time')]
csv_data = [float(x['Elevation (meters)']) / 0.3048 for x in csv.data]
compare = [(x, y) for x, y in zip(pure_records, csv_data)]
get_errors = lambda mul: [(pure_record.density(), pure_record.elevation_feet, csv_datum, pure_record.elevation_feet - csv_datum, (pure_record.wind_tube_pressure_difference - self.wind_tube_pressure_offset), pure_record.tilt, pure_record.unknown_0, pure_record) for pure_record, csv_datum in compare]
return get_errors(0.1)
class NewtonRideHeader(StructType, namedtuple('NewtonRideHeader', 'unknown_0 start_time distance_metres')):
# \x11\x00
# newton time
# float encoding of ride length in metres.
SHAPE = '<h8sf'
def _encode(self):
return (self.unknown_0, self.start_time.to_binary(), self.distance_metres)
@classmethod
def _decode(cls, unknown_0, start_time_raw, distance_metres):
return (unknown_0, NewtonTime.from_binary(start_time_raw), distance_metres)
def to_filename(self):
return "powerpod.%s-%0.1fkm.raw" % (self.start_time.as_datetime().strftime("%Y-%m-%dT%H-%M-%S"), self.distance_metres / 1000)
class NewtonProfileScreens(StructType):
# Data is laid out as [LEFT, RIGHT]
# Sides are [AGG1, AGG2, AGG3]
# Aggregates are [TOP, MIDDLE, BOTTOM]
# Meaning of indices in metrics
# (unverified, but 'average' is (1, 2, 1) and plain is (0, 2, 1))
AGG_NOW = 0
#AGG_TRIP = 1
AGG_AVG = 2
# Metrics (PowerPod 6.12)
METRIC_SPEED = (0, 2, 1)
METRIC_DISTANCE_POWER = (3, 5, 4)
METRIC_TIME = (6, 6, 6) # I guess no point in anything but 'trip'
METRIC_POWER = (7, 9, 8)
METRIC_OTHER = (10, 12, 11)
METRIC_SLOPE = (13, 15, 14)
METRIC_WIND = (16, 18, 17)
METRIC_BLANK = (19, 22, 20)
METRIC_NORMALISED_POWER = (21, 21, 21) # I guess no point in anything but 'trip'
# Which metrics are valid on which screens?
VALID_TOP = set([METRIC_SPEED, METRIC_WIND, METRIC_SLOPE, METRIC_POWER])
# Add averages.
VALID_TOP.update((z, y, z) for _x, y, z in list(VALID_TOP))
VALID_TOP.add(METRIC_BLANK)
VALID_MIDDLE = set([METRIC_POWER, METRIC_DISTANCE_POWER, METRIC_NORMALISED_POWER, METRIC_WIND, METRIC_BLANK])
VALID_BOTTOM = set([METRIC_TIME, METRIC_OTHER])
VALID = (VALID_BOTTOM, VALID_MIDDLE, VALID_TOP)
# Screens
TOP = 0
MIDDLE = 1
BOTTOM = 2
ROWS = 3
# Sides
LEFT = 0
RIGHT = 1
SIDES = 2
# Any triple is (Now, Trip, Average)
IDENTIFIER = 0x29
SHAPE = 'b' * 18
RESPONSE = None
def __init__(self, data):
self._data = list(data)
@classmethod
def _decode(cls, *args):
return args,
def _encode(self):
return self._data
def set_screen(self, side, row, metric, aggregate):
assert 0 <= side < self.SIDES, side
assert 0 <= row < self.ROWS, row
assert metric in self.VALID[row], (metric, row)
assert aggregate in (self.AGG_AVG, self.AGG_NOW), aggregate
metric = [metric[x] for x in (aggregate, 1, 2)]
for metric_idx in (0, 1, 2):
self._data[self._index(side, row, metric_idx)] = metric[metric_idx]
def to_dict(self):
sides = {}
for side_i, side_n in enumerate(['left', 'right']):
side = sides[side_n] = {}
for row_i, row_n in enumerate(['top', 'middle', 'bottom']):
row = side[row_n] = []
for metric_idx in (0, 1, 2):
row.append(self._data[self._index(side_i, row_i, metric_idx)])
return sides
def __repr__(self):
return "{}.from_dict({})".format(self.__class__.__name__, self.to_dict())
@classmethod
def from_dict(cls, sides):
data = [0] * 18
for side_i, side_n in enumerate(['left', 'right']):
side = sides[side_n]
for row_i, row_n in enumerate(['top', 'middle', 'bottom']):
row = side[row_n]
for metric_idx, value in enumerate(row):
data[cls._index(side_i, row_i, metric_idx)] = value
return cls(data)
@classmethod
def _index(cls, side, row, metric_idx):
return (side * 3 + metric_idx) * cls.ROWS + row
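		# Illustrative mapping: _index(0, 1, 0) == 1 and _index(1, 0, 2) == 15;
		# entries 0-8 of _data hold the left side (three aggregate slots of
		# ROWS entries each) and entries 9-17 hold the right side.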
@classmethod
def default(cls):
return cls.from_dict({
'left': {
'top': cls.METRIC_SPEED,
'middle': cls.METRIC_DISTANCE_POWER,
'bottom': cls.METRIC_TIME,
},
'right': {
'top': cls.METRIC_SPEED,
'middle': cls.METRIC_POWER,
'bottom': cls.METRIC_OTHER,
}
})
| bsd-2-clause | -8,511,836,035,023,288,000 | 38.230047 | 489 | 0.65079 | false |
snd/dejavu | recognize.py | 1 | 1047 | import sys
import warnings
warnings.filterwarnings("ignore")
import argparse
import timeit
from dejavu import Dejavu
from dejavu.timer import Timer
from dejavu.recognize import FileRecognizer
parser = argparse.ArgumentParser()
parser.add_argument("file", help="the file to recognize")
parser.add_argument(
"-s",
"--secs",
help="how many seconds to fingerprint for recognition",
type=int)
args = parser.parse_args()
# load config from a JSON file (or anything outputting a python dictionary)
config = {
"database": {
"host": "127.0.0.1",
"user": "root",
"passwd": "",
"db": "dejavu"
}
}
if args.secs:
config["fingerprint_limit"] = args.secs
if __name__ == '__main__':
# create a Dejavu instance
djv = Dejavu(config)
# Recognize audio from a file
print("start recognizing")
with Timer("djv.recognize") as t:
song = djv.recognize(FileRecognizer, args.file)
print("From file we recognized: %s\n" % song)
| mit | -60,996,768,827,672,870 | 22.266667 | 75 | 0.627507 | false |
mgautierfr/ediap | libs/painter/__init__.py | 1 | 3074 | # This file is part of Edia.
#
# Ediap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Edia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Edia. If not, see <http://www.gnu.org/licenses/>
# Copyright 2014 Matthieu Gautier [email protected]
import tkinter
from .actors import *
import language.nodes
from .nodes import *
class ConstantColor(language.nodes.Node):
def __init__(self, r, v, b):
language.nodes.Node.__init__(self)
r = min(max(r, 0), 255)
v = min(max(v, 0), 255)
b = min(max(b, 0), 255)
self.value = "#%02x%02x%02x"%(r,v,b)
self.opositeColor = "#%02x%02x%02x"%(255-r,255-v,255-b)
def depend(self):
return set([self])
def get_value(self):
return self.value
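# Illustrative example: ConstantColor clamps each channel to 0-255, so
# ConstantColor(300, -5, 16).value == '#ff0010' and its opositeColor is
# '#00ffef'.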
builtins = {
'draw_line' : draw_line,
'draw_rectangle' : draw_rectangle,
'draw_ellipse' : draw_ellipse,
'draw_quad' : draw_quad,
'draw_triangle' : draw_triangle,
'change_color' : change_color
}
constants = {
'red' : ConstantColor(255, 0, 0)
}
nodes = {
'Color' : Color
}
class Context:
def __init__(self, other=None):
if other is None:
self.fillColor = language.nodes.Value("#000000")
self.fillColor.opositColor = "#FFFFFF"
self.shapes = []
else:
self.fillColor = other.fillColor
self.shapes = other.shapes[:]
def __str__(self):
return "<PainterContext\n%s\n%s>"%(self.fillColor, self.shapes)
class ContextShower(tkinter.Frame):
def __init__(self, parent):
tkinter.Frame.__init__(self, parent)
self.canvas = tkinter.Canvas(self, bg="white")
self.canvas['height'] = self.canvas['width']
self.canvas.pack(side="top")
self.canvasState = tkinter.ttk.Treeview(self, columns=('value',))
self.canvasState['height'] = 1
self.canvasState.pack()
def delete(self, what):
self.canvas.delete(what)
def draw(self, context, token, shape_=True):
for shape in context.shapes:
if shape_:
shape.draw(self.canvas)
elif shape_ is False:
shape.update(self.canvas)
if token in shape.depend():
shape.draw_helper(token, self.canvas)
def update_hiddenstate(self, context):
for child in self.canvasState.get_children():
self.canvasState.delete(child)
value = context.fillColor()
self.canvasState.insert("", "end", "fillColor", text="fillColor", value=value, tags=("fillColor",))
self.canvasState.tag_configure("fillColor", background=value, foreground=context.fillColor.opositColor)
| gpl-2.0 | 42,613,606,250,163,280 | 30.690722 | 111 | 0.634027 | false |
gratefulfrog/ArduGuitar | Ardu2/design/POC-3_MAX395/pyboard/V1_WithHMI/pyboard_no_debug/Presets.py | 1 | 13399 | import csv
from state import State
# 2016 12 25: changed saveCurrentConfigAsPreset to allow for both save to disk and not to disk
# this is where I put all the preset configs
"""
Each config is a dictionary of this form:
currentDict = {'Name': 'EasyMusic','M' : [0,0],'A' : [0,0],'B' : [0,0],'C' : [0,0],'D' : [0,0],'TR' : [None,0],'S' : '(|(+A(|BC)D)','TREM' : 0,'VIB' : 0}
"""
# configs is a dictionary with key (hs,vs) where hs is horizontal selector pos, and vs is vertical selector pos
"""
Configs = {(0,0): {'Name':'(0,0)','M' : [0,0],'A' : [0,0],'B' : [0,0],'C' : [0,0],'D' : [0,0],'TR' : [None,0],'S' : '(|(Ab)','TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : 0},
(1,0): {'Name':'(1,0)','M' : [1,0],'A' : [1,0],'B' : [1,0],'C' : [1,0],'D' : [1,0],'TR' : [None,0],'S' : '(|BA)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : 1},
(2,0): {'Name':'(2,0)','M' : [2,0],'A' : [2,0],'B' : [2,0],'C' : [2,0],'D' : [2,0],'TR' : [None,0],'S' : '(|CA)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : 4},
(3,0): {'Name':'(3,0)','M' : [3,0],'A' : [3,0],'B' : [3,0],'C' : [3,0],'D' : [3,0],'TR' : [None,0],'S' : '(|DA)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : 3},
(4,0): {'Name':'(4,0)','M' : [4,0],'A' : [4,0],'B' : [4,0],'C' : [4,0],'D' : [4,0],'TR' : [None,0],'S' : '(|dA)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : 2},
(0,1): {'Name':'(0,1)','M' : [0,1],'A' : [0,1],'B' : [0,1],'C' : [0,1],'D' : [0,1],'TR' : [None,0],'S' : '(|AB)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : 5},
(1,1): {'Name':'(1,1)','M' : [1,1],'A' : [1,1],'B' : [1,1],'C' : [1,1],'D' : [1,1],'TR' : [None,0],'S' : '(|Bc)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''},
(2,1): {'Name':'(2,1)','M' : [2,1],'A' : [2,1],'B' : [2,1],'C' : [2,1],'D' : [2,1],'TR' : [None,0],'S' : '(|CB)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : ''},
(3,1): {'Name':'(3,1)','M' : [3,1],'A' : [3,1],'B' : [3,1],'C' : [3,1],'D' : [3,1],'TR' : [None,0],'S' : '(|DB)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''},
(4,1): {'Name':'(4,1)','M' : [4,1],'A' : [4,1],'B' : [4,1],'C' : [4,1],'D' : [4,1],'TR' : [None,0],'S' : '(|dB)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : ''},
(0,2): {'Name':'(0,2)','M' : [0,2],'A' : [0,2],'B' : [0,2],'C' : [0,2],'D' : [0,2],'TR' : [None,0],'S' : '(|AC)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''},
(1,2): {'Name':'(1,2)','M' : [1,2],'A' : [1,2],'B' : [1,2],'C' : [1,2],'D' : [1,2],'TR' : [None,0],'S' : '(|BC)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : ''},
(2,2): {'Name':'(2,2)','M' : [2,2],'A' : [2,2],'B' : [2,2],'C' : [2,2],'D' : [2,2],'TR' : [None,0],'S' : '(|Cd)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''},
(3,2): {'Name':'(3,2)','M' : [3,2],'A' : [3,2],'B' : [3,2],'C' : [3,2],'D' : [3,2],'TR' : [None,0],'S' : '(|DC)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : ''},
(4,2): {'Name':'(4,2)','M' : [4,2],'A' : [4,2],'B' : [4,2],'C' : [4,2],'D' : [4,2],'TR' : [None,0],'S' : '(|dC)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''},
(0,3): {'Name':'(0,3)','M' : [0,3],'A' : [0,3],'B' : [0,3],'C' : [0,3],'D' : [0,3],'TR' : [None,0],'S' : '(|AD)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : ''},
(1,3): {'Name':'(1,3)','M' : [1,3],'A' : [1,3],'B' : [1,3],'C' : [1,3],'D' : [1,3],'TR' : [None,0],'S' : '(|BD)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''},
(2,3): {'Name':'(2,3)','M' : [2,3],'A' : [2,3],'B' : [2,3],'C' : [2,3],'D' : [2,3],'TR' : [None,0],'S' : '(|CD)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : ''},
(3,3): {'Name':'(3,3)','M' : [3,3],'A' : [3,3],'B' : [3,3],'C' : [3,3],'D' : [3,3],'TR' : [None,0],'S' : '(|Da)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''},
(4,3): {'Name':'(4,3)','M' : [4,3],'A' : [4,3],'B' : [4,3],'C' : [4,3],'D' : [4,3],'TR' : [None,0],'S' : '(|da)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : ''},
(0,4): {'Name':'(0,4)','M' : [0,4],'A' : [0,4],'B' : [0,4],'C' : [0,4],'D' : [0,4],'TR' : [None,0],'S' : '(|Ad)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''},
(1,4): {'Name':'(1,4)','M' : [1,4],'A' : [1,4],'B' : [1,4],'C' : [1,4],'D' : [1,4],'TR' : [None,0],'S' : '(|Bd)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : ''},
(2,4): {'Name':'(2,4)','M' : [2,4],'A' : [2,4],'B' : [2,4],'C' : [2,4],'D' : [2,4],'TR' : [None,0],'S' : '(|Cd)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''},
(3,4): {'Name':'(3,4)','M' : [3,4],'A' : [3,4],'B' : [3,4],'C' : [3,4],'D' : [3,4],'TR' : [None,0],'S' : '(|Da)', 'TREM' : 1,'VIB' : 1, 'AUX0' : 1, 'AUX1' : 1, 'SEQ : ''},
(4,4): {'Name':'(4,4)','M' : [4,4],'A' : [4,4],'B' : [4,4],'C' : [4,4],'D' : [4,4],'TR' : [None,0],'S' : '(|Da)', 'TREM' : 0,'VIB' : 0, 'AUX0' : 0, 'AUX1' : 0, 'SEQ : ''}
}
"""
class Preset():
class Sequencer():
def __init__(self,presetDict,seqKey):
# seqLis is an ordered list of ((key,index),(key,index)
# where key is a key from the presetDict, eg. (0,0) and index is an int.
self.seqLis = []
for k in presetDict.keys():
index = presetDict[k][seqKey]
if index != '':
self.seqLis.append((k,index))
self.seqLis.sort(key=lambda pr: pr[1])
self.seqLen = len(self.seqLis)
self.seqStartKey = self.seqLis[0][0] # the tuple that is the key to the initial seq preset
self.reset()
def reset(self):
self.nextIndex = self.seqLis[1][1]
def nextKey(self):
rNext = self.nextIndex
self.nextIndex = (self.nextIndex+1)%self.seqLen
return self.seqLis[rNext][0]
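        # Illustrative example (keys invented): if three presets carry 'SEQ'
        # values {(0,0): 0, (2,1): 1, (4,4): 2}, seqLis becomes
        # [((0,0),0), ((2,1),1), ((4,4),2)], seqStartKey is (0,0), and after
        # reset() successive nextKey() calls return (2,1), (4,4), (0,0),
        # (2,1), ... cycling through the sequence.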
def __init__(self,pyGuitarConf,fileName=None):
# the fileName is used to load a presets file, if one exists,
# if not, one is created
self.conf = pyGuitarConf
self.presets = {}
if fileName==None:
self.filePath = self.conf.LocalConf.presetDir +\
self.conf.LocalConf.dirSeparator + \
self.conf.LocalConf.presetFileName
else:
self.filePath = fileName
#State.printT ("creating preset instance from:\t" + self.filePath)
#print("creating preset instance from:\t" + self.filePath)
try:
#print(self.filePath)
with open(self.filePath, 'r') as csvfile:
# from official csv module, not used on pyboard!
# reader = csv.DictReader(csvfile,fieldnames = self.conf.Vocab.headings,delimiter=',')
# self.header = reader.next()
# the next lines refer to my version of the csv reader
reader = csv.CSV.Reader(csvfile)
self.header = next(reader)
for row in reader:
if len(row)>2:
self.rowDict2confDict(row)
if len(self.presets) ==0:
raise Exception("read failure")
except Exception as e:
#print('reading csv file threw exception: ', type(e), 'Args: ', e.args)
#State.printT( "error reading preset file! Creating new one!")
#print( "error reading preset file! Creating new one!")
self.createDefaultPresets()
self.currentDict = {}
#print(self.presets)
#print(self.presets[(0,0)])
for k in self.presets[(0,0)].keys():
self.currentDict[k] = self.presets[(0,0)][k]
self.seq=Preset.Sequencer(self.presets,pyGuitarConf.vocab.configKeys[12])
def rowDict2confDict(self,row):
#print(row)
curConfDict= {}
self.presets[(int(row[0]),int(row[1]))]= curConfDict
curConfDict[self.conf.Vocab.configKeys[0]] = row[2]
for i in range(1,6):
curConfDict[self.conf.Vocab.configKeys[i]] = [int(row[1+2*i]),int(row[2+2*i])]
curConfDict[self.conf.Vocab.configKeys[6]] = [None,int(row[13])]
curConfDict[self.conf.Vocab.configKeys[7]] = row[14]
j=15
for k in self.conf.Vocab.configKeys[8:]:
if row[j] != '':
curConfDict[k] = int(row[j])
else:
curConfDict[k] = row[j]
j+=1
#print(curConfDict)
"""
def createDefaultPresets(self):
# this will create a default preset file in the default location
# with default content
for i in range(5):
for j in range(5):
self.presets[(i,j)] = self.conf.presetConf.defaultConfDict
self.header = self.conf.Vocab.headings
#self.toFile(self.conf.presetFileName)
"""
def createDefaultPresets(self):
"""
this will create a default preset dictionary
with default content
and attempt to save to SD,
in case of write failure, message to console and continue anyway!
"""
import defaultPresets
self.presets= defaultPresets.defPresetDict
self.header = self.conf.Vocab.headings
try:
self.toFile()
except Exception as e:
#print('Writing csv file raised exception: ', type(e), 'Args: ', e.args)
pass
def toFile(self, file = None):
"""
this will write the presets to a file,
if a file argument is provided it is used and it
updates the instance filePath
otherwise the current instance filePath is used
"""
if file:
self.filePath = file
with open(self.filePath, 'w') as csvfile:
writer = csv.CSV.Writer(csvfile)
#print(self.header)
writer.writeRow(self.header)
for p in self.presets.keys():
rowDict = self.confDict2RowDict(p,self.presets[p])
#print(rowDict)
rawRow = [rowDict[k] for k in self.header]
#print(rawRow)
writer.writeRow(rawRow)
#State.printT( "Wrote file:\t" + self.filePath)
#print( "Wrote file:\t" + self.filePath)
def makeRawRowWorkAround(self,rowDict):
res = []
for badKey in self.header:
for goodKey in rowDict.keys():
if badKey==goodKey:
res.append(rowDict[goodKey])
break
return res
def confDict2RowDict(self,key,conf):
curRowDict= {}
# horiz and verti
for i in range(2):
curRowDict[self.conf.Vocab.headings[i]] = key[i]
#Name
curRowDict[self.conf.Vocab.headings[2]] = conf[self.conf.Vocab.headings[2]]
#M,A,B,C,D vol and tone
for i in range(1,6):
curRowDict[self.conf.Vocab.headings[1+2*i]] = conf[self.conf.Vocab.configKeys[i]][0]
curRowDict[self.conf.Vocab.headings[2+2*i]] = conf[self.conf.Vocab.configKeys[i]][1]
# TneRange
curRowDict[self.conf.Vocab.headings[13]] = conf[self.conf.Vocab.configKeys[6]][1]
#S,TREM,VIB,AUX0,AUX1
for k in self.conf.Vocab.headings[14:]:
curRowDict[k] = conf[k]
return curRowDict
def add(self,name,vDict):
# add this to the presets, if the vDict is proper length:
# and no keys are wrong
# print "preset.add(",name,vDict,")", self.header
newDict = {}
for k in vDict.keys():
newDict[k] = vDict[k]
self.presets[name]=newDict
#print(self.presets[name])
def remove(self,name):
# just remove it or do nothing if not possible
if name in self.presets.keys():
del self.presets[name]
def rename(self,old,new):
# to rename a preset, we create a new dict copied from previous one
# put it in with the new name
# and remove the reference to the old name
# if the old name is not found, do nothing
# return True if success, False otherwise
# print "renaming preset: " + old + " to: " + new
res = False
if old in self.presets.keys():
newDict = {}
for k in self.presets[old].keys():
newDict[k] = self.presets[old][k]
self.presets[new] = newDict
del self.presets[old]
res = True
return res
"""
# changed 2016 12 25 to save current edited conf as preset but not to disk
def saveCurrentConfigAsPreset(self, key):
self.currentDict[self.conf.vocab.configKeys[11]] = 0
self.add(key,self.currentDict)
#print(self.presets)
self.toFile()
"""
def saveCurrentConfigAsPreset(self, key, saveToDisk = True):
self.currentDict[self.conf.vocab.configKeys[11]] = 0
self.add(key,self.currentDict)
#print(self.presets)
if saveToDisk:
self.toFile()
| gpl-2.0 | 4,316,819,819,730,568,000 | 54.139918 | 183 | 0.462646 | false |
pfouque/deezer-python | deezer/client.py | 1 | 7891 | """
Implements a client class to query the
`Deezer API <http://developers.deezer.com/api>`_
"""
import json
try: # pragma: no cover - python 2
from urllib import urlencode
from urllib2 import urlopen
except ImportError: # pragma: no cover - python 3
from urllib.parse import urlencode
from urllib.request import urlopen
from deezer.resources import Album, Artist, Comment, Genre
from deezer.resources import Playlist, Radio, Track, User
from deezer.resources import Chart, Resource
class Client(object):
"""
    A client to retrieve some basic info about Deezer resources.
Create a client instance with the provided options. Options should
be passed in to the constructor as kwargs.
>>> import deezer
>>> client = deezer.Client(app_id='foo', app_secret='bar')
    This client provides several methods to retrieve the content of most
    sorts of Deezer objects, based on their JSON structure.
"""
use_ssl = True
host = "api.deezer.com"
objects_types = {
'album': Album,
'artist': Artist,
'comment': Comment,
'editorial': None,
# 'folder': None, # need identification
'genre': Genre,
'playlist': Playlist,
'radio': Radio,
'search': None,
'track': Track,
'user': User,
        'chart': Chart
}
def __init__(self, **kwargs):
super(Client, self).__init__()
self.use_ssl = kwargs.get('use_ssl', self.use_ssl)
self.host = kwargs.get('host', self.host)
self.options = kwargs
self._authorize_url = None
self.app_id = kwargs.get('app_id')
self.app_secret = kwargs.get('app_secret')
self.access_token = kwargs.get('access_token')
def _process_json(self, item, parent=None):
"""
Recursively convert dictionary
to :class:`~deezer.resources.Resource` object
:returns: instance of :class:`~deezer.resources.Resource`
"""
if 'data' in item:
return [self._process_json(i, parent) for i in item['data']]
result = {}
for key, value in item.items():
if isinstance(value, dict) and ('type' in value or 'data' in value):
value = self._process_json(value, parent)
result[key] = value
if parent is not None and hasattr(parent, 'type'):
result[parent.type] = parent
if 'type' in result:
object_class = self.objects_types.get(result['type'], Resource)
else:
object_class = self.objects_types.get(parent, Resource)
return object_class(self, result)
@staticmethod
def make_str(value):
"""
        Convert value to str in a Python 2 and Python 3 compatible way
:returns: str instance
"""
try: # pragma: no cover - python 3
value = str(value)
except UnicodeEncodeError: # pragma: no cover - python 2
value = value.encode('utf-8')
return value
@property
def scheme(self):
"""
Get the http prefix for the address depending on the use_ssl attribute
"""
return self.use_ssl and 'https' or 'http'
def url(self, request=''):
"""Build the url with the appended request if provided."""
if request.startswith('/'):
request = request[1:]
return "{0}://{1}/{2}".format(self.scheme, self.host, request)
def object_url(self, object_t, object_id=None, relation=None, **kwargs):
"""
Helper method to build the url to query to access the object
passed as parameter
:raises TypeError: if the object type is invalid
"""
if object_t not in self.objects_types:
raise TypeError("{0} is not a valid type".format(object_t))
request_items = (object_t, object_id, relation)
request_items = (item for item in request_items if item is not None)
request_items = (str(item) for item in request_items)
request = '/'.join(request_items)
base_url = self.url(request)
if kwargs:
for key, value in kwargs.items():
if not isinstance(value, str):
kwargs[key] = self.make_str(value)
result = '{0}?{1}'.format(base_url, urlencode(kwargs))
else:
result = base_url
return result
def get_object(self, object_t, object_id=None, relation=None, parent=None,
**kwargs):
"""
Actually query the Deezer API to retrieve the object
:returns: json dictionary
"""
url = self.object_url(object_t, object_id, relation, **kwargs)
response = urlopen(url)
resp_str = response.read().decode('utf-8')
response.close()
jsn = json.loads(resp_str)
return self._process_json(jsn, parent)
def get_chart(self, relation=None, **kwargs):
"""
Get chart
:returns: a list of :class:`~deezer.resources.Resource` objects.
"""
return self.get_object("chart", object_id='0', relation=relation,
parent="chart", **kwargs)
def get_album(self, object_id, relation=None, **kwargs):
"""
Get the album with the provided id
:returns: an :class:`~deezer.resources.Album` object
"""
return self.get_object("album", object_id, relation=relation, **kwargs)
def get_artist(self, object_id, relation=None, **kwargs):
"""
Get the artist with the provided id
:returns: an :class:`~deezer.resources.Artist` object
"""
return self.get_object("artist", object_id, relation=relation, **kwargs)
def get_comment(self, object_id):
"""
Get the comment with the provided id
:returns: a :class:`~deezer.resources.Comment` object
"""
return self.get_object("comment", object_id)
def get_genre(self, object_id):
"""
Get the genre with the provided id
:returns: a :class:`~deezer.resources.Genre` object
"""
return self.get_object("genre", object_id)
def get_genres(self):
"""
:returns: a list of :class:`~deezer.resources.Genre` objects.
"""
return self.get_object("genre")
def get_playlist(self, object_id):
"""
Get the playlist with the provided id
:returns: a :class:`~deezer.resources.Playlist` object
"""
return self.get_object("playlist", object_id)
def get_radio(self, object_id=None):
"""
Get the radio with the provided id.
:returns: a :class:`~deezer.resources.Radio` object
"""
return self.get_object("radio", object_id)
def get_radios(self):
"""
Get a list of radios.
:returns: a list of :class:`~deezer.resources.Radio` objects
"""
return self.get_object("radio")
def get_radios_top(self):
"""
Get the top radios (5 radios).
:returns: a :class:`~deezer.resources.Radio` object
"""
return self.get_object("radio", relation="top")
def get_track(self, object_id):
"""
Get the track with the provided id
:returns: a :class:`~deezer.resources.Track` object
"""
return self.get_object("track", object_id)
def get_user(self, object_id):
"""
Get the user with the provided id
:returns: a :class:`~deezer.resources.User` object
"""
return self.get_object("user", object_id)
def search(self, query, relation=None, **kwargs):
"""
Search track, album, artist or user
:returns: a list of :class:`~deezer.resources.Resource` objects.
"""
return self.get_object("search", relation=relation, q=query, **kwargs)
| mit | -2,699,333,509,705,244,000 | 30.564 | 80 | 0.582816 | false |
gajim/gajim | win/misc/create-launcher.py | 1 | 5971 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Creates simple Python .exe launchers for gui and cli apps
./create-launcher.py "3.8.0" <target-dir>
"""
import os
import sys
import subprocess
import shlex
import tempfile
import shutil
import struct
from distutils.spawn import find_executable
def build_resource(rc_path, out_path):
"""Raises subprocess.CalledProcessError"""
def is_64bit():
return struct.calcsize("P") == 8
subprocess.check_call(
["windres", "-O", "coff", "-F",
"pe-x86-64" if is_64bit() else "pe-i386", rc_path,
"-o", out_path])
def get_build_args():
python_name = os.path.splitext(os.path.basename(sys.executable))[0]
python_config = os.path.join(
os.path.dirname(sys.executable), python_name + "-config")
cflags = subprocess.check_output(
["sh", python_config, "--cflags"]).strip()
libs = subprocess.check_output(
["sh", python_config, "--libs"]).strip()
cflags = os.fsdecode(cflags)
libs = os.fsdecode(libs)
return shlex.split(cflags) + shlex.split(libs)
def build_exe(source_path, resource_path, is_gui, out_path):
args = ["gcc", "-s"]
if is_gui:
args.append("-mwindows")
args.extend(["-o", out_path, source_path, resource_path])
args.extend(get_build_args())
subprocess.check_call(args)
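# Illustrative example of the command build_exe assembles (assuming a 64-bit
# MinGW/MSYS2 toolchain and Python 3.8; the exact cflags/libs are whatever
# python3.8-config reports at build time):
#   gcc -s -mwindows -o Gajim.exe launcher.c launcher.res \
#       -I/mingw64/include/python3.8 ... -lpython3.8 ...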
def get_launcher_code(debug):
template = """\
#include "Python.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <shellapi.h>
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance,
LPSTR lpCmdLine, int nCmdShow)
{
int result;
LPWSTR *szArglist;
int nArgs;
int i;
szArglist = CommandLineToArgvW(GetCommandLineW(), &nArgs);
if( NULL == szArglist )
{
printf("CommandLineToArgvW failed");
return 0;
}
Py_NoUserSiteDirectory = 1;
Py_IgnoreEnvironmentFlag = 1;
Py_DontWriteBytecodeFlag = 1;
Py_FrozenFlag = 1;
Py_Initialize();
PySys_SetArgvEx(__argc, szArglist, 0);
result = PyRun_SimpleString(
"import sys; import os;"
"os.environ['GAJIM_DEBUG'] = '%s';"
"sys.frozen=True;"
"from pathlib import Path;"
"root_path = Path(sys.executable).parents[1];"
"from ctypes import windll;"
"windll.kernel32.SetDllDirectoryW(str(root_path / 'bin'));"
"from gajim import gajim;"
"gajim.main();");
Py_Finalize();
return result;
}
""" % int(debug)
return template
def get_resouce_code(filename, file_version, file_desc, icon_path,
product_name, product_version, company_name):
template = """\
1 ICON "%(icon_path)s"
1 VERSIONINFO
FILEVERSION %(file_version_list)s
PRODUCTVERSION %(product_version_list)s
FILEOS 0x4
FILETYPE 0x1
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904E4"
BEGIN
VALUE "CompanyName", "%(company_name)s"
VALUE "FileDescription", "%(file_desc)s"
VALUE "FileVersion", "%(file_version)s"
VALUE "InternalName", "%(internal_name)s"
VALUE "OriginalFilename", "%(filename)s"
VALUE "ProductName", "%(product_name)s"
VALUE "ProductVersion", "%(product_version)s"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1252
END
END
"""
def to_ver_list(v):
v = v.split("-")[0]
return ",".join(map(str, (list(map(int, v.split("."))) + [0] * 4)[:4]))
file_version_list = to_ver_list(file_version)
product_version_list = to_ver_list(product_version)
return template % {
"icon_path": icon_path, "file_version_list": file_version_list,
"product_version_list": product_version_list,
"file_version": file_version, "product_version": product_version,
"company_name": company_name, "filename": filename,
"internal_name": os.path.splitext(filename)[0],
"product_name": product_name, "file_desc": file_desc,
}
def build_launcher(out_path, icon_path, file_desc, product_name, product_version,
company_name, is_gui, debug=False):
src_ico = os.path.abspath(icon_path)
target = os.path.abspath(out_path)
file_version = product_version
dir_ = os.getcwd()
temp = tempfile.mkdtemp()
try:
os.chdir(temp)
with open("launcher.c", "w") as h:
h.write(get_launcher_code(debug))
shutil.copyfile(src_ico, "launcher.ico")
with open("launcher.rc", "w") as h:
h.write(get_resouce_code(
os.path.basename(target), file_version, file_desc,
"launcher.ico", product_name, product_version, company_name))
build_resource("launcher.rc", "launcher.res")
build_exe("launcher.c", "launcher.res", is_gui, target)
finally:
os.chdir(dir_)
shutil.rmtree(temp)
def main():
argv = sys.argv
version = argv[1]
target = argv[2]
company_name = "Gajim"
misc = os.path.dirname(os.path.realpath(__file__))
build_launcher(
os.path.join(target, "Gajim.exe"),
os.path.join(misc, "gajim.ico"), "Gajim", "Gajim",
version, company_name, True)
build_launcher(
os.path.join(target, "Gajim-Debug.exe"),
os.path.join(misc, "gajim.ico"), "Gajim", "Gajim",
version, company_name, False, debug=True)
# build_launcher(
# os.path.join(target, "history_manager.exe"),
# os.path.join(misc, "gajim.ico"), "History Manager", "History Manager",
# version, company_name, 'history_manager.py', True)
if __name__ == "__main__":
main()
| gpl-3.0 | -4,010,021,185,229,620,700 | 27.706731 | 81 | 0.610283 | false |
geometalab/Vector-Tiles-Reader-QGIS-Plugin | plugin/util/global_map_tiles.py | 1 | 14349 | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL2Tiles, Google Summer of Code 2007 & 2008
# Global Map Tiles Classes
# Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326,
# generate a simple HTML viewers based on Google Maps and OpenLayers
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
#
###############################################################################
# Copyright (c) 2008 Klokan Petr Pridal. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project, please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
import math
class GlobalMercator(object):
"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.
Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in metres XY pixels Z zoom XYZ from TMS
EPSG:4326 EPSG:900913
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:900913?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913
    Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
    The lat/lon coordinates are using the WGS84 datum, right?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
    they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
    0.33 percent scale distortion in the Y direction, which is not visually noticeable.
How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of the tools supports -t_srs 'epsg:900913'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
    The same projection is defined as EPSG:3785. WKT definition is in the official
EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPGS:900913:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.2572235630016,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tileSize << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:900913 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tileSize, ty * self.tileSize, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tileSize, (ty + 1) * self.tileSize, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latutude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2 ** zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(30):
if pixelSize > self.Resolution(i):
return i - 1 if i != 0 else 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2 ** zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2 ** zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
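        # Worked example (illustrative): TMS tile (tx=3, ty=5) at zoom 3 first
        # flips to Google y = (2**3 - 1) - 5 = 2, then encodes to quadkey "031".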
# ---------------------
class GlobalGeodetic(object):
"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
Pyramid has on top level two tiles, so it is not square but rectangle.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tileSize=256):
self.tileSize = tileSize
def LatLonToPixels(self, lat, lon, zoom):
"Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = 180 / 256.0 / 2 ** zoom
        px = (180 + lon) / res
        py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return 180 / 256.0 / 2 ** zoom
# return 180 / float( 1 << (8+zoom) )
    def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = 180 / 256.0 / 2 ** zoom
return (tx * 256 * res - 180, ty * 256 * res - 90, (tx + 1) * 256 * res - 180, (ty + 1) * 256 * res - 90)
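if __name__ == "__main__":
    # Quick demonstration, not part of the original gdal2tiles module: map a
    # WGS84 point (approximately Zurich) to its Spherical Mercator tile
    # address. Printed values are for illustration only.
    mercator = GlobalMercator()
    mx, my = mercator.LatLonToMeters(47.3769, 8.5417)
    tx, ty = mercator.MetersToTile(mx, my, 12)
    print("EPSG:900913 meters:", mx, my)
    print("TMS tile at zoom 12:", tx, ty)
    print("Google tile:", mercator.GoogleTile(tx, ty, 12))
    print("QuadTree key:", mercator.QuadTree(tx, ty, 12))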
| gpl-2.0 | 6,512,009,764,957,224,000 | 40.351585 | 113 | 0.621298 | false |
droodle/kansha | kansha/card/comp.py | 1 | 12065 | # -*- coding:utf-8 -*-
# --
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
import dateutil.parser
from nagare import component, log, security
from nagare.i18n import _, _L
from .models import DataCard
from ..checklist import comp as checklist
from ..label import comp as label
from ..comment import comp as comment
from ..vote import comp as vote
from ..description import comp as description
from ..due_date import comp as due_date
from ..title import comp as title
from ..user import usermanager
from .. import exceptions, notifications
from ..toolbox import overlay
from ..gallery import comp as gallery
from nagare import editor
from nagare import validator
# WEIGHTING TYPES
WEIGHTING_OFF = 0
WEIGHTING_FREE = 1
WEIGHTING_LIST = 2
class NewCard(object):
"""New card component
"""
def __init__(self, column):
self.column = column
self.needs_refresh = False
def toggle_refresh(self):
self.needs_refresh = not self.needs_refresh
class Card(object):
"""Card component
"""
max_shown_members = 3
def __init__(self, id_, column, assets_manager, data=None):
"""Initialization
In:
- ``id_`` -- the id of the card in the database
- ``column`` -- father
"""
self.db_id = id_
self.id = 'card_' + str(self.db_id)
self.column = column
self.assets_manager = assets_manager
self._data = data
self.reload(data if data else self.data)
@property
def board(self):
return self.column.board
def reload(self, data=None):
"""Refresh the sub components
"""
data = data if data else self.data
self.title = component.Component(CardTitle(self))
self.checklists = component.Component(checklist.Checklists(self))
self.description = component.Component(CardDescription(self))
self.due_date = component.Component(due_date.DueDate(self))
self.gallery = component.Component(gallery.Gallery(self, self.assets_manager))
self.comments = component.Component(comment.Comments(self, data.comments))
self.flow = component.Component(CardFlow(self, self.comments, self.gallery))
self.labels = component.Component(label.CardLabels(self))
self.votes = component.Component(vote.Votes(self))
self.author = component.Component(usermanager.get_app_user(data.author.username, data=data.author))
self._weight = component.Component(CardWeightEditor(self))
# members part of the card
self.overlay_add_members = component.Component(
overlay.Overlay(lambda r: '+',
lambda r: component.Component(self).render(r, model='add_member_overlay'), dynamic=True, cls='card-overlay'))
self.new_member = component.Component(usermanager.AddMembers(self.autocomplete_method)).on_answer(self.add_members)
self.members = [component.Component(usermanager.get_app_user(member.username, data=member))
for member in data.members]
self.see_all_members = component.Component(overlay.Overlay(lambda r: "%s more..." % (len(self.members) - self.max_shown_members),
lambda r: component.Component(self).on_answer(self.remove_member).render(r, model='members_list_overlay'),
dynamic=False, cls='card-overlay'))
@property
def favorites(self):
"""Return favorites users for a given card
Ask favorites to self.column
Store favorites in self._favorites to avoid CallbackLookupError
Return:
        - list of favorites (User instances) wrapped in components
"""
self._favorites = [component.Component(usermanager.get_app_user(username), "friend").on_answer(self.add_members)
for (username, _) in sorted(self.column.favorites.items(), key=lambda e:-e[1])[:5]
if username not in [member().username for member in self.members]]
return self._favorites
@property
def data(self):
"""Return the card object from the database
"""
if self._data is None:
self._data = DataCard.get(self.db_id)
return self._data
def __getstate__(self):
self._data = None
return self.__dict__
@property
def weight(self):
return self.data.weight
@weight.setter
def weight(self, value):
values = {'from': self.data.weight, 'to': value, 'card': self.data.title}
notifications.add_history(self.column.board.data, self.data, security.get_user().data, u'card_weight', values)
self.data.weight = value
def set_title(self, title):
"""Set title
In:
- ``title`` -- new title
"""
values = {'from': self.data.title, 'to': title}
notifications.add_history(self.column.board.data, self.data, security.get_user().data, u'card_title', values)
self.data.title = title
def get_title(self):
"""Get title
Return :
- the card title
"""
return self.data.title
def delete(self):
"""Delete itself"""
self.gallery().delete_assets()
DataCard.delete_card(self.data)
def move_card(self, card_index, column):
"""Move card
In:
- ``card_index`` -- new index of the card
- ``column`` -- new father
"""
data_card = self.data
data_card.index = card_index
column.data.cards.append(data_card)
self.column = column
def get_authorized_users(self):
        Return users who are authorized to be added to this card
        Return:
        - a set of users (UserData instances)
"""
return set(self.column.get_authorized_users()) | set(self.column.get_pending_users()) - set(self.data.members)
def autocomplete_method(self, value):
""" """
return [u for u in usermanager.UserManager.search(value) if u in self.get_authorized_users()]
def get_available_labels(self):
return self.column.get_available_labels()
#################
# Members methods
#################
def add_members(self, emails):
"""Add new members from emails
In:
- ``emails`` -- emails in string separated by "," or list of strings
Return:
- JS code, reload card and hide overlay
"""
members = []
if isinstance(emails, (str, unicode)):
emails = [e.strip() for e in emails.split(',') if e.strip() != '']
# Get all users with emails
for email in emails:
new_member = usermanager.UserManager.get_by_email(email)
if new_member:
members.append(new_member)
self._add_members(members)
return "YAHOO.kansha.reload_cards['%s']();YAHOO.kansha.app.hideOverlay();" % self.id
def _add_members(self, new_data_members):
"""Add members to a card
In:
        - ``new_data_members`` -- all UserData instances to attach to the card
Return:
- list of new DataMembers added
"""
res = []
for new_data_member in new_data_members:
if self.add_member(new_data_member):
res.append(new_data_member)
values = {'user_id': new_data_member.username, 'user': new_data_member.fullname, 'card': self.data.title}
notifications.add_history(self.column.board.data, self.data, security.get_user().data, u'card_add_member', values)
return res
def add_member(self, new_data_member):
"""Attach new member to card
In:
- ``new_data_member`` -- UserData instance
Return:
- the new DataMember added
"""
data = self.data
if (new_data_member not in data.members and
new_data_member in self.get_authorized_users()):
log.debug('Adding %s to members' % (new_data_member.username,))
data.members.append(new_data_member)
self.members.append(component.Component(usermanager.get_app_user(new_data_member.username, data=new_data_member)))
return new_data_member
def remove_member(self, username):
"""Remove member username from card member"""
data_member = usermanager.UserManager().get_by_username(username)
if data_member:
log.debug('Removing %s from card %s' % (username, self.id))
data = self.data
data.members.remove(data_member)
for member in self.members:
if member().username == username:
self.members.remove(member)
values = {'user_id': member().username, 'user': member().data.fullname, 'card': data.title}
notifications.add_history(self.column.board.data, data, security.get_user().data, u'card_remove_member', values)
else:
raise exceptions.KanshaException(_("User not found : %s" % username))
def remove_board_member(self, member):
"""Remove member from board
Remove member from board. If member is linked to a card, remove it
from the list of members
In:
- ``member`` -- Board Member instance to remove
"""
self.data.remove_board_member(member)
self.members = [component.Component(usermanager.get_app_user(m.username, data=m))
for m in self.data.members]
# Cover methods
def make_cover(self, asset):
"""Make card cover with asset
In:
- ``asset`` -- New cover, Asset component
"""
self.data.make_cover(asset)
def has_cover(self):
return self.data.cover is not None
def get_cover(self):
return gallery.Asset(self.data.cover, self.assets_manager)
def remove_cover(self):
self.data.remove_cover()
def new_start_from_ajax(self, request, response):
start = dateutil.parser.parse(request.GET['start']).date()
self.due_date().set_value(start)
class CardTitle(title.Title):
"""Card title component
"""
model = DataCard
field_type = 'input'
class CardDescription(description.Description):
    # We work on cards
model = DataCard
type = _L('card')
class CardFlow(object):
"""Flow of comments, pictures, and so on, associated to a card"""
def __init__(self, card, *source_components):
"""Init method
In:
- ``source_components`` -- Components
- on an object inheriting from FlowSource
- having a "flow" view
"""
self.card = card
self.source_components = source_components
@property
def elements(self):
res = []
for s in self.source_components:
res.extend(s().flow_elements)
return sorted(res, key=lambda el: getattr(el(), 'creation_date', ''), reverse=True)
class CardWeightEditor(editor.Editor):
""" Card weight Form
"""
fields = {'weight'}
def __init__(self, target, *args):
"""
In:
- ``target`` -- Card instance
"""
super(CardWeightEditor, self).__init__(target, self.fields)
self.weight.validate(self.validate_weight)
def validate_weight(self, value):
"""
Integer or empty
"""
if value:
validator.IntValidator(value).to_int()
return value
@property
def board(self):
return self.target.board
def commit(self):
if self.is_validated(self.fields):
super(CardWeightEditor, self).commit(self.fields)
return True
return False
| bsd-3-clause | -3,670,669,339,380,194,000 | 31.964481 | 173 | 0.594115 | false |
google-research/google-research | ncsnv3/ncsn_lib.py | 1 | 21967 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Training and evalution for score-based generative models."""
import functools
import gc
import io
import os
import time
from typing import Any
from . import datasets
from . import evaluation
from . import losses
from . import models # Keep this import for registering all model definitions.
from . import sampling
from . import utils
from .models import utils as mutils
from absl import logging
import flax
import flax.jax_utils as flax_utils
from flax.metrics import tensorboard
import flax.nn as nn
from flax.training import checkpoints
import jax
import jax.numpy as jnp
import ml_collections
from .models import ddpm, ncsnv2, ncsnv3
import numpy as np
import tensorflow as tf
import tensorflow_gan as tfgan
def train(config, workdir):
"""Runs a training loop.
Args:
config: Configuration to use.
    workdir: Working directory for checkpoints and TF summaries. If this
      contains a checkpoint, training will be resumed from the latest checkpoint.
"""
# Create directories for experimental logs
tf.io.gfile.makedirs(workdir)
sample_dir = os.path.join(workdir, "samples")
tf.io.gfile.makedirs(sample_dir)
rng = jax.random.PRNGKey(config.seed)
tb_dir = os.path.join(workdir, "tensorboard")
tf.io.gfile.makedirs(tb_dir)
if jax.host_id() == 0:
writer = tensorboard.SummaryWriter(tb_dir)
# Initialize model.
rng, model_rng = jax.random.split(rng)
model_name = config.model.name
ncsn_def = mutils.get_model(model_name).partial(config=config)
rng, run_rng = jax.random.split(rng)
# Whether the generative model is conditioned on class labels
class_conditional = "conditional" in config.training.loss.lower()
with nn.stateful() as init_model_state:
with nn.stochastic(run_rng):
input_shape = (jax.local_device_count(), config.data.image_size,
config.data.image_size, 3)
input_list = [(input_shape, jnp.float32), (input_shape[:1], jnp.int32)]
if class_conditional:
input_list.append(input_list[-1])
_, initial_params = ncsn_def.init_by_shape(
model_rng, input_list, train=True)
ncsn = nn.Model(ncsn_def, initial_params)
optimizer = losses.get_optimizer(config).create(ncsn)
state = mutils.State(step=0, optimizer=optimizer, lr=config.optim.lr,
model_state=init_model_state,
ema_rate=config.model.ema_rate,
params_ema=initial_params,
rng=rng) # pytype: disable=wrong-keyword-args
del ncsn, init_model_state # Do not keep a copy of the initial model.
# Create checkpoints directory and the initial checkpoint
checkpoint_dir = os.path.join(workdir, "checkpoints")
ckpt = utils.Checkpoint(
checkpoint_dir,
max_to_keep=None)
ckpt.restore_or_initialize(state)
# Save intermediate checkpoints to resume training automatically
checkpoint_meta_dir = os.path.join(workdir, "checkpoints-meta")
ckpt_meta = utils.Checkpoint(
checkpoint_meta_dir,
max_to_keep=1)
state = ckpt_meta.restore_or_initialize(state)
initial_step = int(state.step)
rng = state.rng
# Build input pipeline.
rng, ds_rng = jax.random.split(rng)
train_ds, eval_ds, _ = datasets.get_dataset(ds_rng, config)
train_iter = iter(train_ds) # pytype: disable=wrong-arg-types
eval_iter = iter(eval_ds) # pytype: disable=wrong-arg-types
scaler = datasets.get_data_scaler(config) # data normalizer
inverse_scaler = datasets.get_data_inverse_scaler(config)
# Distribute training.
optimize_fn = losses.optimization_manager(config)
if config.training.loss.lower() == "ddpm":
# Use score matching loss with DDPM-type perturbation.
ddpm_params = mutils.get_ddpm_params()
train_step = functools.partial(losses.ddpm_loss, ddpm_params=ddpm_params,
train=True, optimize_fn=optimize_fn)
eval_step = functools.partial(losses.ddpm_loss, ddpm_params=ddpm_params,
train=False)
else:
# Use score matching loss with NCSN-type perturbation.
sigmas = mutils.get_sigmas(config)
# Whether to use a continuous distribution of noise levels
continuous = "continuous" in config.training.loss.lower()
train_step = functools.partial(
losses.ncsn_loss,
sigmas=sigmas,
class_conditional=class_conditional,
continuous=continuous,
train=True,
optimize_fn=optimize_fn,
anneal_power=config.training.anneal_power)
eval_step = functools.partial(
losses.ncsn_loss,
sigmas=sigmas,
class_conditional=class_conditional,
continuous=continuous,
train=False,
anneal_power=config.training.anneal_power)
p_train_step = jax.pmap(train_step, axis_name="batch")
p_eval_step = jax.pmap(eval_step, axis_name="batch")
state = flax_utils.replicate(state)
num_train_steps = config.training.n_iters
logging.info("Starting training loop at step %d.", initial_step)
rng = jax.random.fold_in(rng, jax.host_id())
for step in range(initial_step, num_train_steps + 1):
# `step` is a Python integer. `state.step` is JAX integer on the GPU/TPU
# devices.
# Convert data to JAX arrays. Use ._numpy() to avoid copy.
batch = jax.tree_map(lambda x: scaler(x._numpy()), next(train_iter)) # pylint: disable=protected-access
rng, *next_rng = jax.random.split(rng, num=jax.local_device_count() + 1)
next_rng = jnp.asarray(next_rng)
loss, state = p_train_step(next_rng, state, batch)
loss = flax.jax_utils.unreplicate(loss)
# Quick indication that training is happening.
logging.log_first_n(logging.INFO, "Finished training step %d.", 5, step)
if jax.host_id() == 0 and step % 50 == 0:
logging.info("step: %d, training_loss: %.5e", step, loss)
writer.scalar("training_loss", loss, step)
# Save a temporary checkpoint to resume training after pre-emption.
if step % config.training.snapshot_freq_for_preemption == 0 and jax.host_id(
) == 0:
saved_state = flax_utils.unreplicate(state)
saved_state = saved_state.replace(rng=rng)
ckpt_meta.save(saved_state)
# Report the loss on an evaluation dataset.
if step % 100 == 0:
rng, *next_rng = jax.random.split(rng, num=jax.local_device_count() + 1)
next_rng = jnp.asarray(next_rng)
eval_batch = jax.tree_map(lambda x: scaler(x._numpy()), next(eval_iter)) # pylint: disable=protected-access
eval_loss, _ = p_eval_step(next_rng, state, eval_batch)
eval_loss = flax.jax_utils.unreplicate(eval_loss)
if jax.host_id() == 0:
logging.info("step: %d, eval_loss: %.5e", step, eval_loss)
writer.scalar("eval_loss", eval_loss, step)
# Save a checkpoint periodically and generate samples.
if (step +
1) % config.training.snapshot_freq == 0 or step == num_train_steps:
# Save the checkpoint.
if jax.host_id() == 0:
saved_state = flax_utils.unreplicate(state)
saved_state = saved_state.replace(rng=rng)
ckpt.save(saved_state)
# Generate and save samples
if config.training.snapshot_sampling:
rng, sample_rng = jax.random.split(rng)
init_shape = tuple(train_ds.element_spec["image"].shape)
samples = sampling.get_samples(sample_rng,
config,
flax_utils.unreplicate(state),
init_shape,
scaler,
inverse_scaler,
class_conditional=class_conditional)
this_sample_dir = os.path.join(
sample_dir, "iter_{}_host_{}".format(step, jax.host_id()))
tf.io.gfile.makedirs(this_sample_dir)
if config.sampling.final_only: # Do not save intermediate samples
sample = samples[-1]
image_grid = sample.reshape((-1, *sample.shape[2:]))
nrow = int(np.sqrt(image_grid.shape[0]))
sample = np.clip(sample * 255, 0, 255).astype(np.uint8)
with tf.io.gfile.GFile(
os.path.join(this_sample_dir, "sample.np"), "wb") as fout:
np.save(fout, sample)
with tf.io.gfile.GFile(
os.path.join(this_sample_dir, "sample.png"), "wb") as fout:
utils.save_image(image_grid, fout, nrow=nrow, padding=2)
else: # Save all intermediate samples produced during sampling.
for i, sample in enumerate(samples):
image_grid = sample.reshape((-1, *sample.shape[2:]))
nrow = int(np.sqrt(image_grid.shape[0]))
sample = np.clip(sample * 255, 0, 255).astype(np.uint8)
with tf.io.gfile.GFile(
os.path.join(this_sample_dir, "sample_{}.np".format(i)),
"wb") as fout:
np.save(fout, sample)
with tf.io.gfile.GFile(
os.path.join(this_sample_dir, "sample_{}.png".format(i)),
"wb") as fout:
utils.save_image(image_grid, fout, nrow=nrow, padding=2)
def evaluate(config,
workdir,
eval_folder = "eval"):
"""Evaluate trained models.
Args:
config: Configuration to use.
workdir: Working directory for checkpoints.
eval_folder: The subfolder for storing evaluation results. Default to
"eval".
"""
# Create eval_dir
eval_dir = os.path.join(workdir, eval_folder)
tf.io.gfile.makedirs(eval_dir)
rng = jax.random.PRNGKey(config.seed + 1)
# Build input pipeline.
rng, ds_rng = jax.random.split(rng)
_, eval_ds, _ = datasets.get_dataset(ds_rng, config, evaluation=True)
scaler = datasets.get_data_scaler(config)
inverse_scaler = datasets.get_data_inverse_scaler(config)
# Initialize model.
rng, model_rng = jax.random.split(rng)
model_name = config.model.name
ncsn_def = mutils.get_model(model_name).partial(config=config)
rng, run_rng = jax.random.split(rng)
class_conditional = "conditional" in config.training.loss.lower()
with nn.stateful() as init_model_state:
with nn.stochastic(run_rng):
input_shape = tuple(eval_ds.element_spec["image"].shape[1:])
input_list = [(input_shape, jnp.float32), (input_shape[:1], jnp.int32)]
if class_conditional:
input_list.append(input_list[-1])
_, initial_params = ncsn_def.init_by_shape(
model_rng, input_list, train=True)
ncsn = nn.Model(ncsn_def, initial_params)
optimizer = losses.get_optimizer(config).create(ncsn)
state = mutils.State(step=0, optimizer=optimizer, lr=config.optim.lr,
model_state=init_model_state,
ema_rate=config.model.ema_rate,
params_ema=initial_params,
rng=rng) # pytype: disable=wrong-keyword-args
del ncsn, init_model_state # Do not keep a copy of the initial model.
checkpoint_dir = os.path.join(workdir, "checkpoints")
if config.training.loss.lower() == "ddpm":
# Use the score matching loss with DDPM-type perturbation.
ddpm_params = mutils.get_ddpm_params()
eval_step = functools.partial(
losses.ddpm_loss, ddpm_params=ddpm_params, train=False)
else:
# Use the score matching loss with NCSN-type perturbation.
sigmas = mutils.get_sigmas(config)
continuous = "continuous" in config.training.loss.lower()
eval_step = functools.partial(
losses.ncsn_loss,
sigmas=sigmas,
continuous=continuous,
class_conditional=class_conditional,
train=False,
anneal_power=config.training.anneal_power)
p_eval_step = jax.pmap(eval_step, axis_name="batch")
rng = jax.random.fold_in(rng, jax.host_id())
# A data class for checkpointing.
@flax.struct.dataclass
class EvalMeta:
ckpt_id: int
round_id: int
rng: Any
# Add one additional round to get the exact number of samples as required.
num_rounds = config.eval.num_samples // config.eval.batch_size + 1
eval_meta = EvalMeta(ckpt_id=config.eval.begin_ckpt, round_id=-1, rng=rng)
eval_meta = checkpoints.restore_checkpoint(
eval_dir, eval_meta, step=None, prefix=f"meta_{jax.host_id()}_")
if eval_meta.round_id < num_rounds - 1:
begin_ckpt = eval_meta.ckpt_id
begin_round = eval_meta.round_id + 1
else:
begin_ckpt = eval_meta.ckpt_id + 1
begin_round = 0
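  # Resume example (illustrative): if the last saved meta was
  # (ckpt_id=5, round_id=num_rounds - 1), sampling restarts at checkpoint 6,
  # round 0; if it stopped mid-checkpoint, the same checkpoint continues at
  # round_id + 1.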
rng = eval_meta.rng
# Use inceptionV3 for images with higher resolution
inceptionv3 = config.data.image_size >= 256
inception_model = evaluation.get_inception_model(inceptionv3=inceptionv3)
logging.info("begin checkpoint: %d", begin_ckpt)
for ckpt in range(begin_ckpt, config.eval.end_ckpt + 1):
ckpt_filename = os.path.join(checkpoint_dir, "ckpt-{}.flax".format(ckpt))
# Wait if the target checkpoint hasn't been produced yet.
waiting_message_printed = False
while not tf.io.gfile.exists(ckpt_filename):
if not waiting_message_printed and jax.host_id() == 0:
logging.warn("Waiting for the arrival of ckpt-%d.flax", ckpt)
waiting_message_printed = True
time.sleep(10)
# In case the file was just written and not ready to read from yet.
try:
state = utils.load_state_dict(ckpt_filename, state)
except:
time.sleep(60)
try:
state = utils.load_state_dict(ckpt_filename, state)
except:
time.sleep(120)
state = utils.load_state_dict(ckpt_filename, state)
pstate = flax.jax_utils.replicate(state)
eval_iter = iter(eval_ds) # pytype: disable=wrong-arg-types
# Compute the loss function on the full evaluation dataset.
all_losses = []
for i, batch in enumerate(eval_iter):
rng, *next_rng = jax.random.split(rng, num=jax.local_device_count() + 1)
next_rng = jnp.asarray(next_rng)
eval_batch = jax.tree_map(lambda x: scaler(x._numpy()), batch) # pylint: disable=protected-access
eval_loss, _ = p_eval_step(next_rng, pstate, eval_batch)
eval_loss = flax.jax_utils.unreplicate(eval_loss)
all_losses.append(eval_loss)
if (i + 1) % 1000 == 0 and jax.host_id() == 0:
logging.info("Finished %dth step loss evaluation", i + 1)
all_losses = jnp.asarray(all_losses)
state = jax.device_put(state)
# Sampling and computing statistics for Inception scores, FIDs, and KIDs.
# Designed to be pre-emption safe. Automatically resumes when interrupted.
for r in range(begin_round, num_rounds):
if jax.host_id() == 0:
logging.info("sampling -- ckpt: %d, round: %d", ckpt, r)
rng, sample_rng = jax.random.split(rng)
init_shape = tuple(eval_ds.element_spec["image"].shape)
this_sample_dir = os.path.join(
eval_dir, f"ckpt_{ckpt}_host_{jax.host_id()}")
tf.io.gfile.makedirs(this_sample_dir)
samples = sampling.get_samples(sample_rng, config, state, init_shape,
scaler, inverse_scaler,
class_conditional=class_conditional)
samples = samples[-1]
samples = np.clip(samples * 255., 0, 255).astype(np.uint8)
samples = samples.reshape(
(-1, config.data.image_size, config.data.image_size, 3))
with tf.io.gfile.GFile(
os.path.join(this_sample_dir, f"samples_{r}.npz"), "wb") as fout:
io_buffer = io.BytesIO()
np.savez_compressed(io_buffer, samples=samples)
fout.write(io_buffer.getvalue())
gc.collect()
latents = evaluation.run_inception_distributed(samples, inception_model,
inceptionv3=inceptionv3)
gc.collect()
with tf.io.gfile.GFile(
os.path.join(this_sample_dir, f"statistics_{r}.npz"), "wb") as fout:
io_buffer = io.BytesIO()
np.savez_compressed(
io_buffer, pool_3=latents["pool_3"], logits=latents["logits"])
fout.write(io_buffer.getvalue())
eval_meta = eval_meta.replace(ckpt_id=ckpt, round_id=r, rng=rng)
# Save an intermediate checkpoint directly if not the last round.
# Otherwise save eval_meta after computing the Inception scores and FIDs
if r < num_rounds - 1:
checkpoints.save_checkpoint(
eval_dir,
eval_meta,
step=ckpt * num_rounds + r,
keep=1,
prefix=f"meta_{jax.host_id()}_")
# Compute inception scores, FIDs and KIDs.
if jax.host_id() == 0:
# Load all statistics that have been previously computed and saved.
all_logits = []
all_pools = []
for host in range(jax.host_count()):
this_sample_dir = os.path.join(eval_dir, f"ckpt_{ckpt}_host_{host}")
stats = tf.io.gfile.glob(
os.path.join(this_sample_dir, "statistics_*.npz"))
wait_message = False
while len(stats) < num_rounds:
if not wait_message:
logging.warn("Waiting for statistics on host %d", host)
wait_message = True
stats = tf.io.gfile.glob(
os.path.join(this_sample_dir, "statistics_*.npz"))
time.sleep(1)
for stat_file in stats:
with tf.io.gfile.GFile(stat_file, "rb") as fin:
stat = np.load(fin)
if not inceptionv3:
all_logits.append(stat["logits"])
all_pools.append(stat["pool_3"])
if not inceptionv3:
all_logits = np.concatenate(
all_logits, axis=0)[:config.eval.num_samples]
all_pools = np.concatenate(all_pools, axis=0)[:config.eval.num_samples]
# Load pre-computed dataset statistics.
data_stats = evaluation.load_dataset_stats(config)
data_pools = data_stats["pool_3"]
if hasattr(config.eval, "num_partitions"):
# Divide samples into several partitions and compute FID/KID/IS on them.
assert not inceptionv3
fids = []
kids = []
inception_scores = []
partition_size = config.eval.num_samples // config.eval.num_partitions
tf_data_pools = tf.convert_to_tensor(data_pools)
for i in range(config.eval.num_partitions):
this_pools = all_pools[i * partition_size:(i + 1) * partition_size]
this_logits = all_logits[i * partition_size:(i + 1) * partition_size]
inception_scores.append(
tfgan.eval.classifier_score_from_logits(this_logits))
fids.append(
tfgan.eval.frechet_classifier_distance_from_activations(
data_pools, this_pools))
this_pools = tf.convert_to_tensor(this_pools)
kids.append(
tfgan.eval.kernel_classifier_distance_from_activations(
tf_data_pools, this_pools).numpy())
fids = np.asarray(fids)
inception_scores = np.asarray(inception_scores)
kids = np.asarray(kids)
with tf.io.gfile.GFile(os.path.join(eval_dir, f"report_all_{ckpt}.npz"),
"wb") as f:
io_buffer = io.BytesIO()
np.savez_compressed(
io_buffer, all_losses=all_losses, mean_loss=all_losses.mean(),
ISs=inception_scores, fids=fids, kids=kids)
f.write(io_buffer.getvalue())
else:
# Compute FID/KID/IS on all samples together.
if not inceptionv3:
inception_score = tfgan.eval.classifier_score_from_logits(all_logits)
else:
inception_score = -1
fid = tfgan.eval.frechet_classifier_distance_from_activations(
data_pools, all_pools)
# Hack to get tfgan KID work for eager execution.
tf_data_pools = tf.convert_to_tensor(data_pools)
tf_all_pools = tf.convert_to_tensor(all_pools)
kid = tfgan.eval.kernel_classifier_distance_from_activations(
tf_data_pools, tf_all_pools).numpy()
del tf_data_pools, tf_all_pools
logging.info(
"ckpt-%d --- loss: %.6e, inception_score: %.6e, FID: %.6e, KID: %.6e",
ckpt, all_losses.mean(), inception_score, fid, kid)
with tf.io.gfile.GFile(os.path.join(eval_dir, f"report_{ckpt}.npz"),
"wb") as f:
io_buffer = io.BytesIO()
np.savez_compressed(
io_buffer, all_losses=all_losses, mean_loss=all_losses.mean(),
IS=inception_score, fid=fid, kid=kid)
f.write(io_buffer.getvalue())
else:
# For host_id() != 0.
# Use file existence to emulate synchronization across hosts.
if hasattr(config.eval, "num_partitions"):
assert not inceptionv3
while not tf.io.gfile.exists(
os.path.join(eval_dir, f"report_all_{ckpt}.npz")):
time.sleep(1.)
else:
while not tf.io.gfile.exists(
os.path.join(eval_dir, f"report_{ckpt}.npz")):
time.sleep(1.)
# Save eval_meta after computing IS/KID/FID to mark the end of evaluation
# for this checkpoint.
checkpoints.save_checkpoint(
eval_dir,
eval_meta,
step=ckpt * num_rounds + r,
keep=1,
prefix=f"meta_{jax.host_id()}_")
begin_round = 0
# Remove all meta files after finishing evaluation.
meta_files = tf.io.gfile.glob(
os.path.join(eval_dir, f"meta_{jax.host_id()}_*"))
for file in meta_files:
tf.io.gfile.remove(file)
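# Hypothetical driver sketch, not part of this library: one way the train()
# and evaluate() entry points above might be wired into an absl app. The
# `configs.cifar10` module and flag names are assumptions, not real paths.
#
# from absl import app, flags
# from configs import cifar10  # assumed config module exposing get_config()
#
# FLAGS = flags.FLAGS
# flags.DEFINE_string("workdir", None, "Work directory.")
# flags.DEFINE_enum("mode", "train", ["train", "eval"], "Run mode.")
#
# def main(argv):
#   config = cifar10.get_config()
#   if FLAGS.mode == "train":
#     train(config, FLAGS.workdir)
#   else:
#     evaluate(config, FLAGS.workdir)
#
# if __name__ == "__main__":
#   app.run(main)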
| apache-2.0 | -2,651,883,456,299,889,700 | 38.79529 | 114 | 0.630946 | false |
mcalmer/spacewalk | client/rhel/rhnlib/rhn/rpclib.py | 1 | 24163 | #
# This module contains all the RPC-related functions the RHN code uses
#
# Copyright (c) 2005--2018 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
__version__ = "$Revision$"
import socket
import re
import sys
from rhn import transports
from rhn.i18n import sstr
from rhn.UserDictCase import UserDictCase
try: # python2
import xmlrpclib
from types import ListType, TupleType, StringType, UnicodeType, DictType, DictionaryType
from urllib import splittype, splithost
except ImportError: # python3
import xmlrpc.client as xmlrpclib
ListType = list
TupleType = tuple
StringType = bytes
UnicodeType = str
DictType = dict
DictionaryType = dict
from urllib.parse import splittype, splithost
# Redirection handling
MAX_REDIRECTIONS = 5
def check_ipv6(n):
""" Returns true if n is IPv6 address, false otherwise. """
try:
socket.inet_pton(socket.AF_INET6, n)
return True
except:
return False
def split_host(hoststring):
""" Function used to split host information in an URL per RFC 2396
handle full hostname like user:passwd@host:port
"""
l = hoststring.split('@', 1)
host = None
port = None
user = None
passwd = None
if len(l) == 2:
hostport = l[1]
# userinfo present
userinfo = l[0].split(':', 1)
user = userinfo[0]
if len(userinfo) == 2:
passwd = userinfo[1]
else:
hostport = l[0]
# Now parse hostport
if hostport[0] == '[':
# IPv6 with port
host, port = re.split('(?<=\]):', hostport, 1)
host = host.lstrip('[').rstrip(']')
elif check_ipv6(hostport):
# just IPv6
host = hostport
else:
# IPv4
arr = hostport.split(':', 1)
host = arr[0]
if len(arr) == 2:
port = arr[1]
return (host, port, user, passwd)
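# Illustrative expectations for split_host (examples added for clarity, not in
# the original source):
#   split_host("user:secret@[::1]:443") -> ("::1", "443", "user", "secret")
#   split_host("rhn.example.com:8080") -> ("rhn.example.com", "8080", None, None)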
def get_proxy_info(proxy):
if proxy == None:
raise ValueError("Host string cannot be null")
arr = proxy.split('://', 1)
if len(arr) == 2:
# scheme found, strip it
proxy = arr[1]
return split_host(proxy)
class MalformedURIError(IOError):
pass
# Originally taken from xmlrpclib.ServerProxy; most of the code has since been changed
class Server:
"""uri [,options] -> a logical connection to an XML-RPC server
uri is the connection point on the server, given as
scheme://host/target.
The standard implementation always supports the "http" scheme. If
SSL socket support is available (Python 2.0), it also supports
"https".
If the target part and the slash preceding it are both omitted,
"/RPC2" is assumed.
The following options can be given as keyword arguments:
transport: a transport factory
encoding: the request encoding (default is UTF-8)
verbose: verbosity level
proxy: use an HTTP proxy
username: username for authenticated HTTP proxy
password: password for authenticated HTTP proxy
All 8-bit strings passed to the server proxy are assumed to use
the given encoding.
"""
# Default factories
_transport_class = transports.Transport
_transport_class_https = transports.SafeTransport
_transport_class_proxy = transports.ProxyTransport
_transport_class_https_proxy = transports.SafeProxyTransport
def __init__(self, uri, transport=None, encoding=None, verbose=0,
proxy=None, username=None, password=None, refreshCallback=None,
progressCallback=None, timeout=None):
# establish a "logical" server connection
#
# First parse the proxy information if available
#
if proxy != None:
(ph, pp, pu, pw) = get_proxy_info(proxy)
if pp is not None:
proxy = "%s:%s" % (ph, pp)
else:
proxy = ph
# username and password will override whatever was passed in the
# URL
if pu is not None and username is None:
username = pu
if pw is not None and password is None:
password = pw
self._uri = sstr(uri)
self._refreshCallback = None
self._progressCallback = None
self._bufferSize = None
self._proxy = proxy
self._username = username
self._password = password
self._timeout = timeout
if len(__version__.split()) > 1:
self.rpc_version = __version__.split()[1]
else:
self.rpc_version = __version__
self._reset_host_handler_and_type()
if transport is None:
self._allow_redirect = 1
transport = self.default_transport(self._type, proxy, username,
password, timeout)
else:
#
            # don't allow redirects on unknown transports; those should be
            # set up independently
#
self._allow_redirect = 0
self._redirected = None
self.use_handler_path = 1
self._transport = transport
self._trusted_cert_files = []
self._lang = None
self._encoding = encoding
self._verbose = verbose
self.set_refresh_callback(refreshCallback)
self.set_progress_callback(progressCallback)
# referer, which redirect us to new handler
self.send_handler=None
self._headers = UserDictCase()
def default_transport(self, type, proxy=None, username=None, password=None,
timeout=None):
if proxy:
if type == 'https':
transport = self._transport_class_https_proxy(proxy,
proxyUsername=username, proxyPassword=password, timeout=timeout)
else:
transport = self._transport_class_proxy(proxy,
proxyUsername=username, proxyPassword=password, timeout=timeout)
else:
if type == 'https':
transport = self._transport_class_https(timeout=timeout)
else:
transport = self._transport_class(timeout=timeout)
return transport
def allow_redirect(self, allow):
self._allow_redirect = allow
def redirected(self):
if not self._allow_redirect:
return None
return self._redirected
def set_refresh_callback(self, refreshCallback):
self._refreshCallback = refreshCallback
self._transport.set_refresh_callback(refreshCallback)
def set_buffer_size(self, bufferSize):
self._bufferSize = bufferSize
self._transport.set_buffer_size(bufferSize)
def set_progress_callback(self, progressCallback, bufferSize=16384):
self._progressCallback = progressCallback
self._transport.set_progress_callback(progressCallback, bufferSize)
def _req_body(self, params, methodname):
return xmlrpclib.dumps(params, methodname, encoding=self._encoding)
def get_response_headers(self):
if self._transport:
return self._transport.headers_in
return None
def get_response_status(self):
if self._transport:
return self._transport.response_status
return None
def get_response_reason(self):
if self._transport:
return self._transport.response_reason
return None
def get_content_range(self):
"""Returns a dictionary with three values:
length: the total length of the entity-body (can be None)
first_byte_pos: the position of the first byte (zero based)
last_byte_pos: the position of the last byte (zero based)
The range is inclusive; that is, a response 8-9/102 means two bytes
"""
headers = self.get_response_headers()
if not headers:
return None
content_range = headers.get('Content-Range')
if not content_range:
return None
        arr = list(filter(None, content_range.split()))
assert arr[0] == "bytes"
assert len(arr) == 2
arr = arr[1].split('/')
assert len(arr) == 2
brange, total_len = arr
if total_len == '*':
# Per RFC, the server is allowed to use * if the length of the
# entity-body is unknown or difficult to determine
total_len = None
else:
total_len = int(total_len)
start, end = brange.split('-')
result = {
'length' : total_len,
'first_byte_pos' : int(start),
'last_byte_pos' : int(end),
}
return result
def accept_ranges(self):
headers = self.get_response_headers()
if not headers:
return None
if 'Accept-Ranges' in headers:
return headers['Accept-Ranges']
return None
def _reset_host_handler_and_type(self):
""" Reset the attributes:
self._host, self._handler, self._type
        according to the value of self._uri.
"""
# get the url
type, uri = splittype(self._uri)
if type is None:
raise MalformedURIError("missing protocol in uri")
# with a real uri passed in, uri will now contain "//hostname..." so we
# need at least 3 chars for it to maybe be ok...
if len(uri) < 3 or uri[0:2] != "//":
raise MalformedURIError
self._type = type.lower()
if self._type not in ("http", "https"):
raise IOError("unsupported XML-RPC protocol")
self._host, self._handler = splithost(uri)
if not self._handler:
self._handler = "/RPC2"
def _strip_characters(self, *args):
""" Strip characters, which are not allowed according:
http://www.w3.org/TR/2006/REC-xml-20060816/#charsets
From spec:
Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
"""
regexp = r'[\x00-\x09]|[\x0b-\x0c]|[\x0e-\x1f]'
result=[]
for item in args:
item_type = type(item)
if item_type == StringType or item_type == UnicodeType:
item = re.sub(regexp, '', sstr(item))
elif item_type == TupleType:
item = tuple(self._strip_characters(i) for i in item)
elif item_type == ListType:
item = [self._strip_characters(i) for i in item]
elif item_type == DictType or item_type == DictionaryType:
item = dict([(self._strip_characters(name, val)) for name, val in item.items()])
# else: some object - should take care of himself
# numbers - are safe
result.append(item)
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _request(self, methodname, params):
""" Call a method on the remote server
we can handle redirections. """
# the loop is used to handle redirections
redirect_response = 0
retry = 0
self._reset_host_handler_and_type()
while 1:
if retry >= MAX_REDIRECTIONS:
raise InvalidRedirectionError(
"Unable to fetch requested Package")
# Clear the transport headers first
self._transport.clear_headers()
for k, v in self._headers.items():
self._transport.set_header(k, v)
self._transport.add_header("X-Info",
'RPC Processor (C) Red Hat, Inc (version %s)' %
self.rpc_version)
# identify the capability set of this client to the server
self._transport.set_header("X-Client-Version", 1)
if self._allow_redirect:
# Advertise that we follow redirects
#changing the version from 1 to 2 to support backward compatibility
self._transport.add_header("X-RHN-Transport-Capability",
"follow-redirects=3")
if redirect_response:
self._transport.add_header('X-RHN-Redirect', '0')
if self.send_handler:
self._transport.add_header('X-RHN-Path', self.send_handler)
request = self._req_body(self._strip_characters(params), methodname)
try:
response = self._transport.request(self._host, \
self._handler, request, verbose=self._verbose)
save_response = self._transport.response_status
except xmlrpclib.ProtocolError:
if self.use_handler_path:
raise
else:
save_response = sys.exc_info()[1].errcode
self._redirected = None
retry += 1
if save_response == 200:
# exit redirects loop and return response
break
elif save_response not in (301, 302):
# Retry pkg fetch
self.use_handler_path = 1
continue
# rest of loop is run only if we are redirected (301, 302)
self._redirected = self._transport.redirected()
self.use_handler_path = 0
redirect_response = 1
if not self._allow_redirect:
raise InvalidRedirectionError("Redirects not allowed")
if self._verbose:
print("%s redirected to %s" % (self._uri, self._redirected))
typ, uri = splittype(self._redirected)
            if typ is not None:
typ = typ.lower()
if typ not in ("http", "https"):
raise InvalidRedirectionError(
"Redirected to unsupported protocol %s" % typ)
#
# We forbid HTTPS -> HTTP for security reasons
# Note that HTTP -> HTTPS -> HTTP is allowed (because we compare
# the protocol for the redirect with the original one)
#
if self._type == "https" and typ == "http":
raise InvalidRedirectionError(
"HTTPS redirected to HTTP is not supported")
self._host, self._handler = splithost(uri)
if not self._handler:
self._handler = "/RPC2"
# Create a new transport for the redirected service and
# set up the parameters on the new transport
del self._transport
self._transport = self.default_transport(typ, self._proxy,
self._username, self._password, self._timeout)
self.set_progress_callback(self._progressCallback)
self.set_refresh_callback(self._refreshCallback)
self.set_buffer_size(self._bufferSize)
self.setlang(self._lang)
if self._trusted_cert_files != [] and \
hasattr(self._transport, "add_trusted_cert"):
for certfile in self._trusted_cert_files:
self._transport.add_trusted_cert(certfile)
# Then restart the loop to try the new entry point.
if isinstance(response, transports.File):
# Just return the file
return response
# an XML-RPC encoded data structure
if isinstance(response, TupleType) and len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<%s for %s%s>" %
(self.__class__.__name__, self._host, self._handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return _Method(self._request, name)
    # note: to call a remote object with a non-standard name, use
    # result = getattr(server, "strange-python-name")(args)
def set_transport_flags(self, transfer=0, encoding=0, **kwargs):
if not self._transport:
# Nothing to do
return
kwargs.update({
'transfer' : transfer,
'encoding' : encoding,
})
self._transport.set_transport_flags(**kwargs)
def get_transport_flags(self):
if not self._transport:
# Nothing to do
return {}
return self._transport.get_transport_flags()
def reset_transport_flags(self):
# Does nothing
pass
# Allow user-defined additional headers.
def set_header(self, name, arg):
if type(arg) in [ type([]), type(()) ]:
# Multivalued header
self._headers[name] = [str(a) for a in arg]
else:
self._headers[name] = str(arg)
def add_header(self, name, arg):
if name in self._headers:
vlist = self._headers[name]
if not isinstance(vlist, ListType):
vlist = [ vlist ]
else:
vlist = self._headers[name] = []
vlist.append(str(arg))
# Sets the i18n options
def setlang(self, lang):
self._lang = lang
if self._transport and hasattr(self._transport, "setlang"):
self._transport.setlang(lang)
# Sets the CA chain to be used
def use_CA_chain(self, ca_chain = None):
raise NotImplementedError("This method is deprecated")
def add_trusted_cert(self, certfile):
self._trusted_cert_files.append(certfile)
if self._transport and hasattr(self._transport, "add_trusted_cert"):
self._transport.add_trusted_cert(certfile)
def close(self):
if self._transport:
self._transport.close()
self._transport = None
# RHN GET server
class GETServer(Server):
def __init__(self, uri, transport=None, proxy=None, username=None,
password=None, client_version=2, headers={}, refreshCallback=None,
progressCallback=None, timeout=None):
Server.__init__(self, uri,
proxy=proxy,
username=username,
password=password,
transport=transport,
refreshCallback=refreshCallback,
progressCallback=progressCallback,
timeout=timeout)
self._client_version = client_version
self._headers = headers
# Back up the original handler, since we mangle it
self._orig_handler = self._handler
# Download resumption
self.set_range(offset=None, amount=None)
def _req_body(self, params, methodname):
if not params or len(params) < 1:
raise Exception("Required parameter channel not found")
# Strip the multiple / from the handler
h_comps = filter(lambda x: x != '', self._orig_handler.split('/'))
# Set the handler we are going to request
hndl = h_comps + ["$RHN", params[0], methodname] + list(params[1:])
self._handler = '/' + '/'.join(hndl)
#save the constructed handler in case of redirect
self.send_handler = self._handler
# Add headers
#override the handler to replace /XMLRPC with pkg path
if self._redirected and not self.use_handler_path:
self._handler = self._new_req_body()
for h, v in self._headers.items():
self._transport.set_header(h, v)
if self._offset is not None:
if self._offset >= 0:
brange = str(self._offset) + '-'
if self._amount is not None:
brange = brange + str(self._offset + self._amount - 1)
else:
# The last bytes
# amount is ignored in this case
brange = '-' + str(-self._offset)
self._transport.set_header('Range', "bytes=" + brange)
# Flag that we allow for partial content
self._transport.set_transport_flags(allow_partial_content=1)
# GET requests have empty body
return ""
def _new_req_body(self):
type, tmpuri = splittype(self._redirected)
site, handler = splithost(tmpuri)
return handler
def set_range(self, offset=None, amount=None):
if offset is not None:
try:
offset = int(offset)
except ValueError:
# Error
raise RangeError("Invalid value `%s' for offset" % offset, None, sys.exc_info()[2])
if amount is not None:
try:
amount = int(amount)
except ValueError:
# Error
raise RangeError("Invalid value `%s' for amount" % amount, None, sys.exc_info()[2])
if amount <= 0:
raise RangeError("Invalid value `%s' for amount" % amount)
self._amount = amount
self._offset = offset
def reset_transport_flags(self):
self._transport.set_transport_flags(allow_partial_content=0)
def __getattr__(self, name):
# magic method dispatcher
return SlicingMethod(self._request, name)
def default_transport(self, type, proxy=None, username=None, password=None,
timeout=None):
ret = Server.default_transport(self, type, proxy=proxy, username=username, password=password, timeout=timeout)
ret.set_method("GET")
return ret
class RangeError(Exception):
pass
class InvalidRedirectionError(Exception):
pass
def getHeaderValues(headers, name):
import mimetools
if not isinstance(headers, mimetools.Message):
if name in headers:
return [headers[name]]
return []
return [x.split(':', 1)[1].strip() for x in
headers.getallmatchingheaders(name)]
class _Method:
""" some magic to bind an XML-RPC method to an RPC server.
supports "nested" methods (e.g. examples.getStateName)
"""
def __init__(self, send, name):
self._send = send
self._name = name
def __getattr__(self, name):
return _Method(self._send, "%s.%s" % (self._name, name))
def __call__(self, *args):
return self._send(self._name, args)
def __repr__(self):
return (
"<%s %s (%s)>" %
(self.__class__.__name__, self._name, self._send)
)
__str__ = __repr__
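# Illustrative sketch (not part of the original module): attribute access on a
# Server instance builds _Method chains, so a (hypothetical) call such as
#
#     server.registration.welcome_message()
#
# ends up invoking server._request("registration.welcome_message", ()).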
class SlicingMethod(_Method):
"""
A "slicing method" allows for byte range requests
"""
def __init__(self, send, name):
_Method.__init__(self, send, name)
self._offset = None
def __getattr__(self, name):
return SlicingMethod(self._send, "%s.%s" % (self._name, name))
def __call__(self, *args, **kwargs):
self._offset = kwargs.get('offset')
self._amount = kwargs.get('amount')
# im_self is a pointer to self, so we can modify the class underneath
try:
self._send.im_self.set_range(offset=self._offset,
amount=self._amount)
except AttributeError:
pass
result = self._send(self._name, args)
# Reset "sticky" transport flags
try:
self._send.im_self.reset_transport_flags()
except AttributeError:
pass
return result
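# Illustrative sketch (not part of the original module): SlicingMethod passes
# offset/amount through to GETServer.set_range(), so a byte-range fetch looks
# roughly like
#
#     server = GETServer("http://example.invalid/XMLRPC")   # hypothetical URL
#     data = server.getPackage("channel-label", "pkg.rpm",  # hypothetical names
#                              offset=0, amount=16384)
#
# which becomes a "Range: bytes=0-16383" header in GETServer._req_body() above.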
def reportError(headers):
""" Reports the error from the headers. """
errcode = 0
errmsg = ""
s = "X-RHN-Fault-Code"
if s in headers:
errcode = int(headers[s])
s = "X-RHN-Fault-String"
if s in headers:
_sList = getHeaderValues(headers, s)
if _sList:
_s = ''.join(_sList)
import base64
errmsg = "%s" % base64.decodestring(_s)
return errcode, errmsg
| gpl-2.0 | 4,209,307,803,917,549,600 | 32.794406 | 169 | 0.573687 | false |
bsipocz/ccdproc | ccdproc/tests/pytest_fixtures.py | 1 | 1765 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from astropy.tests.helper import pytest
from astropy import units as u
from astropy.utils import NumpyRNGContext
from ..ccddata import CCDData
# If additional pytest markers are defined, the key in the dictionary below
# should be the name of the marker.
DEFAULTS = {
'seed': 123,
'data_size': 100,
'data_scale': 1.0,
'data_mean': 0.0
}
DEFAULT_SEED = 123
DEFAULT_DATA_SIZE = 100
DEFAULT_DATA_SCALE = 1.0
def value_from_markers(key, request):
try:
val = request.keywords[key].args[0]
except KeyError:
val = DEFAULTS[key]
return val
@pytest.fixture
def ccd_data(request):
"""
Return a CCDData object with units of ADU.
The size of the data array is 100x100 but can be changed using the marker
@pytest.mark.data_size(N) on the test function, where N should be the
desired dimension.
Data values are initialized to random numbers drawn from a normal
distribution with mean of 0 and scale 1.
    The scale can be changed with the marker @pytest.mark.data_scale(s) on the
    test function, where s is the desired scale.
    The mean can be changed with the marker @pytest.mark.data_mean(m) on the
    test function, where m is the desired mean.
"""
size = value_from_markers('data_size', request)
scale = value_from_markers('data_scale', request)
mean = value_from_markers('data_mean', request)
with NumpyRNGContext(DEFAULTS['seed']):
data = np.random.normal(loc=mean, size=[size, size], scale=scale)
fake_meta = {'my_key': 42, 'your_key': 'not 42'}
ccd = CCDData(data, unit=u.adu)
ccd.header = fake_meta
return ccd
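# Illustrative usage sketch (not part of the original module): a test built on
# this fixture can request a different array via the markers read by
# value_from_markers above, e.g.
#
#     @pytest.mark.data_size(50)
#     @pytest.mark.data_mean(5.0)
#     def test_mean_is_roughly_five(ccd_data):
#         assert ccd_data.data.shape == (50, 50)
#         assert abs(ccd_data.data.mean() - 5.0) < 1.0   # loose, illustrative bound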
| bsd-3-clause | 1,015,693,112,330,947,700 | 27.467742 | 77 | 0.67932 | false |
jyates/SimpleDBHammer | src/mongo.py | 1 | 1090 | '''
Copyright 2011 Jesse Yates
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Description: Example class for running mongo
'''
from hammerclient import Client
def main():
"""To run this example:
1) start mongod on the localhost, running on port 27017
2) Add a database to mongo named "test"
3) Run: "./python mongo.py"
"""
# Kind of cheating here by just setting the arguments you should be passing in
client = Client(["-c", "examples/mongo_example_hammer.cfg"])
client.start()
if __name__ == '__main__':
main()
| apache-2.0 | -4,374,926,212,630,796,300 | 31.088235 | 82 | 0.686239 | false |
jenfly/monsoon-onset | scripts/thesis-figs.py | 1 | 9561 | import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
sys.path.append('/home/jwalker/dynamics/python/monsoon-onset')
import numpy as np
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import animation
import collections
import atmos as atm
import merra
import indices
import utils
figwidth = 12
style = atm.homedir() + 'dynamics/python/mpl-styles/presentation.mplstyle'
plt.style.use(style)
fontsize = mpl.rcParams['font.size']
# ----------------------------------------------------------------------
pcpfile = '/home/jwalker/datastore/gpcp/gpcp_daily_1997-2014.nc'
datadir = atm.homedir() + 'datastore/merra2/analysis/'
files = {'PREC' : datadir + 'gpcp_dailyrel_CHP_MFC_1997-2015.nc'}
for nm in ['U', 'V']:
files[nm] = datadir + 'merra2_%s850_dailyrel_CHP_MFC_1980-2015.nc' % nm
mldfile = atm.homedir() + 'datastore/mld/ifremer_mld_DT02_c1m_reg2.0.nc'
indfile = datadir + 'merra2_index_CHP_MFC_1980-2015.nc'
lon1, lon2 = 60, 100
ndays = 5
with xray.open_dataset(pcpfile) as ds:
pcp = atm.subset(ds, {'day' : (1, 365)})
pcp.load()
for ssn in ['JAN', 'JUL', 'JJAS']:
days = atm.season_days(ssn)
pcp[ssn] = atm.dim_mean(pcp['PREC'], 'day', min(days), max(days))
pcp['ANN'] = pcp['PREC'].sum(dim='day')
pcp_jjas = pcp['PREC'].sel(day=atm.season_days('JJAS')).sum(dim='day')
pcp['FRAC'] = pcp_jjas / pcp['ANN']
pcp['PREC'] = atm.rolling_mean(pcp['PREC'], ndays, axis=0, center=True)
pcp['SECTOR'] = atm.dim_mean(pcp['PREC'], 'lon', lon1, lon2)
# Composites relative to onset day
data = {}
for nm in files:
filenm = files[nm]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
if 'year' in var:
var = var.mean(dim='year')
daydim = atm.get_coord(var, 'dayrel', 'dim')
data[nm] = atm.rolling_mean(var, ndays, axis=daydim)
# Mixed layer depths
imonth = 4 # Index for month of May
with xray.open_dataset(mldfile, decode_times=False) as ds:
mld = ds['mld'][imonth].load()
dims, coords = mld.dims, mld.coords
missval = mld.attrs['mask_value']
vals = mld.values
vals = np.ma.masked_array(vals, vals==missval)
vals = np.ma.filled(vals, np.nan)
mld = xray.DataArray(vals, dims=dims, coords=coords)
# Onset/retreat indices and timeseries
with xray.open_dataset(indfile) as index:
index.load()
# ----------------------------------------------------------------------
# Global precip maps in winter/summer
def precip_global(precip, clev=np.arange(0, 16.5, 1), cmap='hot_r'):
cticks = range(0, 17, 2)
m = atm.contourf_latlon(precip, clev=clev, cmap=cmap, extend='max',
colorbar=False)
cb = m.colorbar(ticks=cticks, size='3%')
cb.ax.set_title('mm/day', fontsize=12)
ssn_dict = {'JAN' : 'January', 'JUL' : 'July'}
fig_kw = {'figsize' : (0.75 * figwidth, 0.8 * figwidth)}
grp = atm.FigGroup(2, 1, fig_kw=fig_kw)
for ssn in ['JAN', 'JUL']:
grp.next()
precip_global(pcp[ssn])
plt.title(ssn_dict[ssn])
# Hovmoller plot of sector mean precip
def hovmoller(precip, clev=np.arange(0, 12.5, 1), cticks=np.arange(0, 12.5, 2),
cmap='hot_r', ylimits=(-40, 40)):
lat = atm.get_coord(precip, 'lat')
days = atm.get_coord(precip, 'day')
plt.contourf(days, lat, precip.T, clev, cmap=cmap, extend='max')
cb = plt.colorbar(ticks=cticks)
cb.ax.set_title('mm/day', fontsize=12)
plt.ylim(ylimits)
plt.xlim(2, 365)
plt.ylabel('Latitude')
plt.xlabel('Day of Year')
plt.figure(figsize=(0.8 * figwidth, 0.4*figwidth))
hovmoller(pcp['SECTOR'])
# Map of monsoon region
plt.figure(figsize=(0.4*figwidth, 0.6*figwidth))
m = atm.init_latlon(-50, 50, 40, 120, coastlines=False)
m.shadedrelief(scale=0.3)
yticks = range(-45, 46, 15)
xticks = range(40, 121, 20)
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.yticks(yticks, atm.latlon_labels(yticks, 'lat'))
#atm.geobox(10, 30, 60, 100, m=m, color='k')
# JJAS precip and fraction of annual totals
axlims = (-15, 35, 50, 115)
xticks = range(40, 121, 10)
clev = np.arange(0, 18.5, 1)
plt.figure(figsize=(0.8*figwidth, 0.5*figwidth))
m = atm.init_latlon(axlims[0], axlims[1], axlims[2], axlims[3], resolution='l')
atm.contourf_latlon(pcp['JJAS'], clev=clev, m=m, cmap='hot_r', extend='max')
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
_, cs = atm.contour_latlon(pcp['FRAC'], clev=[0.5], m=m, colors='b',
linewidths=2)
label_locs = [(65, 12)]
cs_opts = {'fmt' : '%.1f', 'fontsize' : fontsize, 'manual' : label_locs}
plt.clabel(cs, **cs_opts)
atm.geobox(10, 30, 60, 100, m=m, color='g')
plt.xlim(axlims[2], axlims[3])
# Mixed layer depths
def mld_map(mld, cmap='Blues', axlims=(0, 35, 58, 102), climits=(10, 60),
cticks=range(10, 71, 10), clevs=None):
cb_kwargs = {'ticks' : cticks, 'extend' : 'both'}
m = atm.init_latlon(axlims[0], axlims[1], axlims[2], axlims[3],
resolution='l', coastlines=False,
fillcontinents=True)
m.drawcoastlines(linewidth=0.5, color='0.5')
atm.pcolor_latlon(mld, m=m, cmap=cmap, cb_kwargs=cb_kwargs)
plt.clim(climits)
lat0 = 15.5
plt.figure(figsize=(0.5*figwidth, 0.35*figwidth))
mld_map(mld)
plt.axhline(lat0, color='k')
# ------------------------------------------------------------------------
# Animation of precip and winds
def animate(i):
days = range(-136, 227, 1)
day = days[i]
axlims=(-30, 45, 40, 120)
dx, dy = 5, 5
climits=(0, 20)
cmap = 'hot_r'
d0 = 138
cticks=np.arange(4, 21, 2)
scale = 250
clev=np.arange(4, 20.5, 1)
lat1, lat2, lon1, lon2 = axlims
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
xticks = range(40, 121, 20)
yticks = range(-20, 41, 10)
mm, dd = atm.jday_to_mmdd(day + d0)
title = (atm.month_str(mm)).capitalize() + ' %d' % dd
u = atm.subset(data['U'].sel(dayrel=day), subset_dict)
v = atm.subset(data['V'].sel(dayrel=day), subset_dict)
u = u[::dy, ::dx]
v = v[::dy, ::dx]
#spd = np.sqrt(u**2 + v**2)
pcp = data['PREC'].sel(dayrel=day)
lat = atm.get_coord(u, 'lat')
lon = atm.get_coord(u, 'lon')
plt.clf()
m = atm.init_latlon(lat1, lat2, lon1, lon2, coastlines=False)
m.drawcoastlines(color='k', linewidth=0.5)
m.shadedrelief(scale=0.3)
atm.contourf_latlon(pcp, clev=clev, axlims=axlims, m=m, cmap=cmap,
extend='max', cb_kwargs={'ticks' : cticks})
#atm.pcolor_latlon(pcp, axlims=axlims, cmap=cmap, cb_kwargs={'extend' : 'max'})
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.yticks(yticks, atm.latlon_labels(yticks, 'lat'))
plt.clim(climits)
#plt.quiver(lon, lat, u, v, linewidths=spd.values.ravel())
plt.quiver(lon, lat, u, v, scale=scale, pivot='middle')
plt.title(title)
plt.draw()
fig = plt.figure()
days = range(-136, 227, 1)
#anim = animation.FuncAnimation(fig, animate, frames=len(days),
# interval=20, blit=True)
#anim = animation.FuncAnimation(fig, animate, frames=len(days))
anim = animation.FuncAnimation(fig, animate, frames=30)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
writer=animation.FFMpegWriter(bitrate=500)
print('Saving animation')
anim.save('figs/anim/test.mp4', writer=writer, fps=30)
print('Done')
# --------------------------------------------------------------------------
# def animate(data, day, axlims=(-30, 45, 40, 120), dx=5, dy=5, climits=(0, 20),
# cmap='hot_r', d0=138, clev=np.arange(4, 20.5, 1),
# cticks=np.arange(4, 21, 2), scale=250):
# lat1, lat2, lon1, lon2 = axlims
# subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
# xticks = range(40, 121, 20)
# yticks = range(-20, 41, 10)
# mm, dd = atm.jday_to_mmdd(day + d0)
# title = (atm.month_str(mm)).capitalize() + ' %d' % dd
#
# u = atm.subset(data['U'].sel(dayrel=day), subset_dict)
# v = atm.subset(data['V'].sel(dayrel=day), subset_dict)
# u = u[::dy, ::dx]
# v = v[::dy, ::dx]
# #spd = np.sqrt(u**2 + v**2)
# pcp = data['PREC'].sel(dayrel=day)
# lat = atm.get_coord(u, 'lat')
# lon = atm.get_coord(u, 'lon')
#
# plt.clf()
# m = atm.init_latlon(lat1, lat2, lon1, lon2, coastlines=False)
# m.drawcoastlines(color='k', linewidth=0.5)
# m.shadedrelief(scale=0.3)
# atm.contourf_latlon(pcp, clev=clev, axlims=axlims, m=m, cmap=cmap,
# extend='max', cb_kwargs={'ticks' : cticks})
# #atm.pcolor_latlon(pcp, axlims=axlims, cmap=cmap, cb_kwargs={'extend' : 'max'})
# plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
# plt.yticks(yticks, atm.latlon_labels(yticks, 'lat'))
# plt.clim(climits)
# #plt.quiver(lon, lat, u, v, linewidths=spd.values.ravel())
# plt.quiver(lon, lat, u, v, scale=scale, pivot='middle')
# plt.title(title)
# plt.draw()
#
#
# days = range(-136, 227, 1)
# plt.figure()
# for i, day in enumerate(days):
# animate(data, day)
# filenm = 'figs/anim/frame%03d.png' % i
# print('Saving to ' + filenm)
# plt.savefig(filenm)
| mit | 6,556,450,848,463,915,000 | 35.773077 | 85 | 0.606422 | false |
brunobord/critica | apps/articles_epicurien/admin.py | 1 | 2409 | # -*- coding: utf-8 -*-
"""
Administration interface options for ``critica.apps.articles_epicurien`` models.
"""
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from critica.apps.admin.sites import basic_site, advanced_site
from critica.apps.articles.admin import BaseArticleAdmin
from critica.apps.articles_epicurien.models import ArticleEpicurien, ArticleEpicurienType
class ArticleEpicurienTypeAdmin(admin.ModelAdmin):
"""
Administration interface options of ``ArticleEpicurienType`` model.
"""
list_display = ('name', 'slug')
admin.site.register(ArticleEpicurienType, ArticleEpicurienTypeAdmin)
basic_site.register(ArticleEpicurienType, ArticleEpicurienTypeAdmin)
advanced_site.register(ArticleEpicurienType, ArticleEpicurienTypeAdmin)
class ArticleEpicurienAdmin(BaseArticleAdmin):
"""
Administration interface options of ``Article`` model.
"""
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets for the add form.
"""
fieldsets = [
(_('Headline'), {'fields': ('author_nickname', 'title', 'opinion')}),
(_('Filling'), {'fields': ('issues', 'type', 'tags')}),
(_('Illustration'), {'fields': ('illustration', 'use_default_illustration')}),
(_('Content'), {'fields': ('summary', 'content')}),
]
publication_fields = []
if request.user.has_perm('articles_epicurien.can_feature_article'):
publication_fields.append('is_featured')
if request.user.has_perm('articles_epicurien.can_reserve_article'):
publication_fields.append('is_reserved')
if request.user.has_perm('articles_epicurien.can_publish_article'):
publication_fields.append('is_ready_to_publish')
if request.user.has_perm('articles_epicurien.can_reserve_article') \
or request.user.has_perm('articles_epicurien.can_feature_article') \
or request.user.has_perm('articles_epicurien.can_publish_article'):
fieldsets += [(_('Publication'), {'fields': publication_fields})]
return fieldsets
admin.site.register(ArticleEpicurien, ArticleEpicurienAdmin)
basic_site.register(ArticleEpicurien, ArticleEpicurienAdmin)
advanced_site.register(ArticleEpicurien, ArticleEpicurienAdmin)
| gpl-3.0 | -3,054,804,316,831,667,000 | 38.491803 | 90 | 0.674554 | false |
palerdot/calibre | src/calibre/db/backend.py | 1 | 68501 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
# Imports {{{
import os, shutil, uuid, json, glob, time, cPickle, hashlib, errno
from functools import partial
import apsw
from calibre import isbytestring, force_unicode, prints
from calibre.constants import (iswindows, filesystem_encoding,
preferred_encoding)
from calibre.ptempfile import PersistentTemporaryFile, TemporaryFile
from calibre.db import SPOOL_SIZE
from calibre.db.schema_upgrades import SchemaUpgrade
from calibre.db.delete_service import delete_service
from calibre.db.errors import NoSuchFormat
from calibre.library.field_metadata import FieldMetadata
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.utils.icu import sort_key
from calibre.utils.config import to_json, from_json, prefs, tweaks
from calibre.utils.date import utcfromtimestamp, parse_date
from calibre.utils.filenames import (
is_case_sensitive, samefile, hardlink_file, ascii_filename,
WindowsAtomicFolderMove, atomic_rename, remove_dir_if_empty)
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.formatter_functions import load_user_template_functions
from calibre.db.tables import (OneToOneTable, ManyToOneTable, ManyToManyTable,
SizeTable, FormatsTable, AuthorsTable, IdentifiersTable, PathTable,
CompositeTable, UUIDTable, RatingTable)
# }}}
'''
Differences in semantics from pysqlite:
1. execute/executemany operate in autocommit mode
    2. There is no fetchone() method on cursor objects; use next() instead
3. There is no executescript
'''
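# Illustrative sketch (not part of calibre): given the semantics listed above,
# rows are read by iterating the cursor (or calling next()) rather than
# fetchone(), and multi-statement work is wrapped in a transaction, e.g.
#
#     conn = apsw.Connection(':memory:')
#     row = next(iter(conn.cursor().execute('SELECT 1')))   # no fetchone()
#     with conn:                                            # leaves autocommit mode
#         conn.cursor().execute('CREATE TABLE t(x)')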
CUSTOM_DATA_TYPES = frozenset(['rating', 'text', 'comments', 'datetime',
'int', 'float', 'bool', 'series', 'composite', 'enumeration'])
class DynamicFilter(object): # {{{
'No longer used, present for legacy compatibility'
def __init__(self, name):
self.name = name
self.ids = frozenset([])
def __call__(self, id_):
return int(id_ in self.ids)
def change(self, ids):
self.ids = frozenset(ids)
# }}}
class DBPrefs(dict): # {{{
'Store preferences as key:value pairs in the db'
def __init__(self, db):
dict.__init__(self)
self.db = db
self.defaults = {}
self.disable_setting = False
self.load_from_db()
def load_from_db(self):
self.clear()
for key, val in self.db.conn.get('SELECT key,val FROM preferences'):
try:
val = self.raw_to_object(val)
except:
prints('Failed to read value for:', key, 'from db')
continue
dict.__setitem__(self, key, val)
def raw_to_object(self, raw):
if not isinstance(raw, unicode):
raw = raw.decode(preferred_encoding)
return json.loads(raw, object_hook=from_json)
def to_raw(self, val):
return json.dumps(val, indent=2, default=to_json)
def has_setting(self, key):
return key in self
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.defaults[key]
def __delitem__(self, key):
dict.__delitem__(self, key)
self.db.execute('DELETE FROM preferences WHERE key=?', (key,))
def __setitem__(self, key, val):
if self.disable_setting:
return
raw = self.to_raw(val)
self.db.execute('INSERT OR REPLACE INTO preferences (key,val) VALUES (?,?)', (key, raw))
dict.__setitem__(self, key, val)
def set(self, key, val):
self.__setitem__(key, val)
def get_namespaced(self, namespace, key, default=None):
key = u'namespaced:%s:%s'%(namespace, key)
try:
return dict.__getitem__(self, key)
except KeyError:
return default
def set_namespaced(self, namespace, key, val):
if u':' in key:
raise KeyError('Colons are not allowed in keys')
if u':' in namespace:
raise KeyError('Colons are not allowed in the namespace')
key = u'namespaced:%s:%s'%(namespace, key)
self[key] = val
def write_serialized(self, library_path):
try:
to_filename = os.path.join(library_path, 'metadata_db_prefs_backup.json')
with open(to_filename, "wb") as f:
f.write(json.dumps(self, indent=2, default=to_json))
except:
import traceback
traceback.print_exc()
@classmethod
def read_serialized(cls, library_path, recreate_prefs=False):
from_filename = os.path.join(library_path,
'metadata_db_prefs_backup.json')
with open(from_filename, "rb") as f:
return json.load(f, object_hook=from_json)
# }}}
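# Illustrative sketch (not part of calibre): DBPrefs behaves like a dict backed
# by the ``preferences`` table, so typical use elsewhere in this file looks like
#
#     db.prefs['virtual_libraries'] = {}            # JSON-serialized on write
#     db.prefs.get_namespaced('plugin', 'key')      # stored as namespaced:plugin:key
#
# Values must round-trip through to_json/from_json above.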
# Extra collators {{{
def pynocase(one, two, encoding='utf-8'):
if isbytestring(one):
try:
one = one.decode(encoding, 'replace')
except:
pass
if isbytestring(two):
try:
two = two.decode(encoding, 'replace')
except:
pass
return cmp(one.lower(), two.lower())
def _author_to_author_sort(x):
if not x:
return ''
return author_to_author_sort(x.replace('|', ','))
def icu_collator(s1, s2):
return cmp(sort_key(force_unicode(s1, 'utf-8')),
sort_key(force_unicode(s2, 'utf-8')))
# }}}
# Unused aggregators {{{
def Concatenate(sep=','):
'''String concatenation aggregator for sqlite'''
def step(ctxt, value):
if value is not None:
ctxt.append(value)
def finalize(ctxt):
if not ctxt:
return None
return sep.join(ctxt)
return ([], step, finalize)
def SortedConcatenate(sep=','):
'''String concatenation aggregator for sqlite, sorted by supplied index'''
def step(ctxt, ndx, value):
if value is not None:
ctxt[ndx] = value
def finalize(ctxt):
if len(ctxt) == 0:
return None
return sep.join(map(ctxt.get, sorted(ctxt.iterkeys())))
return ({}, step, finalize)
def IdentifiersConcat():
'''String concatenation aggregator for the identifiers map'''
def step(ctxt, key, val):
ctxt.append(u'%s:%s'%(key, val))
def finalize(ctxt):
return ','.join(ctxt)
return ([], step, finalize)
def AumSortedConcatenate():
'''String concatenation aggregator for the author sort map'''
def step(ctxt, ndx, author, sort, link):
if author is not None:
ctxt[ndx] = ':::'.join((author, sort, link))
def finalize(ctxt):
keys = list(ctxt.iterkeys())
l = len(keys)
if l == 0:
return None
if l == 1:
return ctxt[keys[0]]
return ':#:'.join([ctxt[v] for v in sorted(keys)])
return ({}, step, finalize)
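# Illustrative sketch (not part of calibre): each factory above returns an apsw
# aggregate triple (context, step, finalize), registered further down via
# Connection.createaggregatefunction(), so SQL such as
#
#     SELECT sortconcat(id, name) FROM authors;
#
# would concatenate the names ordered by id with ',' as the separator. These
# aggregators are kept only for backwards compatibility and are no longer used.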
# }}}
class Connection(apsw.Connection): # {{{
BUSY_TIMEOUT = 10000 # milliseconds
def __init__(self, path):
apsw.Connection.__init__(self, path)
self.setbusytimeout(self.BUSY_TIMEOUT)
self.execute('pragma cache_size=5000')
self.execute('pragma temp_store=2')
encoding = self.execute('pragma encoding').next()[0]
self.createcollation('PYNOCASE', partial(pynocase,
encoding=encoding))
self.createscalarfunction('title_sort', title_sort, 1)
self.createscalarfunction('author_to_author_sort',
_author_to_author_sort, 1)
self.createscalarfunction('uuid4', lambda: str(uuid.uuid4()),
0)
# Dummy functions for dynamically created filters
self.createscalarfunction('books_list_filter', lambda x: 1, 1)
self.createcollation('icucollate', icu_collator)
# Legacy aggregators (never used) but present for backwards compat
self.createaggregatefunction('sortconcat', SortedConcatenate, 2)
self.createaggregatefunction('sortconcat_bar',
partial(SortedConcatenate, sep='|'), 2)
self.createaggregatefunction('sortconcat_amper',
partial(SortedConcatenate, sep='&'), 2)
self.createaggregatefunction('identifiers_concat',
IdentifiersConcat, 2)
self.createaggregatefunction('concat', Concatenate, 1)
self.createaggregatefunction('aum_sortconcat',
AumSortedConcatenate, 4)
def create_dynamic_filter(self, name):
f = DynamicFilter(name)
self.createscalarfunction(name, f, 1)
def get(self, *args, **kw):
ans = self.cursor().execute(*args)
if kw.get('all', True):
return ans.fetchall()
try:
return ans.next()[0]
except (StopIteration, IndexError):
return None
def execute(self, sql, bindings=None):
cursor = self.cursor()
return cursor.execute(sql, bindings)
def executemany(self, sql, sequence_of_bindings):
with self: # Disable autocommit mode, for performance
return self.cursor().executemany(sql, sequence_of_bindings)
# }}}
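# Illustrative sketch (not part of calibre): Connection.get() wraps cursor
# execution, e.g.
#
#     conn.get('SELECT uuid FROM library_id', all=False)   # single scalar or None
#     conn.get('SELECT id, name FROM authors')             # list of row tuples
#
# mirroring the self.conn.get() calls elsewhere in this file.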
class DB(object):
PATH_LIMIT = 40 if iswindows else 100
WINDOWS_LIBRARY_PATH_LIMIT = 75
# Initialize database {{{
def __init__(self, library_path, default_prefs=None, read_only=False,
restore_all_prefs=False, progress_callback=lambda x, y:True):
try:
if isbytestring(library_path):
library_path = library_path.decode(filesystem_encoding)
except:
import traceback
traceback.print_exc()
self.field_metadata = FieldMetadata()
self.library_path = os.path.abspath(library_path)
self.dbpath = os.path.join(library_path, 'metadata.db')
self.dbpath = os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH',
self.dbpath)
if iswindows and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
raise ValueError(_(
'Path to library ({0}) too long. Must be less than'
' {1} characters.').format(self.library_path, 259-4*self.PATH_LIMIT-10))
exists = self._exists = os.path.exists(self.dbpath)
if not exists:
# Be more strict when creating new libraries as the old calculation
# allowed for max path lengths of 265 chars.
if (iswindows and len(self.library_path) >
self.WINDOWS_LIBRARY_PATH_LIMIT):
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%self.WINDOWS_LIBRARY_PATH_LIMIT)
if read_only and os.path.exists(self.dbpath):
# Work on only a copy of metadata.db to ensure that
# metadata.db is not changed
pt = PersistentTemporaryFile('_metadata_ro.db')
pt.close()
shutil.copyfile(self.dbpath, pt.name)
self.dbpath = pt.name
if not os.path.exists(os.path.dirname(self.dbpath)):
os.makedirs(os.path.dirname(self.dbpath))
self._conn = None
if self.user_version == 0:
self.initialize_database()
if not os.path.exists(self.library_path):
os.makedirs(self.library_path)
self.is_case_sensitive = is_case_sensitive(self.library_path)
SchemaUpgrade(self, self.library_path, self.field_metadata)
# Guarantee that the library_id is set
self.library_id
# Fix legacy triggers and columns
self.execute('''
DROP TRIGGER IF EXISTS author_insert_trg;
CREATE TEMP TRIGGER author_insert_trg
AFTER INSERT ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name) WHERE id=NEW.id;
END;
DROP TRIGGER IF EXISTS author_update_trg;
CREATE TEMP TRIGGER author_update_trg
BEFORE UPDATE ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name)
WHERE id=NEW.id AND name <> NEW.name;
END;
UPDATE authors SET sort=author_to_author_sort(name) WHERE sort IS NULL;
''')
# Initialize_prefs must be called before initialize_custom_columns because
# icc can set a pref.
self.initialize_prefs(default_prefs, restore_all_prefs, progress_callback)
self.initialize_custom_columns()
self.initialize_tables()
load_user_template_functions(self.library_id,
self.prefs.get('user_template_functions', []))
def initialize_prefs(self, default_prefs, restore_all_prefs, progress_callback): # {{{
self.prefs = DBPrefs(self)
if default_prefs is not None and not self._exists:
progress_callback(None, len(default_prefs))
# Only apply default prefs to a new database
for i, key in enumerate(default_prefs):
# be sure that prefs not to be copied are listed below
if restore_all_prefs or key not in frozenset(['news_to_be_synced']):
self.prefs[key] = default_prefs[key]
progress_callback(_('restored preference ') + key, i+1)
if 'field_metadata' in default_prefs:
fmvals = [f for f in default_prefs['field_metadata'].values()
if f['is_custom']]
progress_callback(None, len(fmvals))
for i, f in enumerate(fmvals):
progress_callback(_('creating custom column ') + f['label'], i)
self.create_custom_column(f['label'], f['name'],
f['datatype'],
(f['is_multiple'] is not None and
len(f['is_multiple']) > 0),
f['is_editable'], f['display'])
defs = self.prefs.defaults
defs['gui_restriction'] = defs['cs_restriction'] = ''
defs['categories_using_hierarchy'] = []
defs['column_color_rules'] = []
defs['column_icon_rules'] = []
defs['grouped_search_make_user_categories'] = []
defs['similar_authors_search_key'] = 'authors'
defs['similar_authors_match_kind'] = 'match_any'
defs['similar_publisher_search_key'] = 'publisher'
defs['similar_publisher_match_kind'] = 'match_any'
defs['similar_tags_search_key'] = 'tags'
defs['similar_tags_match_kind'] = 'match_all'
defs['similar_series_search_key'] = 'series'
defs['similar_series_match_kind'] = 'match_any'
defs['book_display_fields'] = [
('title', False), ('authors', True), ('formats', True),
('series', True), ('identifiers', True), ('tags', True),
('path', True), ('publisher', False), ('rating', False),
('author_sort', False), ('sort', False), ('timestamp', False),
('uuid', False), ('comments', True), ('id', False), ('pubdate', False),
('last_modified', False), ('size', False), ('languages', False),
]
defs['virtual_libraries'] = {}
defs['virtual_lib_on_startup'] = defs['cs_virtual_lib_on_startup'] = ''
defs['virt_libs_hidden'] = defs['virt_libs_order'] = ()
defs['update_all_last_mod_dates_on_start'] = False
defs['field_under_covers_in_grid'] = 'title'
# Migrate the bool tristate tweak
defs['bools_are_tristate'] = \
tweaks.get('bool_custom_columns_are_tristate', 'yes') == 'yes'
if self.prefs.get('bools_are_tristate') is None:
self.prefs.set('bools_are_tristate', defs['bools_are_tristate'])
# Migrate column coloring rules
if self.prefs.get('column_color_name_1', None) is not None:
from calibre.library.coloring import migrate_old_rule
old_rules = []
for i in range(1, 6):
col = self.prefs.get('column_color_name_'+str(i), None)
templ = self.prefs.get('column_color_template_'+str(i), None)
if col and templ:
try:
del self.prefs['column_color_name_'+str(i)]
rules = migrate_old_rule(self.field_metadata, templ)
for templ in rules:
old_rules.append((col, templ))
except:
pass
if old_rules:
self.prefs['column_color_rules'] += old_rules
# Migrate saved search and user categories to db preference scheme
def migrate_preference(key, default):
oldval = prefs[key]
if oldval != default:
self.prefs[key] = oldval
prefs[key] = default
if key not in self.prefs:
self.prefs[key] = default
migrate_preference('user_categories', {})
migrate_preference('saved_searches', {})
# migrate grouped_search_terms
if self.prefs.get('grouped_search_terms', None) is None:
try:
ogst = tweaks.get('grouped_search_terms', {})
ngst = {}
for t in ogst:
ngst[icu_lower(t)] = ogst[t]
self.prefs.set('grouped_search_terms', ngst)
except:
pass
# migrate the gui_restriction preference to a virtual library
gr_pref = self.prefs.get('gui_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['gui_restriction'] = ''
self.prefs['virtual_lib_on_startup'] = gr_pref
# migrate the cs_restriction preference to a virtual library
gr_pref = self.prefs.get('cs_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['cs_restriction'] = ''
self.prefs['cs_virtual_lib_on_startup'] = gr_pref
# Rename any user categories with names that differ only in case
user_cats = self.prefs.get('user_categories', [])
catmap = {}
for uc in user_cats:
ucl = icu_lower(uc)
if ucl not in catmap:
catmap[ucl] = []
catmap[ucl].append(uc)
cats_changed = False
for uc in catmap:
if len(catmap[uc]) > 1:
prints('found user category case overlap', catmap[uc])
cat = catmap[uc][0]
suffix = 1
while icu_lower((cat + unicode(suffix))) in catmap:
suffix += 1
prints('Renaming user category %s to %s'%(cat, cat+unicode(suffix)))
user_cats[cat + unicode(suffix)] = user_cats[cat]
del user_cats[cat]
cats_changed = True
if cats_changed:
self.prefs.set('user_categories', user_cats)
# }}}
def initialize_custom_columns(self): # {{{
self.custom_columns_deleted = False
with self.conn:
# Delete previously marked custom columns
for record in self.conn.get(
'SELECT id FROM custom_columns WHERE mark_for_delete=1'):
num = record[0]
table, lt = self.custom_table_names(num)
self.execute('''\
DROP INDEX IF EXISTS {table}_idx;
DROP INDEX IF EXISTS {lt}_aidx;
DROP INDEX IF EXISTS {lt}_bidx;
DROP TRIGGER IF EXISTS fkc_update_{lt}_a;
DROP TRIGGER IF EXISTS fkc_update_{lt}_b;
DROP TRIGGER IF EXISTS fkc_insert_{lt};
DROP TRIGGER IF EXISTS fkc_delete_{lt};
DROP TRIGGER IF EXISTS fkc_insert_{table};
DROP TRIGGER IF EXISTS fkc_delete_{table};
DROP VIEW IF EXISTS tag_browser_{table};
DROP VIEW IF EXISTS tag_browser_filtered_{table};
DROP TABLE IF EXISTS {table};
DROP TABLE IF EXISTS {lt};
'''.format(table=table, lt=lt)
)
self.prefs.set('update_all_last_mod_dates_on_start', True)
self.execute('DELETE FROM custom_columns WHERE mark_for_delete=1')
# Load metadata for custom columns
self.custom_column_label_map, self.custom_column_num_map = {}, {}
self.custom_column_num_to_label_map = {}
triggers = []
remove = []
custom_tables = self.custom_tables
for record in self.conn.get(
'SELECT label,name,datatype,editable,display,normalized,id,is_multiple FROM custom_columns'):
data = {
'label':record[0],
'name':record[1],
'datatype':record[2],
'editable':bool(record[3]),
'display':json.loads(record[4]),
'normalized':bool(record[5]),
'num':record[6],
'is_multiple':bool(record[7]),
}
if data['display'] is None:
data['display'] = {}
# set up the is_multiple separator dict
if data['is_multiple']:
if data['display'].get('is_names', False):
seps = {'cache_to_list': '|', 'ui_to_list': '&', 'list_to_ui': ' & '}
elif data['datatype'] == 'composite':
seps = {'cache_to_list': ',', 'ui_to_list': ',', 'list_to_ui': ', '}
else:
seps = {'cache_to_list': '|', 'ui_to_list': ',', 'list_to_ui': ', '}
else:
seps = {}
data['multiple_seps'] = seps
table, lt = self.custom_table_names(data['num'])
if table not in custom_tables or (data['normalized'] and lt not in
custom_tables):
remove.append(data)
continue
self.custom_column_num_map[data['num']] = \
self.custom_column_label_map[data['label']] = data
self.custom_column_num_to_label_map[data['num']] = data['label']
# Create Foreign Key triggers
if data['normalized']:
trigger = 'DELETE FROM %s WHERE book=OLD.id;'%lt
else:
trigger = 'DELETE FROM %s WHERE book=OLD.id;'%table
triggers.append(trigger)
if remove:
with self.conn:
for data in remove:
prints('WARNING: Custom column %r not found, removing.' %
data['label'])
self.execute('DELETE FROM custom_columns WHERE id=?',
(data['num'],))
if triggers:
with self.conn:
self.execute('''\
CREATE TEMP TRIGGER custom_books_delete_trg
AFTER DELETE ON books
BEGIN
%s
END;
'''%(' \n'.join(triggers)))
# Setup data adapters
def adapt_text(x, d):
if d['is_multiple']:
if x is None:
return []
if isinstance(x, (str, unicode, bytes)):
x = x.split(d['multiple_seps']['ui_to_list'])
x = [y.strip() for y in x if y.strip()]
x = [y.decode(preferred_encoding, 'replace') if not isinstance(y,
unicode) else y for y in x]
return [u' '.join(y.split()) for y in x]
else:
return x if x is None or isinstance(x, unicode) else \
x.decode(preferred_encoding, 'replace')
def adapt_datetime(x, d):
if isinstance(x, (str, unicode, bytes)):
x = parse_date(x, assume_utc=False, as_utc=False)
return x
def adapt_bool(x, d):
if isinstance(x, (str, unicode, bytes)):
x = x.lower()
if x == 'true':
x = True
elif x == 'false':
x = False
elif x == 'none':
x = None
else:
x = bool(int(x))
return x
def adapt_enum(x, d):
v = adapt_text(x, d)
if not v:
v = None
return v
def adapt_number(x, d):
if x is None:
return None
if isinstance(x, (str, unicode, bytes)):
if x.lower() == 'none':
return None
if d['datatype'] == 'int':
return int(x)
return float(x)
self.custom_data_adapters = {
'float': adapt_number,
'int': adapt_number,
'rating':lambda x,d: x if x is None else min(10., max(0., float(x))),
'bool': adapt_bool,
'comments': lambda x,d: adapt_text(x, {'is_multiple':False}),
'datetime': adapt_datetime,
'text':adapt_text,
'series':adapt_text,
'enumeration': adapt_enum
}
# Create Tag Browser categories for custom columns
for k in sorted(self.custom_column_label_map.iterkeys()):
v = self.custom_column_label_map[k]
if v['normalized']:
is_category = True
else:
is_category = False
is_m = v['multiple_seps']
tn = 'custom_column_{0}'.format(v['num'])
self.field_metadata.add_custom_field(label=v['label'],
table=tn, column='value', datatype=v['datatype'],
colnum=v['num'], name=v['name'], display=v['display'],
is_multiple=is_m, is_category=is_category,
is_editable=v['editable'], is_csp=False)
# }}}
def initialize_tables(self): # {{{
tables = self.tables = {}
for col in ('title', 'sort', 'author_sort', 'series_index', 'comments',
'timestamp', 'pubdate', 'uuid', 'path', 'cover',
'last_modified'):
metadata = self.field_metadata[col].copy()
if col == 'comments':
metadata['table'], metadata['column'] = 'comments', 'text'
if not metadata['table']:
metadata['table'], metadata['column'] = 'books', ('has_cover'
if col == 'cover' else col)
if not metadata['column']:
metadata['column'] = col
tables[col] = (PathTable if col == 'path' else UUIDTable if col == 'uuid' else OneToOneTable)(col, metadata)
for col in ('series', 'publisher'):
tables[col] = ManyToOneTable(col, self.field_metadata[col].copy())
for col in ('authors', 'tags', 'formats', 'identifiers', 'languages', 'rating'):
cls = {
'authors':AuthorsTable,
'formats':FormatsTable,
'identifiers':IdentifiersTable,
'rating':RatingTable,
}.get(col, ManyToManyTable)
tables[col] = cls(col, self.field_metadata[col].copy())
tables['size'] = SizeTable('size', self.field_metadata['size'].copy())
self.FIELD_MAP = {
'id':0, 'title':1, 'authors':2, 'timestamp':3, 'size':4,
'rating':5, 'tags':6, 'comments':7, 'series':8, 'publisher':9,
'series_index':10, 'sort':11, 'author_sort':12, 'formats':13,
'path':14, 'pubdate':15, 'uuid':16, 'cover':17, 'au_map':18,
'last_modified':19, 'identifiers':20, 'languages':21,
}
for k,v in self.FIELD_MAP.iteritems():
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
base = max(self.FIELD_MAP.itervalues())
for label_, data in self.custom_column_label_map.iteritems():
label = self.field_metadata.custom_field_prefix + label_
metadata = self.field_metadata[label].copy()
link_table = self.custom_table_names(data['num'])[1]
self.FIELD_MAP[data['num']] = base = base+1
self.field_metadata.set_field_record_index(label_, base,
prefer_custom=True)
if data['datatype'] == 'series':
# account for the series index column. Field_metadata knows that
# the series index is one larger than the series. If you change
# it here, be sure to change it there as well.
self.FIELD_MAP[str(data['num'])+'_index'] = base = base+1
self.field_metadata.set_field_record_index(label_+'_index', base,
prefer_custom=True)
if data['normalized']:
if metadata['is_multiple']:
tables[label] = ManyToManyTable(label, metadata,
link_table=link_table)
else:
tables[label] = ManyToOneTable(label, metadata,
link_table=link_table)
if metadata['datatype'] == 'series':
# Create series index table
label += '_index'
metadata = self.field_metadata[label].copy()
metadata['column'] = 'extra'
metadata['table'] = link_table
tables[label] = OneToOneTable(label, metadata)
else:
if data['datatype'] == 'composite':
tables[label] = CompositeTable(label, metadata)
else:
tables[label] = OneToOneTable(label, metadata)
self.FIELD_MAP['ondevice'] = base = base+1
self.field_metadata.set_field_record_index('ondevice', base, prefer_custom=False)
self.FIELD_MAP['marked'] = base = base+1
self.field_metadata.set_field_record_index('marked', base, prefer_custom=False)
self.FIELD_MAP['series_sort'] = base = base+1
self.field_metadata.set_field_record_index('series_sort', base, prefer_custom=False)
# }}}
@property
def conn(self):
if self._conn is None:
self._conn = Connection(self.dbpath)
if self._exists and self.user_version == 0:
self._conn.close()
os.remove(self.dbpath)
self._conn = Connection(self.dbpath)
return self._conn
def execute(self, sql, bindings=None):
try:
return self.conn.cursor().execute(sql, bindings)
except apsw.IOError:
# This can happen if the computer was suspended see for example:
# https://bugs.launchpad.net/bugs/1286522. Try to reopen the db
if not self.conn.getautocommit():
raise # We are in a transaction, re-opening the db will fail anyway
self.reopen(force=True)
return self.conn.cursor().execute(sql, bindings)
def executemany(self, sql, sequence_of_bindings):
try:
with self.conn: # Disable autocommit mode, for performance
return self.conn.cursor().executemany(sql, sequence_of_bindings)
except apsw.IOError:
# This can happen if the computer was suspended see for example:
# https://bugs.launchpad.net/bugs/1286522. Try to reopen the db
if not self.conn.getautocommit():
raise # We are in a transaction, re-opening the db will fail anyway
self.reopen(force=True)
with self.conn: # Disable autocommit mode, for performance
return self.conn.cursor().executemany(sql, sequence_of_bindings)
def get(self, *args, **kw):
ans = self.execute(*args)
if kw.get('all', True):
return ans.fetchall()
try:
return ans.next()[0]
except (StopIteration, IndexError):
return None
def last_insert_rowid(self):
return self.conn.last_insert_rowid()
def custom_field_name(self, label=None, num=None):
if label is not None:
return self.field_metadata.custom_field_prefix + label
return self.field_metadata.custom_field_prefix + self.custom_column_num_to_label_map[num]
def custom_field_metadata(self, label=None, num=None):
if label is not None:
return self.custom_column_label_map[label]
return self.custom_column_num_map[num]
def set_custom_column_metadata(self, num, name=None, label=None, is_editable=None, display=None):
changed = False
if name is not None:
self.execute('UPDATE custom_columns SET name=? WHERE id=?', (name, num))
changed = True
if label is not None:
self.execute('UPDATE custom_columns SET label=? WHERE id=?', (label, num))
changed = True
if is_editable is not None:
self.execute('UPDATE custom_columns SET editable=? WHERE id=?', (bool(is_editable), num))
self.custom_column_num_map[num]['is_editable'] = bool(is_editable)
changed = True
if display is not None:
self.execute('UPDATE custom_columns SET display=? WHERE id=?', (json.dumps(display), num))
changed = True
# Note: the caller is responsible for scheduling a metadata backup if necessary
return changed
def create_custom_column(self, label, name, datatype, is_multiple, editable=True, display={}): # {{{
import re
if not label:
raise ValueError(_('No label was provided'))
if re.match('^\w*$', label) is None or not label[0].isalpha() or label.lower() != label:
raise ValueError(_('The label must contain only lower case letters, digits and underscores, and start with a letter'))
if datatype not in CUSTOM_DATA_TYPES:
raise ValueError('%r is not a supported data type'%datatype)
normalized = datatype not in ('datetime', 'comments', 'int', 'bool',
'float', 'composite')
is_multiple = is_multiple and datatype in ('text', 'composite')
self.execute(
('INSERT INTO '
'custom_columns(label,name,datatype,is_multiple,editable,display,normalized)'
'VALUES (?,?,?,?,?,?,?)'),
(label, name, datatype, is_multiple, editable, json.dumps(display), normalized))
num = self.conn.last_insert_rowid()
if datatype in ('rating', 'int'):
dt = 'INT'
elif datatype in ('text', 'comments', 'series', 'composite', 'enumeration'):
dt = 'TEXT'
elif datatype in ('float',):
dt = 'REAL'
elif datatype == 'datetime':
dt = 'timestamp'
elif datatype == 'bool':
dt = 'BOOL'
collate = 'COLLATE NOCASE' if dt == 'TEXT' else ''
table, lt = self.custom_table_names(num)
if normalized:
if datatype == 'series':
s_index = 'extra REAL,'
else:
s_index = ''
lines = [
'''\
CREATE TABLE %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
value %s NOT NULL %s,
UNIQUE(value));
'''%(table, dt, collate),
'CREATE INDEX %s_idx ON %s (value %s);'%(table, table, collate),
'''\
CREATE TABLE %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
book INTEGER NOT NULL,
value INTEGER NOT NULL,
%s
UNIQUE(book, value)
);'''%(lt, s_index),
'CREATE INDEX %s_aidx ON %s (value);'%(lt,lt),
'CREATE INDEX %s_bidx ON %s (book);'%(lt,lt),
'''\
CREATE TRIGGER fkc_update_{lt}_a
BEFORE UPDATE OF book ON {lt}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
CREATE TRIGGER fkc_update_{lt}_b
BEFORE UPDATE OF author ON {lt}
BEGIN
SELECT CASE
WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
END;
END;
CREATE TRIGGER fkc_insert_{lt}
BEFORE INSERT ON {lt}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
END;
END;
CREATE TRIGGER fkc_delete_{lt}
AFTER DELETE ON {table}
BEGIN
DELETE FROM {lt} WHERE value=OLD.id;
END;
CREATE VIEW tag_browser_{table} AS SELECT
id,
value,
(SELECT COUNT(id) FROM {lt} WHERE value={table}.id) count,
(SELECT AVG(r.rating)
FROM {lt},
books_ratings_link as bl,
ratings as r
WHERE {lt}.value={table}.id and bl.book={lt}.book and
r.id = bl.rating and r.rating <> 0) avg_rating,
value AS sort
FROM {table};
CREATE VIEW tag_browser_filtered_{table} AS SELECT
id,
value,
(SELECT COUNT({lt}.id) FROM {lt} WHERE value={table}.id AND
books_list_filter(book)) count,
(SELECT AVG(r.rating)
FROM {lt},
books_ratings_link as bl,
ratings as r
WHERE {lt}.value={table}.id AND bl.book={lt}.book AND
r.id = bl.rating AND r.rating <> 0 AND
books_list_filter(bl.book)) avg_rating,
value AS sort
FROM {table};
'''.format(lt=lt, table=table),
]
else:
lines = [
'''\
CREATE TABLE %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
book INTEGER,
value %s NOT NULL %s,
UNIQUE(book));
'''%(table, dt, collate),
'CREATE INDEX %s_idx ON %s (book);'%(table, table),
'''\
CREATE TRIGGER fkc_insert_{table}
BEFORE INSERT ON {table}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
CREATE TRIGGER fkc_update_{table}
BEFORE UPDATE OF book ON {table}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
'''.format(table=table),
]
script = ' \n'.join(lines)
self.execute(script)
self.prefs.set('update_all_last_mod_dates_on_start', True)
return num
# }}}
def delete_custom_column(self, label=None, num=None):
data = self.custom_field_metadata(label, num)
self.execute('UPDATE custom_columns SET mark_for_delete=1 WHERE id=?', (data['num'],))
def close(self, force=False):
if getattr(self, '_conn', None) is not None:
self._conn.close(force)
del self._conn
def reopen(self, force=False):
self.close(force)
self._conn = None
self.conn
def dump_and_restore(self, callback=None, sql=None):
import codecs
from calibre.utils.apsw_shell import Shell
from contextlib import closing
if callback is None:
callback = lambda x: x
uv = int(self.user_version)
with TemporaryFile(suffix='.sql') as fname:
if sql is None:
callback(_('Dumping database to SQL') + '...')
with codecs.open(fname, 'wb', encoding='utf-8') as buf:
shell = Shell(db=self.conn, stdout=buf)
shell.process_command('.dump')
else:
with open(fname, 'wb') as buf:
buf.write(sql if isinstance(sql, bytes) else sql.encode('utf-8'))
with TemporaryFile(suffix='_tmpdb.db', dir=os.path.dirname(self.dbpath)) as tmpdb:
callback(_('Restoring database from SQL') + '...')
with closing(Connection(tmpdb)) as conn:
shell = Shell(db=conn, encoding='utf-8')
shell.process_command('.read ' + fname.replace(os.sep, '/'))
conn.execute('PRAGMA user_version=%d;'%uv)
self.close()
try:
atomic_rename(tmpdb, self.dbpath)
finally:
self.reopen()
def vacuum(self):
self.execute('VACUUM')
@dynamic_property
def user_version(self):
doc = 'The user version of this database'
def fget(self):
return self.conn.get('pragma user_version;', all=False)
def fset(self, val):
self.execute('pragma user_version=%d'%int(val))
return property(doc=doc, fget=fget, fset=fset)
def initialize_database(self):
metadata_sqlite = P('metadata_sqlite.sql', data=True,
allow_user_override=False).decode('utf-8')
cur = self.conn.cursor()
cur.execute('BEGIN EXCLUSIVE TRANSACTION')
try:
cur.execute(metadata_sqlite)
except:
cur.execute('ROLLBACK')
else:
cur.execute('COMMIT')
if self.user_version == 0:
self.user_version = 1
# }}}
def normpath(self, path):
path = os.path.abspath(os.path.realpath(path))
if not self.is_case_sensitive:
path = os.path.normcase(path).lower()
return path
def is_deletable(self, path):
return path and not self.normpath(self.library_path).startswith(self.normpath(path))
def rmtree(self, path):
if self.is_deletable(path):
try:
shutil.rmtree(path)
except:
import traceback
traceback.print_exc()
time.sleep(1) # In case something has temporarily locked a file
shutil.rmtree(path)
def construct_path_name(self, book_id, title, author):
'''
Construct the directory name for this book based on its metadata.
'''
book_id = ' (%d)' % book_id
l = self.PATH_LIMIT - (len(book_id) // 2) - 2
author = ascii_filename(author)[:l].decode('ascii', 'replace')
title = ascii_filename(title)[:l].decode('ascii', 'replace')
while author[-1] in (' ', '.'):
author = author[:-1]
if not author:
author = ascii_filename(_('Unknown')).decode(
'ascii', 'replace')
return '%s/%s%s' % (author, title, book_id)
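    # Illustrative sketch (not part of calibre), assuming short ASCII metadata:
    #
    #     db.construct_path_name(123, 'A Book', 'Jane Doe') -> u'Jane Doe/A Book (123)'
    #
    # Longer values are ASCII-filtered and truncated to respect PATH_LIMIT.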
def construct_file_name(self, book_id, title, author, extlen):
'''
Construct the file name for this book based on its metadata.
'''
extlen = max(extlen, 14) # 14 accounts for ORIGINAL_EPUB
# The PATH_LIMIT on windows already takes into account the doubling
# (it is used to enforce the total path length limit, individual path
# components can be much longer than the total path length would allow on
# windows).
l = (self.PATH_LIMIT - (extlen // 2) - 2) if iswindows else ((self.PATH_LIMIT - extlen - 2) // 2)
if l < 5:
raise ValueError('Extension length too long: %d' % extlen)
author = ascii_filename(author)[:l].decode('ascii', 'replace')
title = ascii_filename(title)[:l].decode('ascii', 'replace')
name = title + ' - ' + author
while name.endswith('.'):
name = name[:-1]
return name
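    # Illustrative sketch (not part of calibre), assuming short ASCII metadata:
    #
    #     db.construct_file_name(123, 'A Book', 'Jane Doe', extlen=5) -> u'A Book - Jane Doe'
    #
    # The extension itself is appended by callers; extlen only reserves room for it.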
# Database layer API {{{
def custom_table_names(self, num):
return 'custom_column_%d'%num, 'books_custom_column_%d_link'%num
@property
def custom_tables(self):
return set([x[0] for x in self.conn.get(
'SELECT name FROM sqlite_master WHERE type="table" AND '
'(name GLOB "custom_column_*" OR name GLOB "books_custom_column_*")')])
@classmethod
def exists_at(cls, path):
return path and os.path.exists(os.path.join(path, 'metadata.db'))
@dynamic_property
def library_id(self):
doc = ('The UUID for this library. As long as the user only operates'
' on libraries with calibre, it will be unique')
def fget(self):
if getattr(self, '_library_id_', None) is None:
ans = self.conn.get('SELECT uuid FROM library_id', all=False)
if ans is None:
ans = str(uuid.uuid4())
self.library_id = ans
else:
self._library_id_ = ans
return self._library_id_
def fset(self, val):
self._library_id_ = unicode(val)
self.execute('''
DELETE FROM library_id;
INSERT INTO library_id (uuid) VALUES (?);
''', (self._library_id_,))
return property(doc=doc, fget=fget, fset=fset)
def last_modified(self):
''' Return last modified time as a UTC datetime object '''
return utcfromtimestamp(os.stat(self.dbpath).st_mtime)
def read_tables(self):
'''
Read all data from the db into the python in-memory tables
'''
with self.conn: # Use a single transaction, to ensure nothing modifies the db while we are reading
for table in self.tables.itervalues():
try:
table.read(self)
except:
prints('Failed to read table:', table.name)
import pprint
pprint.pprint(table.metadata)
raise
def format_abspath(self, book_id, fmt, fname, path):
path = os.path.join(self.library_path, path)
fmt = ('.' + fmt.lower()) if fmt else ''
fmt_path = os.path.join(path, fname+fmt)
if os.path.exists(fmt_path):
return fmt_path
try:
candidates = glob.glob(os.path.join(path, '*'+fmt))
except: # If path contains strange characters this throws an exc
candidates = []
if fmt and candidates and os.path.exists(candidates[0]):
shutil.copyfile(candidates[0], fmt_path)
return fmt_path
def format_hash(self, book_id, fmt, fname, path):
path = self.format_abspath(book_id, fmt, fname, path)
if path is None:
raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
sha = hashlib.sha256()
with lopen(path, 'rb') as f:
while True:
raw = f.read(SPOOL_SIZE)
sha.update(raw)
if len(raw) < SPOOL_SIZE:
break
return sha.hexdigest()
def format_metadata(self, book_id, fmt, fname, path):
path = self.format_abspath(book_id, fmt, fname, path)
ans = {}
if path is not None:
stat = os.stat(path)
ans['path'] = path
ans['size'] = stat.st_size
ans['mtime'] = utcfromtimestamp(stat.st_mtime)
return ans
def has_format(self, book_id, fmt, fname, path):
return self.format_abspath(book_id, fmt, fname, path) is not None
def remove_formats(self, remove_map):
paths = []
for book_id, removals in remove_map.iteritems():
for fmt, fname, path in removals:
path = self.format_abspath(book_id, fmt, fname, path)
if path is not None:
paths.append(path)
try:
delete_service().delete_files(paths, self.library_path)
except:
import traceback
traceback.print_exc()
def cover_last_modified(self, path):
path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
try:
return utcfromtimestamp(os.stat(path).st_mtime)
except EnvironmentError:
pass # Cover doesn't exist
def copy_cover_to(self, path, dest, windows_atomic_move=None, use_hardlink=False):
path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if os.access(path, os.R_OK) and dest and not samefile(dest, path):
windows_atomic_move.copy_path_to(path, dest)
return True
else:
if os.access(path, os.R_OK):
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
try:
f = lopen(path, 'rb')
except (IOError, OSError) as e:
# Ensure the path that caused this error is reported
raise Exception('Failed to open %r with error: %s' % (path, e))
with f:
if hasattr(dest, 'write'):
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
return True
elif dest and not samefile(dest, path):
if use_hardlink:
try:
hardlink_file(path, dest)
return True
except:
pass
with lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
return True
return False
def cover_or_cache(self, path, timestamp):
path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
try:
stat = os.stat(path)
except EnvironmentError:
return False, None, None
if abs(timestamp - stat.st_mtime) < 0.1:
return True, None, None
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
with f:
return True, f.read(), stat.st_mtime
def set_cover(self, book_id, path, data):
path = os.path.abspath(os.path.join(self.library_path, path))
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, 'cover.jpg')
if callable(getattr(data, 'save', None)):
from calibre.gui2 import pixmap_to_data
data = pixmap_to_data(data)
elif callable(getattr(data, 'read', None)):
data = data.read()
if data is None:
if os.path.exists(path):
try:
os.remove(path)
except (IOError, OSError):
time.sleep(0.2)
os.remove(path)
else:
try:
save_cover_data_to(data, path)
except (IOError, OSError):
time.sleep(0.2)
save_cover_data_to(data, path)
def copy_format_to(self, book_id, fmt, fname, path, dest,
windows_atomic_move=None, use_hardlink=False):
path = self.format_abspath(book_id, fmt, fname, path)
if path is None:
return False
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if dest:
if samefile(dest, path):
# Ensure that the file has the same case as dest
try:
if path != dest:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
windows_atomic_move.copy_path_to(path, dest)
else:
if hasattr(dest, 'write'):
with lopen(path, 'rb') as f:
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
elif dest:
if samefile(dest, path):
if not self.is_case_sensitive and path != dest:
# Ensure that the file has the same case as dest
try:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
if use_hardlink:
try:
hardlink_file(path, dest)
return True
except:
pass
with lopen(path, 'rb') as f, lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
return True
def windows_check_if_files_in_use(self, paths):
'''
Raises an EACCES IOError if any of the files in the folder of book_id
are opened in another program on windows.
'''
if iswindows:
for path in paths:
spath = os.path.join(self.library_path, *path.split('/'))
wam = None
if os.path.exists(spath):
try:
wam = WindowsAtomicFolderMove(spath)
finally:
if wam is not None:
wam.close_handles()
def add_format(self, book_id, fmt, stream, title, author, path, current_name):
fmt = ('.' + fmt.lower()) if fmt else ''
fname = self.construct_file_name(book_id, title, author, len(fmt))
path = os.path.join(self.library_path, path)
dest = os.path.join(path, fname + fmt)
if not os.path.exists(path):
os.makedirs(path)
size = 0
if current_name is not None:
old_path = os.path.join(path, current_name + fmt)
if old_path != dest:
# Ensure that the old format file is not orphaned, this can
# happen if the algorithm in construct_file_name is changed.
try:
# rename rather than remove, so that if something goes
# wrong in the rest of this function, at least the file is
# not deleted
os.rename(old_path, dest)
except EnvironmentError as e:
if getattr(e, 'errno', None) != errno.ENOENT:
# Failing to rename the old format will at worst leave a
# harmless orphan, so log and ignore the error
import traceback
traceback.print_exc()
if (not getattr(stream, 'name', False) or not samefile(dest, stream.name)):
with lopen(dest, 'wb') as f:
shutil.copyfileobj(stream, f)
size = f.tell()
elif os.path.exists(dest):
size = os.path.getsize(dest)
return size, fname
def update_path(self, book_id, title, author, path_field, formats_field):
path = self.construct_path_name(book_id, title, author)
current_path = path_field.for_book(book_id, default_value='')
formats = formats_field.for_book(book_id, default_value=())
try:
extlen = max(len(fmt) for fmt in formats) + 1
except ValueError:
extlen = 10
fname = self.construct_file_name(book_id, title, author, extlen)
# Check if the metadata used to construct paths has changed
changed = False
for fmt in formats:
name = formats_field.format_fname(book_id, fmt)
if name and name != fname:
changed = True
break
if path == current_path and not changed:
return
spath = os.path.join(self.library_path, *current_path.split('/'))
tpath = os.path.join(self.library_path, *path.split('/'))
source_ok = current_path and os.path.exists(spath)
wam = WindowsAtomicFolderMove(spath) if iswindows and source_ok else None
format_map = {}
original_format_map = {}
try:
if not os.path.exists(tpath):
os.makedirs(tpath)
if source_ok: # Migrate existing files
dest = os.path.join(tpath, 'cover.jpg')
self.copy_cover_to(current_path, dest,
windows_atomic_move=wam, use_hardlink=True)
for fmt in formats:
dest = os.path.join(tpath, fname+'.'+fmt.lower())
format_map[fmt] = dest
ofmt_fname = formats_field.format_fname(book_id, fmt)
original_format_map[fmt] = os.path.join(spath, ofmt_fname+'.'+fmt.lower())
self.copy_format_to(book_id, fmt, ofmt_fname, current_path,
dest, windows_atomic_move=wam, use_hardlink=True)
# Update db to reflect new file locations
for fmt in formats:
formats_field.table.set_fname(book_id, fmt, fname, self)
path_field.table.set_path(book_id, path, self)
# Delete not needed files and directories
if source_ok:
if os.path.exists(spath):
if samefile(spath, tpath):
# The format filenames may have changed while the folder
# name remains the same
for fmt, opath in original_format_map.iteritems():
npath = format_map.get(fmt, None)
if npath and os.path.abspath(npath.lower()) != os.path.abspath(opath.lower()) and samefile(opath, npath):
# opath and npath are different hard links to the same file
os.unlink(opath)
else:
if wam is not None:
wam.delete_originals()
self.rmtree(spath)
parent = os.path.dirname(spath)
if len(os.listdir(parent)) == 0:
self.rmtree(parent)
finally:
if wam is not None:
wam.close_handles()
curpath = self.library_path
c1, c2 = current_path.split('/'), path.split('/')
if not self.is_case_sensitive and len(c1) == len(c2):
# On case-insensitive systems, title and author renames that only
# change case don't cause any changes to the directories in the file
# system. This can lead to having the directory names not match the
# title/author, which leads to trouble when libraries are copied to
# a case-sensitive system. The following code attempts to fix this
# by checking each segment. If they are different because of case,
# then rename the segment. Note that the code above correctly
# handles files in the directories, so no need to do them here.
for oldseg, newseg in zip(c1, c2):
if oldseg.lower() == newseg.lower() and oldseg != newseg:
try:
os.rename(os.path.join(curpath, oldseg),
os.path.join(curpath, newseg))
except:
break # Fail silently since nothing catastrophic has happened
curpath = os.path.join(curpath, newseg)
def write_backup(self, path, raw):
path = os.path.abspath(os.path.join(self.library_path, path, 'metadata.opf'))
try:
with lopen(path, 'wb') as f:
f.write(raw)
except EnvironmentError:
os.makedirs(os.path.dirname(path))
with lopen(path, 'wb') as f:
f.write(raw)
def read_backup(self, path):
path = os.path.abspath(os.path.join(self.library_path, path, 'metadata.opf'))
with lopen(path, 'rb') as f:
return f.read()
def remove_books(self, path_map, permanent=False):
self.executemany(
'DELETE FROM books WHERE id=?', [(x,) for x in path_map])
paths = {os.path.join(self.library_path, x) for x in path_map.itervalues() if x}
paths = {x for x in paths if os.path.exists(x) and self.is_deletable(x)}
if permanent:
for path in paths:
self.rmtree(path)
remove_dir_if_empty(os.path.dirname(path), ignore_metadata_caches=True)
else:
delete_service().delete_books(paths, self.library_path)
def add_custom_data(self, name, val_map, delete_first):
if delete_first:
self.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.executemany(
'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
[(book_id, name, json.dumps(val, default=to_json))
for book_id, val in val_map.iteritems()])
def get_custom_book_data(self, name, book_ids, default=None):
book_ids = frozenset(book_ids)
def safe_load(val):
try:
return json.loads(val, object_hook=from_json)
except:
return default
if len(book_ids) == 1:
bid = next(iter(book_ids))
ans = {book_id:safe_load(val) for book_id, val in
self.execute('SELECT book, val FROM books_plugin_data WHERE book=? AND name=?', (bid, name))}
return ans or {bid:default}
ans = {}
for book_id, val in self.execute(
'SELECT book, val FROM books_plugin_data WHERE name=?', (name,)):
if not book_ids or book_id in book_ids:
val = safe_load(val)
ans[book_id] = val
return ans
def delete_custom_book_data(self, name, book_ids):
if book_ids:
self.executemany('DELETE FROM books_plugin_data WHERE book=? AND name=?',
[(book_id, name) for book_id in book_ids])
else:
self.execute('DELETE FROM books_plugin_data WHERE name=?', (name,))
def get_ids_for_custom_book_data(self, name):
return frozenset(r[0] for r in self.execute('SELECT book FROM books_plugin_data WHERE name=?', (name,)))
def conversion_options(self, book_id, fmt):
for (data,) in self.conn.get('SELECT data FROM conversion_options WHERE book=? AND format=?', (book_id, fmt.upper())):
if data:
return cPickle.loads(bytes(data))
def has_conversion_options(self, ids, fmt='PIPE'):
ids = frozenset(ids)
with self.conn:
self.execute('DROP TABLE IF EXISTS conversion_options_temp; CREATE TEMP TABLE conversion_options_temp (id INTEGER PRIMARY KEY);')
self.executemany('INSERT INTO conversion_options_temp VALUES (?)', [(x,) for x in ids])
for (book_id,) in self.conn.get(
'SELECT book FROM conversion_options WHERE format=? AND book IN (SELECT id FROM conversion_options_temp)', (fmt.upper(),)):
return True
return False
def delete_conversion_options(self, book_ids, fmt):
self.executemany('DELETE FROM conversion_options WHERE book=? AND format=?',
[(book_id, fmt.upper()) for book_id in book_ids])
def set_conversion_options(self, options, fmt):
options = [(book_id, fmt.upper(), buffer(cPickle.dumps(data, -1))) for book_id, data in options.iteritems()]
self.executemany('INSERT OR REPLACE INTO conversion_options(book,format,data) VALUES (?,?,?)', options)
def get_top_level_move_items(self, all_paths):
items = set(os.listdir(self.library_path))
paths = set(all_paths)
paths.update({'metadata.db', 'metadata_db_prefs_backup.json'})
path_map = {x:x for x in paths}
if not self.is_case_sensitive:
for x in items:
path_map[x.lower()] = x
items = {x.lower() for x in items}
paths = {x.lower() for x in paths}
items = items.intersection(paths)
return items, path_map
def move_library_to(self, all_paths, newloc, progress=lambda x: x):
if not os.path.exists(newloc):
os.makedirs(newloc)
old_dirs = set()
items, path_map = self.get_top_level_move_items(all_paths)
for x in items:
src = os.path.join(self.library_path, x)
dest = os.path.join(newloc, path_map[x])
if os.path.isdir(src):
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
old_dirs.add(src)
else:
if os.path.exists(dest):
os.remove(dest)
shutil.copyfile(src, dest)
x = path_map[x]
if not isinstance(x, unicode):
x = x.decode(filesystem_encoding, 'replace')
progress(x)
dbpath = os.path.join(newloc, os.path.basename(self.dbpath))
opath = self.dbpath
self.conn.close()
self.library_path, self.dbpath = newloc, dbpath
if self._conn is not None:
self._conn.close()
self._conn = None
self.conn
try:
os.unlink(opath)
except:
pass
for loc in old_dirs:
try:
shutil.rmtree(loc)
except:
pass
def restore_book(self, book_id, path, formats):
self.execute('UPDATE books SET path=? WHERE id=?', (path.replace(os.sep, '/'), book_id))
vals = [(book_id, fmt, size, name) for fmt, size, name in formats]
self.executemany('INSERT INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)', vals)
# }}}
| gpl-3.0 | -5,460,101,433,638,921,000 | 40.141742 | 141 | 0.527146 | false |
brainwane/missing-from-wikipedia | webapp/application.py | 1 | 1721 | # -*- coding: utf-8 -*-
from flask import Flask, render_template, request
import missing
app = Flask(__name__)
# take in names from datainput.html form
# run massagenames (implicitly chunks into 50 titles per request) and leftout
# return result to user in results.html form
def onWikipedia(names, lang):
names = missing.massagenames(names)
resultlist = missing.leftout(names, lang)
stats = missing.generate_statistics(resultlist, names)
return names, resultlist, stats
def askedToCheck(listofstrings):
l = len(listofstrings)
if l == 1:
return listofstrings[0]
elif l <= 4:
return ", ".join(listofstrings)
elif l > 4:
return "%s phrases: %s, %s... %s, %s" % (l, listofstrings[0], listofstrings[1], listofstrings[-2], listofstrings[-1])
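# Illustrative examples (not part of the original module), assuming the
# formatting rules in askedToCheck above:
#   askedToCheck(["Ada Lovelace"])                 -> "Ada Lovelace"
#   askedToCheck(["A", "B", "C"])                  -> "A, B, C"
#   askedToCheck(["A", "B", "C", "D", "E", "F"])   -> "6 phrases: A, B... E, F"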
@app.route('/index', methods=['GET', 'POST']) # form in template
def index():
if request.method == 'GET':
print "we did a get"
return render_template('datainput.html')
else: # request was POST
print "we did a POST!"
if 'pagename' in request.form:
namestocheck, language = request.form['pagename'].encode('utf-8'), request.form['langname']
namestocheck = namestocheck.split('\r\n')
else:
namefilestorage, language = request.files[('fileofnames')].stream, request.form['langname']
namestocheck = [line.strip('\n').decode('utf-8') for line in namefilestorage]
orig, checkresult, statistics = onWikipedia(namestocheck, language)
return render_template('results.html', checkname=askedToCheck(orig), result=checkresult, stats=statistics, target_lang=language)
if __name__ == "__main__":
app.run(debug=True)
| gpl-3.0 | -7,242,098,130,690,440,000 | 35.617021 | 136 | 0.655433 | false |
yelizariev/addons-yelizariev | odoo_backup_sh/tests/test_odoo_backup_sh_auto_rotation.py | 1 | 2946 | # Copyright 2018 Stanislav Krotov <https://it-projects.info/team/ufaks>
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from datetime import datetime, timedelta
import odoo.tests.common
@odoo.tests.common.at_install(False)
@odoo.tests.common.post_install(True)
class TestOdooBackupSh(odoo.tests.common.TransactionCase):
def test_compute_auto_rotation_backup_dts(self):
dt_start = datetime(2018, 10, 1, 12, 0, 0, 0)
backup_dts = [
dt_start, # 0, it's a monday, week 40, month 10
dt_start - timedelta(hours=3), # 1
dt_start - timedelta(days=1), # 2, week 39, month 9
dt_start - timedelta(days=3), # 3
dt_start - timedelta(days=10), # 4, week 38
dt_start - timedelta(days=21), # 5
dt_start - timedelta(days=42), # 6, month 8
dt_start - timedelta(days=367), # 7
dt_start - timedelta(days=734), # 8
]
self.assertEqual(
self.env["odoo_backup_sh.config"].compute_auto_rotation_backup_dts(
backup_dts, hourly=4
),
[dt for dt in backup_dts if backup_dts.index(dt) in [0, 1, 2, 3]],
)
self.assertEqual(
self.env["odoo_backup_sh.config"].compute_auto_rotation_backup_dts(
backup_dts, daily=3
),
[dt for dt in backup_dts if backup_dts.index(dt) in [0, 2, 3]],
)
self.assertEqual(
self.env["odoo_backup_sh.config"].compute_auto_rotation_backup_dts(
backup_dts, weekly=10
),
[dt for dt in backup_dts if backup_dts.index(dt) in [0, 2, 4, 5, 6, 7, 8]],
)
self.assertEqual(
self.env["odoo_backup_sh.config"].compute_auto_rotation_backup_dts(
backup_dts, monthly=3
),
[dt for dt in backup_dts if backup_dts.index(dt) in [0, 2, 6]],
)
self.assertEqual(
self.env["odoo_backup_sh.config"].compute_auto_rotation_backup_dts(
backup_dts, yearly=2
),
[dt for dt in backup_dts if backup_dts.index(dt) in [0, 7]],
)
self.assertEqual(
self.env["odoo_backup_sh.config"].compute_auto_rotation_backup_dts(
backup_dts, hourly=2, daily=2, weekly=3
),
[dt for dt in backup_dts if backup_dts.index(dt) in [0, 1, 2, 4]],
)
dt_start = datetime(2018, 10, 1, 12, 0, 0, 0)
backup_dts = [
dt_start - timedelta(minutes=1),
dt_start - timedelta(minutes=3),
dt_start - timedelta(minutes=6),
dt_start - timedelta(minutes=9),
]
self.assertEqual(
self.env["odoo_backup_sh.config"].compute_auto_rotation_backup_dts(
backup_dts, hourly=1000
),
[backup_dts[i] for i in [0]],
)
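        # Illustrative note (not part of the original test): the helper keeps the
        # newest backup per hourly/daily/weekly/monthly/yearly bucket, which is why
        # daily=3 above selects indexes 0, 2 and 3 (three distinct days).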
| lgpl-3.0 | -7,109,469,045,742,021,000 | 39.356164 | 87 | 0.540054 | false |
kennedyshead/home-assistant | homeassistant/helpers/entity_platform.py | 1 | 26292 | """Class to manage the entities for a single platform."""
from __future__ import annotations
import asyncio
from collections.abc import Coroutine, Iterable
from contextvars import ContextVar
from datetime import datetime, timedelta
import logging
from logging import Logger
from types import ModuleType
from typing import TYPE_CHECKING, Any, Callable
from typing_extensions import Protocol
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
ATTR_RESTORED,
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_STARTED,
)
from homeassistant.core import (
CALLBACK_TYPE,
CoreState,
HomeAssistant,
ServiceCall,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import (
HomeAssistantError,
PlatformNotReady,
RequiredParameterMissing,
)
from homeassistant.setup import async_start_setup
from homeassistant.util.async_ import run_callback_threadsafe
from . import (
config_validation as cv,
device_registry as dev_reg,
entity_registry as ent_reg,
service,
)
from .device_registry import DeviceRegistry
from .entity_registry import DISABLED_INTEGRATION, EntityRegistry
from .event import async_call_later, async_track_time_interval
from .typing import ConfigType, DiscoveryInfoType
if TYPE_CHECKING:
from .entity import Entity
SLOW_SETUP_WARNING = 10
SLOW_SETUP_MAX_WAIT = 60
SLOW_ADD_ENTITY_MAX_WAIT = 15 # Per Entity
SLOW_ADD_MIN_TIMEOUT = 500
PLATFORM_NOT_READY_RETRIES = 10
DATA_ENTITY_PLATFORM = "entity_platform"
PLATFORM_NOT_READY_BASE_WAIT_TIME = 30 # seconds
_LOGGER = logging.getLogger(__name__)
class AddEntitiesCallback(Protocol):
"""Protocol type for EntityPlatform.add_entities callback."""
def __call__(
self, new_entities: Iterable[Entity], update_before_add: bool = False
) -> None:
"""Define add_entities type."""
class EntityPlatform:
"""Manage the entities for a single platform."""
def __init__(
self,
*,
hass: HomeAssistant,
logger: Logger,
domain: str,
platform_name: str,
platform: ModuleType | None,
scan_interval: timedelta,
entity_namespace: str | None,
) -> None:
"""Initialize the entity platform."""
self.hass = hass
self.logger = logger
self.domain = domain
self.platform_name = platform_name
self.platform = platform
self.scan_interval = scan_interval
self.entity_namespace = entity_namespace
self.config_entry: config_entries.ConfigEntry | None = None
self.entities: dict[str, Entity] = {}
self._tasks: list[asyncio.Future] = []
# Stop tracking tasks after setup is completed
self._setup_complete = False
# Method to cancel the state change listener
self._async_unsub_polling: CALLBACK_TYPE | None = None
# Method to cancel the retry of setup
self._async_cancel_retry_setup: CALLBACK_TYPE | None = None
self._process_updates: asyncio.Lock | None = None
self.parallel_updates: asyncio.Semaphore | None = None
# Platform is None for the EntityComponent "catch-all" EntityPlatform
# which powers entity_component.add_entities
self.parallel_updates_created = platform is None
hass.data.setdefault(DATA_ENTITY_PLATFORM, {}).setdefault(
self.platform_name, []
).append(self)
def __repr__(self) -> str:
"""Represent an EntityPlatform."""
return f"<EntityPlatform domain={self.domain} platform_name={self.platform_name} config_entry={self.config_entry}>"
@callback
def _get_parallel_updates_semaphore(
self, entity_has_async_update: bool
) -> asyncio.Semaphore | None:
"""Get or create a semaphore for parallel updates.
        The semaphore is created on demand because its default depends on whether the update method is async or not.
If parallel updates is set to 0, we skip the semaphore.
If parallel updates is set to a number, we initialize the semaphore to that number.
The default value for parallel requests is decided based on the first entity that is added to Home Assistant.
It's 0 if the entity defines the async_update method, else it's 1.
"""
if self.parallel_updates_created:
return self.parallel_updates
self.parallel_updates_created = True
parallel_updates = getattr(self.platform, "PARALLEL_UPDATES", None)
if parallel_updates is None and not entity_has_async_update:
parallel_updates = 1
if parallel_updates == 0:
parallel_updates = None
if parallel_updates is not None:
self.parallel_updates = asyncio.Semaphore(parallel_updates)
return self.parallel_updates
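    # Illustrative summary of the rules implemented above (not part of the
    # original code):
    #   PARALLEL_UPDATES | entity has async_update | resulting semaphore
    #   unset            | yes                     | None (unlimited)
    #   unset            | no                      | Semaphore(1)
    #   0                | either                  | None (unlimited)
    #   N > 0            | either                  | Semaphore(N)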
async def async_setup(
self,
platform_config: ConfigType,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the platform from a config file."""
platform = self.platform
hass = self.hass
if not hasattr(platform, "async_setup_platform") and not hasattr(
platform, "setup_platform"
):
self.logger.error(
"The %s platform for the %s integration does not support platform setup. Please remove it from your config.",
self.platform_name,
self.domain,
)
return
@callback
def async_create_setup_task() -> Coroutine:
"""Get task to set up platform."""
if getattr(platform, "async_setup_platform", None):
return platform.async_setup_platform( # type: ignore
hass,
platform_config,
self._async_schedule_add_entities,
discovery_info,
)
# This should not be replaced with hass.async_add_job because
# we don't want to track this task in case it blocks startup.
return hass.loop.run_in_executor( # type: ignore[return-value]
None,
platform.setup_platform, # type: ignore
hass,
platform_config,
self._schedule_add_entities,
discovery_info,
)
await self._async_setup_platform(async_create_setup_task)
async def async_shutdown(self) -> None:
"""Call when Home Assistant is stopping."""
self.async_cancel_retry_setup()
self.async_unsub_polling()
@callback
def async_cancel_retry_setup(self) -> None:
"""Cancel retry setup."""
if self._async_cancel_retry_setup is not None:
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
async def async_setup_entry(self, config_entry: config_entries.ConfigEntry) -> bool:
"""Set up the platform from a config entry."""
# Store it so that we can save config entry ID in entity registry
self.config_entry = config_entry
platform = self.platform
@callback
def async_create_setup_task() -> Coroutine:
"""Get task to set up platform."""
config_entries.current_entry.set(config_entry)
return platform.async_setup_entry( # type: ignore[no-any-return,union-attr]
self.hass, config_entry, self._async_schedule_add_entities
)
return await self._async_setup_platform(async_create_setup_task)
async def _async_setup_platform(
self, async_create_setup_task: Callable[[], Coroutine], tries: int = 0
) -> bool:
"""Set up a platform via config file or config entry.
async_create_setup_task creates a coroutine that sets up platform.
"""
current_platform.set(self)
logger = self.logger
hass = self.hass
full_name = f"{self.domain}.{self.platform_name}"
logger.info("Setting up %s", full_name)
warn_task = hass.loop.call_later(
SLOW_SETUP_WARNING,
logger.warning,
"Setup of %s platform %s is taking over %s seconds.",
self.domain,
self.platform_name,
SLOW_SETUP_WARNING,
)
with async_start_setup(hass, [full_name]):
try:
task = async_create_setup_task()
async with hass.timeout.async_timeout(SLOW_SETUP_MAX_WAIT, self.domain):
await asyncio.shield(task)
# Block till all entities are done
while self._tasks:
pending = [task for task in self._tasks if not task.done()]
self._tasks.clear()
if pending:
await asyncio.gather(*pending)
hass.config.components.add(full_name)
self._setup_complete = True
return True
except PlatformNotReady as ex:
tries += 1
wait_time = min(tries, 6) * PLATFORM_NOT_READY_BASE_WAIT_TIME
message = str(ex)
ready_message = f"ready yet: {message}" if message else "ready yet"
if tries == 1:
logger.warning(
"Platform %s not %s; Retrying in background in %d seconds",
self.platform_name,
ready_message,
wait_time,
)
else:
logger.debug(
"Platform %s not %s; Retrying in %d seconds",
self.platform_name,
ready_message,
wait_time,
)
async def setup_again(*_args: Any) -> None:
"""Run setup again."""
self._async_cancel_retry_setup = None
await self._async_setup_platform(async_create_setup_task, tries)
if hass.state == CoreState.running:
self._async_cancel_retry_setup = async_call_later(
hass, wait_time, setup_again
)
else:
self._async_cancel_retry_setup = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STARTED, setup_again
)
return False
except asyncio.TimeoutError:
logger.error(
"Setup of platform %s is taking longer than %s seconds."
" Startup will proceed without waiting any longer.",
self.platform_name,
SLOW_SETUP_MAX_WAIT,
)
return False
except Exception: # pylint: disable=broad-except
logger.exception(
"Error while setting up %s platform for %s",
self.platform_name,
self.domain,
)
return False
finally:
warn_task.cancel()
def _schedule_add_entities(
self, new_entities: Iterable[Entity], update_before_add: bool = False
) -> None:
"""Schedule adding entities for a single platform, synchronously."""
run_callback_threadsafe(
self.hass.loop,
self._async_schedule_add_entities,
list(new_entities),
update_before_add,
).result()
@callback
def _async_schedule_add_entities(
self, new_entities: Iterable[Entity], update_before_add: bool = False
) -> None:
"""Schedule adding entities for a single platform async."""
task = self.hass.async_create_task(
self.async_add_entities(new_entities, update_before_add=update_before_add),
)
if not self._setup_complete:
self._tasks.append(task)
def add_entities(
self, new_entities: Iterable[Entity], update_before_add: bool = False
) -> None:
"""Add entities for a single platform."""
        # Warn about a potential deadlock when update_before_add is used outside tests
if update_before_add:
self.logger.warning(
"Call 'add_entities' with update_before_add=True "
"only inside tests or you can run into a deadlock!"
)
asyncio.run_coroutine_threadsafe(
self.async_add_entities(list(new_entities), update_before_add),
self.hass.loop,
).result()
async def async_add_entities(
self, new_entities: Iterable[Entity], update_before_add: bool = False
) -> None:
"""Add entities for a single platform async.
This method must be run in the event loop.
"""
# handle empty list from component/platform
if not new_entities:
return
hass = self.hass
device_registry = dev_reg.async_get(hass)
entity_registry = ent_reg.async_get(hass)
tasks = [
self._async_add_entity(
entity, update_before_add, entity_registry, device_registry
)
for entity in new_entities
]
# No entities for processing
if not tasks:
return
timeout = max(SLOW_ADD_ENTITY_MAX_WAIT * len(tasks), SLOW_ADD_MIN_TIMEOUT)
try:
async with self.hass.timeout.async_timeout(timeout, self.domain):
await asyncio.gather(*tasks)
except asyncio.TimeoutError:
self.logger.warning(
"Timed out adding entities for domain %s with platform %s after %ds",
self.domain,
self.platform_name,
timeout,
)
except Exception:
self.logger.exception(
"Error adding entities for domain %s with platform %s",
self.domain,
self.platform_name,
)
raise
if (
(self.config_entry and self.config_entry.pref_disable_polling)
or self._async_unsub_polling is not None
or not any(entity.should_poll for entity in self.entities.values())
):
return
self._async_unsub_polling = async_track_time_interval(
self.hass,
self._update_entity_states,
self.scan_interval,
)
async def _async_add_entity( # noqa: C901
self,
entity: Entity,
update_before_add: bool,
entity_registry: EntityRegistry,
device_registry: DeviceRegistry,
) -> None:
"""Add an entity to the platform."""
if entity is None:
raise ValueError("Entity cannot be None")
entity.add_to_platform_start(
self.hass,
self,
self._get_parallel_updates_semaphore(hasattr(entity, "async_update")),
)
# Update properties before we generate the entity_id
if update_before_add:
try:
await entity.async_device_update(warning=False)
except Exception: # pylint: disable=broad-except
self.logger.exception("%s: Error on device update!", self.platform_name)
entity.add_to_platform_abort()
return
requested_entity_id = None
suggested_object_id: str | None = None
generate_new_entity_id = False
# Get entity_id from unique ID registration
if entity.unique_id is not None:
if entity.entity_id is not None:
requested_entity_id = entity.entity_id
suggested_object_id = split_entity_id(entity.entity_id)[1]
else:
suggested_object_id = entity.name # type: ignore[unreachable]
if self.entity_namespace is not None:
suggested_object_id = f"{self.entity_namespace} {suggested_object_id}"
if self.config_entry is not None:
config_entry_id: str | None = self.config_entry.entry_id
else:
config_entry_id = None
device_info = entity.device_info
device_id = None
if config_entry_id is not None and device_info is not None:
processed_dev_info = {"config_entry_id": config_entry_id}
for key in (
"connections",
"identifiers",
"manufacturer",
"model",
"name",
"default_manufacturer",
"default_model",
"default_name",
"sw_version",
"entry_type",
"via_device",
"suggested_area",
):
if key in device_info:
processed_dev_info[key] = device_info[key] # type: ignore[misc]
try:
device = device_registry.async_get_or_create(**processed_dev_info) # type: ignore[arg-type]
device_id = device.id
except RequiredParameterMissing:
pass
disabled_by: str | None = None
if not entity.entity_registry_enabled_default:
disabled_by = DISABLED_INTEGRATION
entry = entity_registry.async_get_or_create(
self.domain,
self.platform_name,
entity.unique_id,
suggested_object_id=suggested_object_id,
config_entry=self.config_entry,
device_id=device_id,
known_object_ids=self.entities.keys(),
disabled_by=disabled_by,
capabilities=entity.capability_attributes,
supported_features=entity.supported_features,
device_class=entity.device_class,
unit_of_measurement=entity.unit_of_measurement,
original_name=entity.name,
original_icon=entity.icon,
)
entity.registry_entry = entry
entity.entity_id = entry.entity_id
if entry.disabled:
self.logger.info(
"Not adding entity %s because it's disabled",
entry.name
or entity.name
or f'"{self.platform_name} {entity.unique_id}"',
)
entity.add_to_platform_abort()
return
# We won't generate an entity ID if the platform has already set one
# We will however make sure that platform cannot pick a registered ID
elif entity.entity_id is not None and entity_registry.async_is_registered(
entity.entity_id
):
# If entity already registered, convert entity id to suggestion
suggested_object_id = split_entity_id(entity.entity_id)[1]
generate_new_entity_id = True
# Generate entity ID
if entity.entity_id is None or generate_new_entity_id:
suggested_object_id = (
suggested_object_id or entity.name or DEVICE_DEFAULT_NAME
)
if self.entity_namespace is not None:
suggested_object_id = f"{self.entity_namespace} {suggested_object_id}"
entity.entity_id = entity_registry.async_generate_entity_id(
self.domain, suggested_object_id, self.entities.keys()
)
# Make sure it is valid in case an entity set the value themselves
if not valid_entity_id(entity.entity_id):
entity.add_to_platform_abort()
raise HomeAssistantError(f"Invalid entity ID: {entity.entity_id}")
already_exists = entity.entity_id in self.entities
restored = False
if not already_exists and not self.hass.states.async_available(
entity.entity_id
):
existing = self.hass.states.get(entity.entity_id)
if existing is not None and ATTR_RESTORED in existing.attributes:
restored = True
else:
already_exists = True
if already_exists:
if entity.unique_id is not None:
msg = f"Platform {self.platform_name} does not generate unique IDs. "
if requested_entity_id:
msg += f"ID {entity.unique_id} is already used by {entity.entity_id} - ignoring {requested_entity_id}"
else:
msg += f"ID {entity.unique_id} already exists - ignoring {entity.entity_id}"
else:
msg = f"Entity id already exists - ignoring: {entity.entity_id}"
self.logger.error(msg)
entity.add_to_platform_abort()
return
entity_id = entity.entity_id
self.entities[entity_id] = entity
if not restored:
# Reserve the state in the state machine
# because as soon as we return control to the event
# loop below, another entity could be added
# with the same id before `entity.add_to_platform_finish()`
# has a chance to finish.
self.hass.states.async_reserve(entity.entity_id)
def remove_entity_cb() -> None:
"""Remove entity from entities list."""
self.entities.pop(entity_id)
entity.async_on_remove(remove_entity_cb)
await entity.add_to_platform_finish()
async def async_reset(self) -> None:
"""Remove all entities and reset data.
This method must be run in the event loop.
"""
self.async_cancel_retry_setup()
if not self.entities:
return
tasks = [entity.async_remove() for entity in self.entities.values()]
await asyncio.gather(*tasks)
self.async_unsub_polling()
self._setup_complete = False
@callback
def async_unsub_polling(self) -> None:
"""Stop polling."""
if self._async_unsub_polling is not None:
self._async_unsub_polling()
self._async_unsub_polling = None
async def async_destroy(self) -> None:
"""Destroy an entity platform.
Call before discarding the object.
"""
await self.async_reset()
self.hass.data[DATA_ENTITY_PLATFORM][self.platform_name].remove(self)
async def async_remove_entity(self, entity_id: str) -> None:
"""Remove entity id from platform."""
await self.entities[entity_id].async_remove()
# Clean up polling job if no longer needed
if self._async_unsub_polling is not None and not any(
entity.should_poll for entity in self.entities.values()
):
self._async_unsub_polling()
self._async_unsub_polling = None
async def async_extract_from_service(
self, service_call: ServiceCall, expand_group: bool = True
) -> list[Entity]:
"""Extract all known and available entities from a service call.
Will return an empty list if entities specified but unknown.
This method must be run in the event loop.
"""
return await service.async_extract_entities(
self.hass, self.entities.values(), service_call, expand_group
)
@callback
def async_register_entity_service(
self,
name: str,
schema: dict | vol.Schema,
func: str | Callable[..., Any],
required_features: Iterable[int] | None = None,
) -> None:
"""Register an entity service.
Services will automatically be shared by all platforms of the same domain.
"""
if self.hass.services.has_service(self.platform_name, name):
return
if isinstance(schema, dict):
schema = cv.make_entity_service_schema(schema)
async def handle_service(call: ServiceCall) -> None:
"""Handle the service."""
await service.entity_service_call(
self.hass,
[
plf
for plf in self.hass.data[DATA_ENTITY_PLATFORM][self.platform_name]
if plf.domain == self.domain
],
func,
call,
required_features,
)
self.hass.services.async_register(
self.platform_name, name, handle_service, schema
)
async def _update_entity_states(self, now: datetime) -> None:
"""Update the states of all the polling entities.
To protect from flooding the executor, we will update async entities
        in parallel and other entities sequentially.
This method must be run in the event loop.
"""
if self._process_updates is None:
self._process_updates = asyncio.Lock()
if self._process_updates.locked():
self.logger.warning(
"Updating %s %s took longer than the scheduled update interval %s",
self.platform_name,
self.domain,
self.scan_interval,
)
return
async with self._process_updates:
tasks = []
for entity in self.entities.values():
if not entity.should_poll:
continue
tasks.append(entity.async_update_ha_state(True))
if tasks:
await asyncio.gather(*tasks)
current_platform: ContextVar[EntityPlatform | None] = ContextVar(
"current_platform", default=None
)
@callback
def async_get_current_platform() -> EntityPlatform:
"""Get the current platform from context."""
platform = current_platform.get()
if platform is None:
raise RuntimeError("Cannot get non-set current platform")
return platform
@callback
def async_get_platforms(
hass: HomeAssistant, integration_name: str
) -> list[EntityPlatform]:
"""Find existing platforms."""
if (
DATA_ENTITY_PLATFORM not in hass.data
or integration_name not in hass.data[DATA_ENTITY_PLATFORM]
):
return []
platforms: list[EntityPlatform] = hass.data[DATA_ENTITY_PLATFORM][integration_name]
return platforms
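# Illustrative usage sketch (not part of this module): inside a platform's
# async_setup_entry an integration could register an entity service like:
#
#     platform = async_get_current_platform()
#     platform.async_register_entity_service(
#         "set_speed",                               # hypothetical service name
#         {vol.Required("speed"): cv.positive_int},  # voluptuous schema
#         "async_set_speed",                         # hypothetical method on the entity
#     )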
| apache-2.0 | 6,382,021,984,624,040,000 | 34.577808 | 125 | 0.571543 | false |
robbievanleeuwen/section-properties | sectionproperties/examples/example_frame.py | 1 | 1555 | import time
import numpy as np
import matplotlib.pyplot as plt
import sectionproperties.pre.sections as sections
from sectionproperties.analysis.cross_section import CrossSection
# create a rectangular section
geometry = sections.RectangularSection(d=100, b=50)
# create a list of mesh sizes to analyse
mesh_sizes = [1.5, 2, 2.5, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50, 75, 100]
j_calc = [] # list to store torsion constants
t_calc = [] # list to store computation times
# loop through mesh sizes
for mesh_size in mesh_sizes:
mesh = geometry.create_mesh(mesh_sizes=[mesh_size]) # create mesh
section = CrossSection(geometry, mesh) # create a CrossSection object
start_time = time.time() # start timing
# calculate the frame properties
(_, _, _, _, j, _) = section.calculate_frame_properties()
t = time.time() - start_time # stop timing
t_calc.append(t) # save the time
j_calc.append(j) # save the torsion constant
# print the result
msg = "Mesh Size: {0}; ".format(mesh_size)
msg += "Solution Time {0:.5f} s; ".format(t)
msg += "Torsion Constant: {0:.12e}".format(j)
print(msg)
correct_val = j_calc[0] # assume the finest mesh gives the 'correct' value
j_np = np.array(j_calc) # convert results to a numpy array
error_vals = (j_np - correct_val) / j_np * 100  # compute the relative error [%]
# produce a plot of the accuracy of the torsion constant with computation time
plt.loglog(t_calc[1:], error_vals[1:], 'kx-')
plt.xlabel("Solver Time [s]")
plt.ylabel("Torsion Constant Error [%]")
plt.show()
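# Optional extension (not part of the original example): print the convergence
# data as a table, using the mesh_sizes, t_calc and error_vals computed above.
print("{:>10} {:>12} {:>12}".format("mesh size", "time [s]", "error [%]"))
for size, solver_time, err in zip(mesh_sizes, t_calc, error_vals):
    print("{:>10} {:>12.5f} {:>12.6f}".format(size, solver_time, err))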
| mit | -2,314,972,156,369,502,700 | 38.871795 | 78 | 0.684887 | false |
F-Secure/lokki-wp8 | scripts/common.py | 1 | 1958 | """
Copyright (c) 2014-2015 F-Secure
See LICENSE for details
"""
"""
Common methods to be used in converting localization files.
Copyright: F-Secure, 2012
"""
#defult product name in localization files
PRODUCT_NAME_DEFAULT_VALUE = "F-Secure Mobile Sync"
# id that defines default product name in localization file
PRODUCT_NAME_ID = "PRODUCT_NAME_LONG"
def find_node(nodes, string_id):
"""
    Searches nodes and returns the node whose 'id' attribute equals string_id.
    Raises an exception if no such node is found.
"""
for node in nodes:
current_id = node.getAttribute("id")
if current_id == string_id:
return node
    raise Exception("find_node failed! " + string_id + " was not found in nodes.")
class LocCustomizer():
def __init__(self):
self.productName = ""
def convert_product_name(self, string_value, string_id):
"""
Replaces product name from string_value if it exists
NOTE that when this method is called first time it should be called by using
PRODUCT_NAME_ID as a string_id value, so that customized product name is set correctly
"""
#Set correct product name
if string_id == PRODUCT_NAME_ID:
#Remove quotes for the begin and end of the string if they exists
if string_value[0] == "\"" and string_value[len(string_value)-1] == "\"":
self.productName = string_value[1:-1]
else:
self.productName = string_value
else:
if self.productName == "":
                raise Exception("Product name is not set. It should be the first item in the localization xml")
if self.productName != PRODUCT_NAME_DEFAULT_VALUE:
                #Default product name has been changed. Replace it in this string too if it exists
string_value = string_value.replace(PRODUCT_NAME_DEFAULT_VALUE, self.productName)
return string_value
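# Illustrative usage sketch (not part of the original module); "WELCOME_TEXT"
# is a hypothetical string id:
#
#     customizer = LocCustomizer()
#     # The first call must use PRODUCT_NAME_ID so the product name is captured.
#     customizer.convert_product_name('"Acme Sync"', PRODUCT_NAME_ID)
#     customizer.convert_product_name("Welcome to F-Secure Mobile Sync!", "WELCOME_TEXT")
#     # -> "Welcome to Acme Sync!" once the product name has been customized.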
| apache-2.0 | -7,674,815,097,206,625,000 | 37.392157 | 103 | 0.643514 | false |
fr0uty/oartm | tests/kao/test_db_kamelot_fifo.py | 1 | 1412 | # coding: utf-8
from __future__ import unicode_literals, print_function
import pytest
from oar.lib import db
from oar.kao.job import insert_job
from oar.kao.platform import Platform
from oar.kao.kamelot_fifo import main, schedule_fifo_cycle
@pytest.yield_fixture(scope='function', autouse=True)
def minimal_db_initialization(request):
with db.session(ephemeral=True):
yield
def test_db_kamelot_fifo_no_hierarchy():
# add some resources
for i in range(5):
db['Resource'].create(network_address="localhost")
for i in range(5):
insert_job(res=[(60, [('resource_id=2', "")])], properties="")
main()
req = db['GanttJobsPrediction'].query.all()
# for i, r in enumerate(req):
# print "req:", r.moldable_id, r.start_time
assert len(req) == 2
def test_db_kamelot_fifo_w_hierarchy():
# add some resources
for i in range(5):
db['Resource'].create(network_address="localhost" + str(int(i / 2)))
for res in db['Resource'].query.all():
print(res.id, res.network_address)
for i in range(5):
insert_job(res=[(60, [('network_address=1', "")])],
properties="")
plt = Platform()
schedule_fifo_cycle(plt, "default", True)
req = db['GanttJobsPrediction'].query.all()
# for i, r in enumerate(req):
# print("req:", r.moldable_id, r.start_time)
assert len(req) == 3
| bsd-3-clause | 5,573,465,817,564,996,000 | 23.344828 | 76 | 0.626062 | false |
willforde/script.module.codequick | script.module.codequick/lib/codequick/support.py | 1 | 14851 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
# Standard Library Imports
import importlib
import binascii
import inspect
import logging
import time
import sys
import re
# Kodi imports
import xbmcaddon
import xbmcgui
import xbmc
# Package imports
from codequick.utils import parse_qs, ensure_native_str, urlparse, PY3, unicode_type
try:
# noinspection PyPep8Naming
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
if PY3:
from inspect import getfullargspec
PICKLE_PROTOCOL = 4
else:
# noinspection PyDeprecation
from inspect import getargspec as getfullargspec
PICKLE_PROTOCOL = 2
script_data = xbmcaddon.Addon("script.module.codequick")
addon_data = xbmcaddon.Addon()
plugin_id = addon_data.getAddonInfo("id")
logger_id = re.sub("[ .]", "-", addon_data.getAddonInfo("name"))
# Logger specific to this module
logger = logging.getLogger("%s.support" % logger_id)
# Listitem auto sort methods
auto_sort = set()
logging_map = {
10: xbmc.LOGDEBUG,
20: xbmc.LOGINFO if PY3 else xbmc.LOGNOTICE,
30: xbmc.LOGWARNING,
40: xbmc.LOGERROR,
50: xbmc.LOGFATAL,
}
class RouteMissing(KeyError):
"""
Exception class that is raisd when no
route is found in the registered routes.
"""
class KodiLogHandler(logging.Handler):
"""
Custom Logger Handler to forward logs to Kodi.
Log records will automatically be converted from unicode to utf8 encoded strings.
All debug messages will be stored locally and outputed as warning messages if a critical error occurred.
This is done so that debug messages will appear on the normal kodi log file without having to enable debug logging.
:ivar debug_msgs: Local store of degub messages.
"""
def __init__(self):
super(KodiLogHandler, self).__init__()
self.setFormatter(logging.Formatter("[%(name)s] %(message)s"))
self.debug_msgs = []
def emit(self, record): # type: (logging.LogRecord) -> None
"""Forward the log record to kodi, lets kodi handle the logging."""
formatted_msg = ensure_native_str(self.format(record))
log_level = record.levelno
# Forward the log record to kodi with translated log level
xbmc.log(formatted_msg, logging_map.get(log_level, 10))
# Keep a history of all debug records so they can be logged later if a critical error occurred
# Kodi by default, won't show debug messages unless debug logging is enabled
if log_level == 10:
self.debug_msgs.append(formatted_msg)
# If a critical error occurred, log all debug messages as warnings
elif log_level == 50 and self.debug_msgs:
xbmc.log("###### debug ######", xbmc.LOGWARNING)
for msg in self.debug_msgs:
xbmc.log(msg, xbmc.LOGWARNING)
xbmc.log("###### debug ######", xbmc.LOGWARNING)
class CallbackRef(object):
__slots__ = ("path", "parent", "is_playable", "is_folder")
def __init__(self, path, parent):
self.path = path.rstrip("/").replace(":", "/")
self.is_playable = parent.is_playable
self.is_folder = parent.is_folder
self.parent = parent
def __eq__(self, other):
return self.path == other.path
class Route(CallbackRef):
"""
Handle callback route data.
:param callback: The callable callback function.
:param parent: The parent class that will handle the response from callback.
:param str path: The route path to func/class.
:param dict parameters: Dict of parameters to pass to plugin instance.
"""
__slots__ = ("function", "callback", "parameters")
def __init__(self, callback, parent, path, parameters):
# Register a class callback
if inspect.isclass(callback):
msg = "Use of class based callbacks are Deprecated, please use function callbacks"
logger.warning("DeprecationWarning: " + msg)
if hasattr(callback, "run"):
parent = callback
self.function = callback.run
callback.test = staticmethod(self.unittest_caller)
else:
raise NameError("missing required 'run' method for class: '{}'".format(callback.__name__))
else:
# Register a function callback
callback.test = self.unittest_caller
self.parameters = parameters
self.function = callback
super(Route, self).__init__(path, parent)
self.callback = callback
def unittest_caller(self, *args, **kwargs):
"""
Function to allow callbacks to be easily called from unittests.
Parent argument will be auto instantiated and passed to callback.
This basically acts as a constructor to callback.
Test specific Keyword args:
execute_delayed: Execute any registered delayed callbacks.
:param args: Positional arguments to pass to callback.
:param kwargs: Keyword arguments to pass to callback.
:returns: The response from the callback function.
"""
execute_delayed = kwargs.pop("execute_delayed", False)
        # Change the selector to match the callback route being tested
        # This will ensure that the plugin paths are correct
dispatcher.selector = self.path
# Update support params with the params
# that are to be passed to callback
if args:
dispatcher.params["_args_"] = args
if kwargs:
dispatcher.params.update(kwargs)
# Instantiate the parent
parent_ins = self.parent()
try:
# Now we are ready to call the callback function and return its results
results = self.function(parent_ins, *args, **kwargs)
if inspect.isgenerator(results):
results = list(results)
except Exception:
raise
else:
# Execute Delated callback functions if any
if execute_delayed:
dispatcher.run_delayed()
return results
finally:
# Reset global datasets
dispatcher.reset()
auto_sort.clear()
class Dispatcher(object):
"""Class to handle registering and dispatching of callback functions."""
def __init__(self):
self.registered_delayed = []
self.registered_routes = {}
self.callback_params = {}
self.selector = "root"
self.params = {}
self.handle = -1
def reset(self):
"""Reset session parameters."""
self.registered_delayed[:] = []
self.callback_params.clear()
kodi_logger.debug_msgs = []
self.selector = "root"
self.params.clear()
auto_sort.clear()
def parse_args(self, redirect=None):
"""Extract arguments given by Kodi"""
_, _, route, raw_params, _ = urlparse.urlsplit(redirect if redirect else sys.argv[0] + sys.argv[2])
self.selector = route if len(route) > 1 else "root"
self.handle = int(sys.argv[1])
if raw_params:
params = parse_qs(raw_params)
self.params.update(params)
# Unpickle pickled data
if "_pickle_" in params:
unpickled = pickle.loads(binascii.unhexlify(self.params.pop("_pickle_")))
self.params.update(unpickled)
# Construct a separate dictionary for callback specific parameters
self.callback_params = {key: value for key, value in self.params.items()
if not (key.startswith(u"_") and key.endswith(u"_"))}
def get_route(self, path=None): # type: (str) -> Route
"""
Return the given route callback.
:param str path: The route path, if not given defaults to current callback
"""
path = path.rstrip("/") if path else self.selector.rstrip("/")
# Attempt to import the module where the route
# is located if it's not already registered
if path not in self.registered_routes:
module_path = "resources.lib.main" if path == "root" else ".".join(path.strip("/").split("/")[:-1])
logger.debug("Attempting to import route: %s", module_path)
try:
importlib.import_module(module_path)
except ImportError:
raise RouteMissing("unable to import route module: %s" % module_path)
try:
return self.registered_routes[path]
except KeyError:
raise RouteMissing(path)
def register_callback(self, callback, parent, parameters):
"""Register route callback function"""
# Construct route path
path = callback.__name__.lower()
if path != "root":
path = "/{}/{}".format(callback.__module__.strip("_").replace(".", "/"), callback.__name__).lower()
# Register callback
if path in self.registered_routes:
logger.debug("encountered duplicate route: '%s'", path)
self.registered_routes[path] = route = Route(callback, parent, path, parameters)
callback.route = route
return callback
def register_delayed(self, *callback):
"""Register a function that will be called later, after content has been listed."""
self.registered_delayed.append(callback)
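        # Illustrative note (not part of the original code): callers pass a
        # (func, args, kwargs, function_type) tuple, matching how run_delayed()
        # unpacks it. function_type 2 always runs, 1 runs only after an error,
        # 0 runs only on success, e.g.:
        #   dispatcher.register_delayed(cleanup_func, (), {}, 2)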
# noinspection PyIncorrectDocstring
def run_callback(self, process_errors=True, redirect=None):
"""
The starting point of the add-on.
This function will handle the execution of the "callback" functions.
The callback function that will be executed, will be auto selected.
The "root" callback, is the callback that will be the initial
starting point for the add-on.
:param bool process_errors: Enable/Disable internal error handler. (default => True)
        :returns: None if no errors were raised. If an error was raised and process_errors is
                  True (default), the raised Exception is returned.
:rtype: Exception or None
"""
self.reset()
self.parse_args(redirect)
logger.debug("Dispatching to route: '%s'", self.selector)
logger.debug("Callback parameters: '%s'", self.callback_params)
try:
# Fetch the controling class and callback function/method
route = self.get_route(self.selector)
execute_time = time.time()
# Initialize controller and execute callback
parent_ins = route.parent()
arg_params = self.params.get("_args_", [])
redirect = parent_ins(route, arg_params, self.callback_params)
except Exception as e:
self.run_delayed(e)
# Don't do anything with the error
# if process_errors is disabled
if not process_errors:
raise
try:
msg = str(e)
except UnicodeEncodeError:
# This is python 2 only code
# We only use unicode to fetch message when we
# know that we are dealing with unicode data
msg = unicode_type(e).encode("utf8")
# Log the error in both the gui and the kodi log file
logger.exception(msg)
dialog = xbmcgui.Dialog()
dialog.notification(e.__class__.__name__, msg, addon_data.getAddonInfo("icon"))
return e
else:
logger.debug("Route Execution Time: %ims", (time.time() - execute_time) * 1000)
self.run_delayed()
if redirect:
self.run_callback(process_errors, redirect)
def run_delayed(self, exception=None):
"""Execute all delayed callbacks, if any."""
if self.registered_delayed:
# Time before executing callbacks
start_time = time.time()
# Execute in order of last in first out (LIFO).
while self.registered_delayed:
func, args, kwargs, function_type = self.registered_delayed.pop()
if function_type == 2 or bool(exception) == function_type:
# Add raised exception to callback if requested
if "exception" in getfullargspec(func).args:
kwargs["exception"] = exception
try:
func(*args, **kwargs)
except Exception as e:
logger.exception(str(e))
# Log execution time of callbacks
logger.debug("Callbacks Execution Time: %ims", (time.time() - start_time) * 1000)
def build_path(callback=None, args=None, query=None, **extra_query):
"""
    Build an addon url that can be passed to Kodi for use when creating listitems.
    :param callback: [opt] The route selector path referencing the callback object. (default => current route selector)
    :param tuple args: [opt] Positional arguments that will be added to the plugin path.
    :param dict query: [opt] A set of query key/value pairs to add to the plugin path.
    :param extra_query: [opt] Keyword arguments, if given, will be added to the current set of query parameters.
:return: Plugin url for kodi.
:rtype: str
"""
# Set callback to current callback if not given
if callback and hasattr(callback, "route"):
route = callback.route
elif isinstance(callback, CallbackRef):
route = callback
elif callback:
msg = "passing in callback path is deprecated, use callback reference 'Route.ref' instead"
logger.warning("DeprecationWarning: " + msg)
route = dispatcher.get_route(callback)
else:
route = dispatcher.get_route()
# Convert args to keyword args if required
if args:
query["_args_"] = args
    # If extra query parameters are given then append
    # them to the current set of query parameters
if extra_query:
query = dispatcher.params.copy()
query.update(extra_query)
    # Encode the query parameters using pickle and hex encoding
if query:
pickled = binascii.hexlify(pickle.dumps(query, protocol=PICKLE_PROTOCOL))
query = "_pickle_={}".format(pickled.decode("ascii") if PY3 else pickled)
# Build kodi url with new path and query parameters
# NOTE: Kodi really needs a trailing '/'
return urlparse.urlunsplit(("plugin", plugin_id, route.path + "/", query, ""))
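# Illustrative usage sketch (not part of the original module); "video_list" is a
# hypothetical callback registered under resources/lib/main:
#
#     url = build_path(video_list, query={"category": "news"})
#     # -> "plugin://<addon.id>/resources/lib/main/video_list/?_pickle_=..."
#
# The query dict is pickled and hex encoded into the single "_pickle_" parameter.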
# Setup kodi logging
kodi_logger = KodiLogHandler()
base_logger = logging.getLogger()
base_logger.addHandler(kodi_logger)
base_logger.setLevel(logging.DEBUG)
base_logger.propagate = False
# Dispatcher to manage route callbacks
dispatcher = Dispatcher()
run = dispatcher.run_callback
get_route = dispatcher.get_route
| gpl-2.0 | 6,763,565,868,317,495,000 | 34.958838 | 119 | 0.619824 | false |
grlee77/numpy | numpy/lib/tests/test_stride_tricks.py | 1 | 18890 | import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import (
assert_equal, assert_array_equal, assert_raises, assert_,
assert_raises_regex, assert_warns,
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to,
broadcast_shapes,
)
def assert_shapes_correct(input_shapes, expected_shape):
# Broadcast a list of arrays with the given input shapes and check the
# common output shape.
inarrays = [np.zeros(s) for s in input_shapes]
outarrays = broadcast_arrays(*inarrays)
outshapes = [a.shape for a in outarrays]
expected = [expected_shape] * len(inarrays)
assert_equal(outshapes, expected)
def assert_incompatible_shapes_raise(input_shapes):
# Broadcast a list of arrays with the given (incompatible) input shapes
# and check that they raise a ValueError.
inarrays = [np.zeros(s) for s in input_shapes]
assert_raises(ValueError, broadcast_arrays, *inarrays)
def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
# Broadcast two shapes against each other and check that the data layout
# is the same as if a ufunc did the broadcasting.
x0 = np.zeros(shape0, dtype=int)
# Note that multiply.reduce's identity element is 1.0, so when shape1==(),
# this gives the desired n==1.
n = int(np.multiply.reduce(shape1))
x1 = np.arange(n).reshape(shape1)
if transposed:
x0 = x0.T
x1 = x1.T
if flipped:
x0 = x0[::-1]
x1 = x1[::-1]
# Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the
# result should be exactly the same as the broadcasted view of x1.
y = x0 + x1
b0, b1 = broadcast_arrays(x0, x1)
assert_array_equal(y, b1)
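# Illustrative sketch (not one of the original numpy tests): broadcast_arrays
# returns views whose broadcast axes have a 0 stride, which is why their data
# layout matches what a ufunc produces when it broadcasts internally. The
# leading underscore keeps pytest from collecting this helper.
def _example_broadcast_strides():
    x1 = np.arange(3)
    b0, b1 = broadcast_arrays(np.zeros((4, 3)), x1)
    # b1 is a (4, 3) view of x1; the broadcast first axis has stride 0.
    assert b1.shape == (4, 3)
    assert b1.strides == (0, x1.itemsize)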
def test_same():
x = np.arange(10)
y = np.arange(10)
bx, by = broadcast_arrays(x, y)
assert_array_equal(x, bx)
assert_array_equal(y, by)
def test_broadcast_kwargs():
# ensure that a TypeError is appropriately raised when
# np.broadcast_arrays() is called with any keyword
# argument other than 'subok'
x = np.arange(10)
y = np.arange(10)
with assert_raises_regex(TypeError, 'got an unexpected keyword'):
broadcast_arrays(x, y, dtype='float64')
def test_one_off():
x = np.array([[1, 2, 3]])
y = np.array([[1], [2], [3]])
bx, by = broadcast_arrays(x, y)
bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
by0 = bx0.T
assert_array_equal(bx0, bx)
assert_array_equal(by0, by)
def test_same_input_shapes():
# Check that the final shape is just the input shape.
data = [
(),
(1,),
(3,),
(0, 1),
(0, 3),
(1, 0),
(3, 0),
(1, 3),
(3, 1),
(3, 3),
]
for shape in data:
input_shapes = [shape]
# Single input.
assert_shapes_correct(input_shapes, shape)
# Double input.
input_shapes2 = [shape, shape]
assert_shapes_correct(input_shapes2, shape)
# Triple input.
input_shapes3 = [shape, shape, shape]
assert_shapes_correct(input_shapes3, shape)
def test_two_compatible_by_ones_input_shapes():
# Check that two different input shapes of the same length, but some have
# ones, broadcast to the correct shape.
data = [
[[(1,), (3,)], (3,)],
[[(1, 3), (3, 3)], (3, 3)],
[[(3, 1), (3, 3)], (3, 3)],
[[(1, 3), (3, 1)], (3, 3)],
[[(1, 1), (3, 3)], (3, 3)],
[[(1, 1), (1, 3)], (1, 3)],
[[(1, 1), (3, 1)], (3, 1)],
[[(1, 0), (0, 0)], (0, 0)],
[[(0, 1), (0, 0)], (0, 0)],
[[(1, 0), (0, 1)], (0, 0)],
[[(1, 1), (0, 0)], (0, 0)],
[[(1, 1), (1, 0)], (1, 0)],
[[(1, 1), (0, 1)], (0, 1)],
]
for input_shapes, expected_shape in data:
assert_shapes_correct(input_shapes, expected_shape)
# Reverse the input shapes since broadcasting should be symmetric.
assert_shapes_correct(input_shapes[::-1], expected_shape)
def test_two_compatible_by_prepending_ones_input_shapes():
# Check that two different input shapes (of different lengths) broadcast
# to the correct shape.
data = [
[[(), (3,)], (3,)],
[[(3,), (3, 3)], (3, 3)],
[[(3,), (3, 1)], (3, 3)],
[[(1,), (3, 3)], (3, 3)],
[[(), (3, 3)], (3, 3)],
[[(1, 1), (3,)], (1, 3)],
[[(1,), (3, 1)], (3, 1)],
[[(1,), (1, 3)], (1, 3)],
[[(), (1, 3)], (1, 3)],
[[(), (3, 1)], (3, 1)],
[[(), (0,)], (0,)],
[[(0,), (0, 0)], (0, 0)],
[[(0,), (0, 1)], (0, 0)],
[[(1,), (0, 0)], (0, 0)],
[[(), (0, 0)], (0, 0)],
[[(1, 1), (0,)], (1, 0)],
[[(1,), (0, 1)], (0, 1)],
[[(1,), (1, 0)], (1, 0)],
[[(), (1, 0)], (1, 0)],
[[(), (0, 1)], (0, 1)],
]
for input_shapes, expected_shape in data:
assert_shapes_correct(input_shapes, expected_shape)
# Reverse the input shapes since broadcasting should be symmetric.
assert_shapes_correct(input_shapes[::-1], expected_shape)
def test_incompatible_shapes_raise_valueerror():
# Check that a ValueError is raised for incompatible shapes.
data = [
[(3,), (4,)],
[(2, 3), (2,)],
[(3,), (3,), (4,)],
[(1, 3, 4), (2, 3, 3)],
]
for input_shapes in data:
assert_incompatible_shapes_raise(input_shapes)
# Reverse the input shapes since broadcasting should be symmetric.
assert_incompatible_shapes_raise(input_shapes[::-1])
def test_same_as_ufunc():
# Check that the data layout is the same as if a ufunc did the operation.
data = [
[[(1,), (3,)], (3,)],
[[(1, 3), (3, 3)], (3, 3)],
[[(3, 1), (3, 3)], (3, 3)],
[[(1, 3), (3, 1)], (3, 3)],
[[(1, 1), (3, 3)], (3, 3)],
[[(1, 1), (1, 3)], (1, 3)],
[[(1, 1), (3, 1)], (3, 1)],
[[(1, 0), (0, 0)], (0, 0)],
[[(0, 1), (0, 0)], (0, 0)],
[[(1, 0), (0, 1)], (0, 0)],
[[(1, 1), (0, 0)], (0, 0)],
[[(1, 1), (1, 0)], (1, 0)],
[[(1, 1), (0, 1)], (0, 1)],
[[(), (3,)], (3,)],
[[(3,), (3, 3)], (3, 3)],
[[(3,), (3, 1)], (3, 3)],
[[(1,), (3, 3)], (3, 3)],
[[(), (3, 3)], (3, 3)],
[[(1, 1), (3,)], (1, 3)],
[[(1,), (3, 1)], (3, 1)],
[[(1,), (1, 3)], (1, 3)],
[[(), (1, 3)], (1, 3)],
[[(), (3, 1)], (3, 1)],
[[(), (0,)], (0,)],
[[(0,), (0, 0)], (0, 0)],
[[(0,), (0, 1)], (0, 0)],
[[(1,), (0, 0)], (0, 0)],
[[(), (0, 0)], (0, 0)],
[[(1, 1), (0,)], (1, 0)],
[[(1,), (0, 1)], (0, 1)],
[[(1,), (1, 0)], (1, 0)],
[[(), (1, 0)], (1, 0)],
[[(), (0, 1)], (0, 1)],
]
for input_shapes, expected_shape in data:
        assert_same_as_ufunc(input_shapes[0], input_shapes[1])
# Reverse the input shapes since broadcasting should be symmetric.
assert_same_as_ufunc(input_shapes[1], input_shapes[0])
# Try them transposed, too.
assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
# ... and flipped for non-rank-0 inputs in order to test negative
# strides.
if () not in input_shapes:
assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
def test_broadcast_to_succeeds():
data = [
[np.array(0), (0,), np.array(0)],
[np.array(0), (1,), np.zeros(1)],
[np.array(0), (3,), np.zeros(3)],
[np.ones(1), (1,), np.ones(1)],
[np.ones(1), (2,), np.ones(2)],
[np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
[np.arange(3), (3,), np.arange(3)],
[np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
[np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
# test if shape is not a tuple
[np.ones(0), 0, np.ones(0)],
[np.ones(1), 1, np.ones(1)],
[np.ones(1), 2, np.ones(2)],
# these cases with size 0 are strange, but they reproduce the behavior
# of broadcasting with ufuncs (see test_same_as_ufunc above)
[np.ones(1), (0,), np.ones(0)],
[np.ones((1, 2)), (0, 2), np.ones((0, 2))],
[np.ones((2, 1)), (2, 0), np.ones((2, 0))],
]
for input_array, shape, expected in data:
actual = broadcast_to(input_array, shape)
assert_array_equal(expected, actual)
def test_broadcast_to_raises():
data = [
[(0,), ()],
[(1,), ()],
[(3,), ()],
[(3,), (1,)],
[(3,), (2,)],
[(3,), (4,)],
[(1, 2), (2, 1)],
[(1, 1), (1,)],
[(1,), -1],
[(1,), (-1,)],
[(1, 2), (-1, 2)],
]
for orig_shape, target_shape in data:
arr = np.zeros(orig_shape)
assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))
def test_broadcast_shape():
# tests internal _broadcast_shape
# _broadcast_shape is already exercised indirectly by broadcast_arrays
# _broadcast_shape is also exercised by the public broadcast_shapes function
assert_equal(_broadcast_shape(), ())
assert_equal(_broadcast_shape([1, 2]), (2,))
assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))
# regression tests for gh-5862
assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32
assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))
def test_broadcast_shapes_succeeds():
# tests public broadcast_shapes
data = [
[[], ()],
[[()], ()],
[[(7,)], (7,)],
[[(1, 2), (2,)], (1, 2)],
[[(1, 1)], (1, 1)],
[[(1, 1), (3, 4)], (3, 4)],
[[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
[[(5, 6, 1)], (5, 6, 1)],
[[(1, 3), (3, 1)], (3, 3)],
[[(1, 0), (0, 0)], (0, 0)],
[[(0, 1), (0, 0)], (0, 0)],
[[(1, 0), (0, 1)], (0, 0)],
[[(1, 1), (0, 0)], (0, 0)],
[[(1, 1), (1, 0)], (1, 0)],
[[(1, 1), (0, 1)], (0, 1)],
[[(), (0,)], (0,)],
[[(0,), (0, 0)], (0, 0)],
[[(0,), (0, 1)], (0, 0)],
[[(1,), (0, 0)], (0, 0)],
[[(), (0, 0)], (0, 0)],
[[(1, 1), (0,)], (1, 0)],
[[(1,), (0, 1)], (0, 1)],
[[(1,), (1, 0)], (1, 0)],
[[(), (1, 0)], (1, 0)],
[[(), (0, 1)], (0, 1)],
[[(1,), (3,)], (3,)],
[[2, (3, 2)], (3, 2)],
]
for input_shapes, target_shape in data:
assert_equal(broadcast_shapes(*input_shapes), target_shape)
assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2))
assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2))
# regression tests for gh-5862
assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,))
def test_broadcast_shapes_raises():
# tests public broadcast_shapes
data = [
[(3,), (4,)],
[(2, 3), (2,)],
[(3,), (3,), (4,)],
[(1, 3, 4), (2, 3, 3)],
[(1, 2), (3,1), (3,2), (10, 5)],
[2, (2, 3)],
]
for input_shapes in data:
assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes))
bad_args = [(2,)] * 32 + [(3,)] * 32
assert_raises(ValueError, lambda: broadcast_shapes(*bad_args))
def test_as_strided():
a = np.array([None])
a_view = as_strided(a)
expected = np.array([None])
    assert_array_equal(a_view, expected)
a = np.array([1, 2, 3, 4])
a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
expected = np.array([1, 3])
assert_array_equal(a_view, expected)
a = np.array([1, 2, 3, 4])
a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
assert_array_equal(a_view, expected)
# Regression test for gh-5081
dt = np.dtype([('num', 'i4'), ('obj', 'O')])
a = np.empty((4,), dtype=dt)
a['num'] = np.arange(1, 5)
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
expected_num = [[1, 2, 3, 4]] * 3
expected_obj = [[None]*4]*3
assert_equal(a_view.dtype, dt)
assert_array_equal(expected_num, a_view['num'])
assert_array_equal(expected_obj, a_view['obj'])
# Make sure that void types without fields are kept unchanged
a = np.empty((4,), dtype='V4')
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
# Make sure that the only type that could fail is properly handled
dt = np.dtype({'names': [''], 'formats': ['V4']})
a = np.empty((4,), dtype=dt)
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
# Custom dtypes should not be lost (gh-9161)
r = [rational(i) for i in range(4)]
a = np.array(r, dtype=rational)
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
assert_array_equal([r] * 3, a_view)
def as_strided_writeable():
arr = np.ones(10)
view = as_strided(arr, writeable=False)
assert_(not view.flags.writeable)
# Check that writeable also is fine:
view = as_strided(arr, writeable=True)
assert_(view.flags.writeable)
view[...] = 3
assert_array_equal(arr, np.full_like(arr, 3))
# Test that things do not break down for readonly:
arr.flags.writeable = False
view = as_strided(arr, writeable=False)
view = as_strided(arr, writeable=True)
assert_(not view.flags.writeable)
class VerySimpleSubClass(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, subok=True, **kwargs).view(cls)
class SimpleSubClass(VerySimpleSubClass):
def __new__(cls, *args, **kwargs):
self = np.array(*args, subok=True, **kwargs).view(cls)
self.info = 'simple'
return self
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '') + ' finalized'
def test_subclasses():
# test that subclass is preserved only if subok=True
a = VerySimpleSubClass([1, 2, 3, 4])
assert_(type(a) is VerySimpleSubClass)
a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
assert_(type(a_view) is np.ndarray)
a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
assert_(type(a_view) is VerySimpleSubClass)
# test that if a subclass has __array_finalize__, it is used
a = SimpleSubClass([1, 2, 3, 4])
a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
assert_(type(a_view) is SimpleSubClass)
assert_(a_view.info == 'simple finalized')
# similar tests for broadcast_arrays
b = np.arange(len(a)).reshape(-1, 1)
a_view, b_view = broadcast_arrays(a, b)
assert_(type(a_view) is np.ndarray)
assert_(type(b_view) is np.ndarray)
assert_(a_view.shape == b_view.shape)
a_view, b_view = broadcast_arrays(a, b, subok=True)
assert_(type(a_view) is SimpleSubClass)
assert_(a_view.info == 'simple finalized')
assert_(type(b_view) is np.ndarray)
assert_(a_view.shape == b_view.shape)
# and for broadcast_to
shape = (2, 4)
a_view = broadcast_to(a, shape)
assert_(type(a_view) is np.ndarray)
assert_(a_view.shape == shape)
a_view = broadcast_to(a, shape, subok=True)
assert_(type(a_view) is SimpleSubClass)
assert_(a_view.info == 'simple finalized')
assert_(a_view.shape == shape)
def test_writeable():
# broadcast_to should return a readonly array
original = np.array([1, 2, 3])
result = broadcast_to(original, (2, 3))
assert_equal(result.flags.writeable, False)
assert_raises(ValueError, result.__setitem__, slice(None), 0)
# but the result of broadcast_arrays needs to be writeable, to
# preserve backwards compatibility
for is_broadcast, results in [(False, broadcast_arrays(original,)),
(True, broadcast_arrays(0, original))]:
for result in results:
# This will change to False in a future version
if is_broadcast:
with assert_warns(FutureWarning):
assert_equal(result.flags.writeable, True)
with assert_warns(DeprecationWarning):
result[:] = 0
# Warning not emitted, writing to the array resets it
assert_equal(result.flags.writeable, True)
else:
# No warning:
assert_equal(result.flags.writeable, True)
for results in [broadcast_arrays(original),
broadcast_arrays(0, original)]:
for result in results:
# resets the warn_on_write DeprecationWarning
result.flags.writeable = True
# check: no warning emitted
assert_equal(result.flags.writeable, True)
result[:] = 0
# keep readonly input readonly
original.flags.writeable = False
_, result = broadcast_arrays(0, original)
assert_equal(result.flags.writeable, False)
# regression test for GH6491
shape = (2,)
strides = [0]
tricky_array = as_strided(np.array(0), shape, strides)
other = np.zeros((1,))
first, second = broadcast_arrays(tricky_array, other)
assert_(first.shape == second.shape)
def test_writeable_memoryview():
# The result of broadcast_arrays exports as a non-writeable memoryview
# because otherwise there is no good way to opt in to the new behaviour
# (i.e. you would need to set writeable to False explicitly).
# See gh-13929.
original = np.array([1, 2, 3])
for is_broadcast, results in [(False, broadcast_arrays(original,)),
(True, broadcast_arrays(0, original))]:
for result in results:
# This will change to False in a future version
if is_broadcast:
# memoryview(result, writable=True) will give warning but cannot
# be tested using the python API.
assert memoryview(result).readonly
else:
assert not memoryview(result).readonly
def test_reference_types():
input_array = np.array('a', dtype=object)
expected = np.array(['a'] * 3, dtype=object)
actual = broadcast_to(input_array, (3,))
assert_array_equal(expected, actual)
actual, _ = broadcast_arrays(input_array, np.ones(3))
assert_array_equal(expected, actual)
| bsd-3-clause | -1,454,254,805,155,908,400 | 33.981481 | 82 | 0.521122 | false |
SamuelDauzon/Improllow-up | tasks/migrations/0006_auto_20151022_1917.py | 1 | 1081 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tasks', '0005_auto_20151018_1303'),
]
operations = [
migrations.CreateModel(
name='TaskType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Date de création')),
('modified', models.DateTimeField(verbose_name='Date de modification', auto_now=True)),
('name', models.CharField(max_length=128, verbose_name='Nom du type de tâche')),
],
options={
'ordering': ('-created',),
'abstract': False,
},
),
migrations.AlterField(
model_name='task',
name='project',
field=models.ForeignKey(verbose_name='Type de tâche', to='tasks.TaskType'),
),
]
| mit | -790,270,236,375,810,000 | 32.6875 | 114 | 0.55102 | false |
virajkanwade/plugin.video.zeetv | resources/lib/slimit/lexer.py | 1 | 15051 | ###############################################################################
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <[email protected]>'
import ply.lex
from slimit.unicode_chars import (
LETTER,
DIGIT,
COMBINING_MARK,
CONNECTOR_PUNCTUATION,
)
# See "Regular Expression Literals" at
# http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html
TOKENS_THAT_IMPLY_DIVISON = frozenset([
'ID',
'NUMBER',
'STRING',
'REGEX',
'TRUE',
'FALSE',
'NULL',
'THIS',
'PLUSPLUS',
'MINUSMINUS',
'RPAREN',
'RBRACE',
'RBRACKET',
])
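# Illustrative note (not part of the original module): the previous token decides
# how a leading '/' is read, e.g.
#     total / count        ->  ID DIV ID       (ID implies division)
#     pattern = /ab+c/g    ->  ID EQ REGEX     (EQ does not, so a regex literal)
# which is why Lexer.token() below consults TOKENS_THAT_IMPLY_DIVISON before
# deciding whether to scan a regular expression literal.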
class Lexer(object):
"""A JavaScript lexer.
>>> from slimit.lexer import Lexer
>>> lexer = Lexer()
Lexer supports iteration:
>>> lexer.input('a = 1;')
>>> for token in lexer:
... print token
...
LexToken(ID,'a',1,0)
LexToken(EQ,'=',1,2)
LexToken(NUMBER,'1',1,4)
LexToken(SEMI,';',1,5)
Or call one token at a time with 'token' method:
>>> lexer.input('a = 1;')
>>> while True:
... token = lexer.token()
... if not token:
... break
... print token
...
LexToken(ID,'a',1,0)
LexToken(EQ,'=',1,2)
LexToken(NUMBER,'1',1,4)
LexToken(SEMI,';',1,5)
>>> lexer.input('a = 1;')
>>> token = lexer.token()
>>> token.type, token.value, token.lineno, token.lexpos
('ID', 'a', 1, 0)
For more information see:
http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
"""
def __init__(self):
self.prev_token = None
self.cur_token = None
self.next_tokens = []
self.build()
def build(self, **kwargs):
"""Build the lexer."""
self.lexer = ply.lex.lex(object=self, **kwargs)
def input(self, text):
self.lexer.input(text)
def token(self):
if self.next_tokens:
return self.next_tokens.pop()
lexer = self.lexer
while True:
pos = lexer.lexpos
try:
char = lexer.lexdata[pos]
while char in ' \t':
pos += 1
char = lexer.lexdata[pos]
next_char = lexer.lexdata[pos + 1]
except IndexError:
tok = self._get_update_token()
if tok is not None and tok.type == 'LINE_TERMINATOR':
continue
else:
return tok
if char != '/' or (char == '/' and next_char in ('/', '*')):
tok = self._get_update_token()
if tok.type in ('LINE_TERMINATOR',
'LINE_COMMENT', 'BLOCK_COMMENT'):
continue
else:
return tok
# current character is '/' which is either division or regex
cur_token = self.cur_token
is_division_allowed = (
cur_token is not None and
cur_token.type in TOKENS_THAT_IMPLY_DIVISON
)
if is_division_allowed:
return self._get_update_token()
else:
self.prev_token = self.cur_token
self.cur_token = self._read_regex()
return self.cur_token
def auto_semi(self, token):
if (token is None or token.type == 'RBRACE'
or self._is_prev_token_lt()
):
if token:
self.next_tokens.append(token)
return self._create_semi_token(token)
def _is_prev_token_lt(self):
return self.prev_token and self.prev_token.type == 'LINE_TERMINATOR'
def _read_regex(self):
self.lexer.begin('regex')
token = self.lexer.token()
self.lexer.begin('INITIAL')
return token
def _get_update_token(self):
self.prev_token = self.cur_token
self.cur_token = self.lexer.token()
# insert semicolon before restricted tokens
# See section 7.9.1 ECMA262
if (self.cur_token is not None
and self.cur_token.type == 'LINE_TERMINATOR'
and self.prev_token is not None
and self.prev_token.type in ['BREAK', 'CONTINUE',
'RETURN', 'THROW']
):
return self._create_semi_token(self.cur_token)
return self.cur_token
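    # Illustrative note (not in the original source): the restricted-token rule
    # above means that input such as
    #     return
    #     x + y
    # is tokenized as RETURN SEMI ID PLUS ID, so the parser effectively sees
    # ``return;`` followed by the separate expression statement ``x + y``.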
def _create_semi_token(self, orig_token):
token = ply.lex.LexToken()
token.type = 'SEMI'
token.value = ';'
if orig_token is not None:
token.lineno = orig_token.lineno
token.lexpos = orig_token.lexpos
else:
token.lineno = 0
token.lexpos = 0
return token
# iterator protocol
def __iter__(self):
return self
def next(self):
token = self.token()
if not token:
raise StopIteration
return token
states = (
('regex', 'exclusive'),
)
keywords = (
'BREAK', 'CASE', 'CATCH', 'CONTINUE', 'DEBUGGER', 'DEFAULT', 'DELETE',
'DO', 'ELSE', 'FINALLY', 'FOR', 'FUNCTION', 'IF', 'IN',
'INSTANCEOF', 'NEW', 'RETURN', 'SWITCH', 'THIS', 'THROW', 'TRY',
'TYPEOF', 'VAR', 'VOID', 'WHILE', 'WITH', 'NULL', 'TRUE', 'FALSE',
# future reserved words - well, it's uncommented now to make
# IE8 happy because it chokes up on minification:
# obj["class"] -> obj.class
# 'CLASS', 'CONST', 'ENUM', 'EXPORT', 'EXTENDS', 'IMPORT', 'SUPER',
)
keywords_dict = dict((key.lower(), key) for key in keywords)
tokens = (
# Punctuators
'PERIOD', 'COMMA', 'SEMI', 'COLON', # . , ; :
'PLUS', 'MINUS', 'MULT', 'DIV', 'MOD', # + - * / %
'BAND', 'BOR', 'BXOR', 'BNOT', # & | ^ ~
'CONDOP', # conditional operator ?
'NOT', # !
'LPAREN', 'RPAREN', # ( and )
'LBRACE', 'RBRACE', # { and }
'LBRACKET', 'RBRACKET', # [ and ]
'EQ', 'EQEQ', 'NE', # = == !=
'STREQ', 'STRNEQ', # === and !==
'LT', 'GT', # < and >
'LE', 'GE', # <= and >=
'OR', 'AND', # || and &&
'PLUSPLUS', 'MINUSMINUS', # ++ and --
'LSHIFT', # <<
'RSHIFT', 'URSHIFT', # >> and >>>
'PLUSEQUAL', 'MINUSEQUAL', # += and -=
'MULTEQUAL', 'DIVEQUAL', # *= and /=
'LSHIFTEQUAL', # <<=
'RSHIFTEQUAL', 'URSHIFTEQUAL', # >>= and >>>=
'ANDEQUAL', 'MODEQUAL', # &= and %=
'XOREQUAL', 'OREQUAL', # ^= and |=
# Terminal types
'NUMBER', 'STRING', 'ID', 'REGEX',
# Properties
'GETPROP', 'SETPROP',
# Comments
'LINE_COMMENT', 'BLOCK_COMMENT',
'LINE_TERMINATOR',
) + keywords
# adapted from https://bitbucket.org/ned/jslex
t_regex_REGEX = r"""(?:
/ # opening slash
# First character is..
(?: [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
(?: [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
(?: [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
(?: [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
)
"""
t_regex_ignore = ' \t'
def t_regex_error(self, token):
raise TypeError(
"Error parsing regular expression '%s' at %s" % (
token.value, token.lineno)
)
# Punctuators
t_PERIOD = r'\.'
t_COMMA = r','
t_SEMI = r';'
t_COLON = r':'
t_PLUS = r'\+'
t_MINUS = r'-'
t_MULT = r'\*'
t_DIV = r'/'
t_MOD = r'%'
t_BAND = r'&'
t_BOR = r'\|'
t_BXOR = r'\^'
t_BNOT = r'~'
t_CONDOP = r'\?'
t_NOT = r'!'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACE = r'{'
t_RBRACE = r'}'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_EQ = r'='
t_EQEQ = r'=='
t_NE = r'!='
t_STREQ = r'==='
t_STRNEQ = r'!=='
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_OR = r'\|\|'
t_AND = r'&&'
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'--'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_URSHIFT = r'>>>'
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_MULTEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_URSHIFTEQUAL = r'>>>='
t_ANDEQUAL = r'&='
t_MODEQUAL = r'%='
t_XOREQUAL = r'\^='
t_OREQUAL = r'\|='
#t_LINE_COMMENT = r'//[^\r\n]*'
#t_BLOCK_COMMENT = r'/\*[^*]*\*+([^/*][^*]*\*+)*/'
t_LINE_TERMINATOR = r'[\n\r]+'
t_ignore = ' \t'
t_NUMBER = r"""
(?:
0[xX][0-9a-fA-F]+ # hex_integer_literal
| 0[0-7]+ # or octal_integer_literal (spec B.1.1)
| (?: # or decimal_literal
(?:0|[1-9][0-9]*) # decimal_integer_literal
\. # dot
[0-9]* # decimal_digits_opt
(?:[eE][+-]?[0-9]+)? # exponent_part_opt
|
\. # dot
[0-9]+ # decimal_digits
(?:[eE][+-]?[0-9]+)? # exponent_part_opt
|
(?:0|[1-9][0-9]*) # decimal_integer_literal
(?:[eE][+-]?[0-9]+)? # exponent_part_opt
)
)
"""
string = r"""
(?:
# double quoted string
(?:" # opening double quote
(?: [^"\\\n\r] # no \, line terminators or "
| \\[a-zA-Z!-\/:-@\[-`{-~] # or escaped characters
| \\x[0-9a-fA-F]{2} # or hex_escape_sequence
| \\u[0-9a-fA-F]{4} # or unicode_escape_sequence
)*? # zero or many times
(?: \\\n # multiline ?
(?:
[^"\\\n\r] # no \, line terminators or "
| \\[a-zA-Z!-\/:-@\[-`{-~] # or escaped characters
| \\x[0-9a-fA-F]{2} # or hex_escape_sequence
| \\u[0-9a-fA-F]{4} # or unicode_escape_sequence
)*? # zero or many times
)*
") # closing double quote
|
# single quoted string
(?:' # opening single quote
(?: [^'\\\n\r] # no \, line terminators or '
| \\[a-zA-Z!-\/:-@\[-`{-~] # or escaped characters
| \\x[0-9a-fA-F]{2} # or hex_escape_sequence
| \\u[0-9a-fA-F]{4} # or unicode_escape_sequence
)*? # zero or many times
(?: \\\n # multiline ?
(?:
[^'\\\n\r] # no \, line terminators or '
| \\[a-zA-Z!-\/:-@\[-`{-~] # or escaped characters
| \\x[0-9a-fA-F]{2} # or hex_escape_sequence
| \\u[0-9a-fA-F]{4} # or unicode_escape_sequence
)*? # zero or many times
)*
') # closing single quote
)
""" # "
@ply.lex.TOKEN(string)
def t_STRING(self, token):
# remove escape + new line sequence used for strings
# written across multiple lines of code
token.value = token.value.replace('\\\n', '')
return token
# XXX: <ZWNJ> <ZWJ> ?
identifier_start = r'(?:' + r'[a-zA-Z_$]' + r'|' + LETTER + r')+'
identifier_part = (
r'(?:' + COMBINING_MARK + r'|' + r'[0-9a-zA-Z_$]' + r'|' + DIGIT +
r'|' + CONNECTOR_PUNCTUATION + r')*'
)
identifier = identifier_start + identifier_part
getprop = r'get' + r'(?=\s' + identifier + r')'
@ply.lex.TOKEN(getprop)
def t_GETPROP(self, token):
return token
setprop = r'set' + r'(?=\s' + identifier + r')'
@ply.lex.TOKEN(setprop)
def t_SETPROP(self, token):
return token
@ply.lex.TOKEN(identifier)
def t_ID(self, token):
token.type = self.keywords_dict.get(token.value, 'ID')
return token
def t_error(self, token):
print 'Illegal character %r at %s:%s after %s' % (
token.value[0], token.lineno, token.lexpos, self.prev_token)
token.lexer.skip(1)
| gpl-3.0 | 7,872,050,808,526,940,000 | 33.441648 | 79 | 0.431732 | false |
h4ck3rm1k3/gcc_py_introspector | gcc/tree/tnode.py | 1 | 1044 | class TNode :
def __init__(self, node_id, node_type, o):
self.node_id=node_id
self.node_type=node_type
#self.o=o
def nid(self):
return self.node_id.n
def pstack(self):
r = ""
debug( "Stack:%s" % pprint2.pformat(self.o.stack))
#debug(pprint2.pformat(dir(self.o)))
#debug(pprint2.pformat(self.o.__dict__))
for s in self.o.stack:
if s.type == '$end':
pass
else:
s1= "pstack[type:%s t2:%s value:%s]," % (s.type, type(s.value), s.value.node_id)
r = r + s1
debug( "Stack",s,pprint2.pformat(s))
#debug( "Stack",s,pprint2.pformat(dir(s)))
debug( "Stack",s,pprint2.pformat(s.__dict__))
return r
def __repr__(self):
return "!TNode:id='%s',type:'%s'!" % (
self.node_id.n,
self.node_type,
#'pstack'
#self.pstack()
#pprint2.pformat(self.o.__dict__)
)
| gpl-2.0 | 5,055,035,353,580,054,000 | 31.625 | 96 | 0.45977 | false |
GutenkunstLab/SloppyCell | Example/PC12/PC12Run.py | 1 | 1172 | from SloppyCell.ReactionNetworks import *
import Nets
reload(Nets)
import Experiments as Expts
import Calculations as Calcs
m = Model([
Expts.ErkMekTraverse2EGF.expt,
Expts.ErkMekTraverse2NGF.expt,
Expts.Raf1LandrethEGF.expt,
Expts.Rap1YorkNGF.expt,
Expts.RasGreen1NGF.expt,
],
[Nets.EGFstim100,
Nets.NGFstim50,
])
params = m.get_params().copy()
c = m.cost(params)
print('Cost before optimization: {0:.5f}'.format(c))
perturb = 4
import numpy as np
np.random.seed(2131)
pnew = params.copy()
for ii,v in enumerate(pnew):
pnew[ii] = v * perturb**np.random.uniform(-1,1)
print('Cost of perturbed params: {0:.5f}'.format(m.cost(pnew)))
Plotting.figure()
Plotting.plot_model_results(m)
Plotting.title('Before optimization')
## Save network with perturbed parameters to file
#IO.to_SBML_file(Nets.EGFstim100, 'params_perturbed.xml')
popt = Optimization.fmin_lm_log_params(m, pnew, disp=1, maxiter=8)
print('Cost of optimized params: {0:.5f}'.format(m.cost(popt)))
Plotting.figure()
Plotting.plot_model_results(m)
Plotting.title('After optimization')
Plotting.show()
| bsd-3-clause | 5,435,433,293,142,724,000 | 24.478261 | 66 | 0.685154 | false |
lehmannro/translate | storage/xml_extract/unit_tree.py | 1 | 4885 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2010 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from lxml import etree
from translate.storage import base, xliff
from translate.misc.typecheck import accepts, Self, IsOneOf
from translate.misc.typecheck.typeclasses import Number
class XPathTree(object):
@accepts(Self(), IsOneOf(base.TranslationUnit, type(None)))
def __init__(self, unit = None):
self.unit = unit
self.children = {}
def __eq__(self, other):
return isinstance(other, XPathTree) and \
self.unit == other.unit and \
self.children == other.children
@accepts(unicode)
def _split_xpath_component(xpath_component):
"""Split an xpath component into a tag-index tuple.
    >>> _split_xpath_component('{urn:oasis:names:tc:opendocument:xmlns:office:1.0}document-content[0]')
    ('{urn:oasis:names:tc:opendocument:xmlns:office:1.0}document-content', 0)
"""
lbrac = xpath_component.rfind(u'[')
rbrac = xpath_component.rfind(u']')
tag = xpath_component[:lbrac]
index = int(xpath_component[lbrac+1:rbrac])
return tag, index
@accepts(unicode)
def _split_xpath(xpath):
"""Split an 'xpath' string separated by / into a reversed list of its components. Thus:
    >>> _split_xpath('document-content[1]/body[2]/text[3]/p[4]')
[('p', 4), ('text', 3), ('body', 2), ('document-content', 1)]
The list is reversed so that it can be used as a stack, where the top of the stack is
the first component.
"""
if xliff.ID_SEPARATOR in xpath:
xpath = xpath.split(xliff.ID_SEPARATOR)[-1]
components = xpath.split(u'/')
components = [_split_xpath_component(component) for component in components]
return list(reversed(components))
@accepts(IsOneOf(etree._Element, XPathTree), [(unicode, Number)], base.TranslationUnit)
def _add_unit_to_tree(node, xpath_components, unit):
"""Walk down the tree rooted a node, and follow nodes which correspond to the
components of xpath_components. When reaching the end of xpath_components,
set the reference of the node to unit.
With reference to the tree diagram in build_unit_tree::
add_unit_to_tree(node, [('p', 2), ('text', 3), ('body', 2), ('document-content', 1)], unit)
would begin by popping ('document-content', 1) from the path and following the node marked
('document-content', 1) in the tree. Likewise, will descend down the nodes marked ('body', 2)
and ('text', 3).
Since the node marked ('text', 3) has no child node marked ('p', 2), this node is created. Then
the add_unit_to_tree descends down this node. When this happens, there are no xpath components
left to pop. Thus, node.unit = unit is executed.
"""
if len(xpath_components) > 0:
component = xpath_components.pop() # pop the stack; is a component such as ('p', 4)
# if the current node does not have any children indexed by
# the current component, add such a child
if component not in node.children:
node.children[component] = XPathTree()
_add_unit_to_tree(node.children[component], xpath_components, unit)
else:
node.unit = unit
@accepts(base.TranslationStore)
def build_unit_tree(store):
"""Enumerate a translation store and build a tree with XPath components as nodes
and where a node contains a unit if a path from the root of the tree to the node
containing the unit, is equal to the XPath of the unit.
The tree looks something like this::
root
`- ('document-content', 1)
`- ('body', 2)
|- ('text', 1)
| `- ('p', 1)
| `- <reference to a unit>
|- ('text', 2)
| `- ('p', 1)
| `- <reference to a unit>
`- ('text', 3)
`- ('p', 1)
`- <reference to a unit>
"""
tree = XPathTree()
for unit in store.units:
location = _split_xpath(unit.getlocations()[0])
_add_unit_to_tree(tree, location, unit)
return tree
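# Illustrative sketch (not part of the original module): one way to walk an
# XPathTree produced by build_unit_tree. The helper name is a made-up example.
def _walk_unit_tree(node, path=()):
    """Yield (xpath components, unit) pairs for every unit stored in the tree."""
    if node.unit is not None:
        yield path, node.unit
    for component, child in node.children.items():
        for result in _walk_unit_tree(child, path + (component,)):
            yield result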
| gpl-2.0 | 5,338,542,481,059,170,000 | 39.371901 | 102 | 0.641146 | false |
pcmanus/cassandra-dtest | upgrade_through_versions_test.py | 1 | 20763 | import bisect, os, random, re, subprocess, time, uuid, unittest
from collections import defaultdict
from distutils.version import LooseVersion
from cql import OperationalError
from dtest import Tester, debug, DISABLE_VNODES, DEFAULT_DIR
from tools import new_node, not_implemented
TRUNK_VER = '2.2'
# Used to build upgrade path(s) for tests. Some tests will go from start to finish,
# other tests will focus on single upgrades from UPGRADE_PATH[n] to UPGRADE_PATH[n+1]
# Note that these strings should match git branch names, and will be used to search for
# tags which are related to a particular branch as well.
UPGRADE_PATH = ['cassandra-1.1', 'cassandra-1.2', 'cassandra-2.0', 'cassandra-2.1', 'trunk']
class GitSemVer(object):
"""
Wraps a git ref up with a semver (as LooseVersion)
"""
git_ref = None
semver = None
def __init__(self, git_ref, semver_str):
self.git_ref = 'git:' + git_ref
self.semver = LooseVersion(semver_str)
if semver_str == 'trunk':
self.semver = LooseVersion(TRUNK_VER)
def __cmp__(self, other):
return cmp(self.semver, other.semver)
def latest_tag_matching(match_string='cassandra-1.1'):
"""
Returns the latest tag matching match_string*
"""
git_path = os.environ.get('CASSANDRA_DIR', DEFAULT_DIR)
tags = subprocess.check_output(
["git", "tag", "-l", "{search}*".format(search=match_string)], cwd=git_path)\
.rstrip()\
.split('\n')
wrappers = []
for t in tags:
match = re.match('^cassandra-(\d+\.\d+\.\d+(-+\w+)*)$', t)
if match:
gsv = GitSemVer(t, match.group(1))
bisect.insort(wrappers, gsv)
if wrappers:
return wrappers.pop().git_ref
return None
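# Illustrative note (not in the original file): bisect.insort above keeps the
# wrappers ordered by LooseVersion, which compares release components
# numerically (e.g. LooseVersion('1.2.19') > LooseVersion('1.2.2')), so the
# popped element is the highest matching tag rather than the lexicographically
# last one.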
def get_version_from_tag(tag):
if tag == 'trunk':
return TRUNK_VER
match = re.match('^(git:)*cassandra-(\d+\.\d+\.*\d*(-+\w+)*)$', tag)
if match:
return match.group(2)
return None
def get_version_from_build():
path = os.environ.get('CASSANDRA_DIR', DEFAULT_DIR)
build = os.path.join(path, 'build.xml')
with open(build) as f:
for line in f:
match = re.search('name="base\.version" value="([0-9.]+)[^"]*"', line)
if match:
return LooseVersion(match.group(1))
class TestUpgradeThroughVersions(Tester):
"""
Upgrades a 3-node Murmur3Partitioner cluster through versions specified in test_versions.
"""
test_versions = None # set on init to know which versions to use
def __init__(self, *args, **kwargs):
# Ignore these log patterns:
self.ignore_log_patterns = [
# This one occurs if we do a non-rolling upgrade, the node
# it's trying to send the migration to hasn't started yet,
# and when it does, it gets replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
]
# Force cluster options that are common among versions:
kwargs['cluster_options'] = {'partitioner':'org.apache.cassandra.dht.Murmur3Partitioner'}
Tester.__init__(self, *args, **kwargs)
@property
def test_versions(self):
# Murmur was not present until 1.2+
return ['git:'+v for v in UPGRADE_PATH if get_version_from_tag(v) >= '1.2']
def setUp(self):
# Forcing cluster version on purpose
os.environ['CASSANDRA_VERSION'] = self.test_versions[0]
debug("Versions to test (%s): %s" % (type(self), str([v for v in self.test_versions])))
super(TestUpgradeThroughVersions, self).setUp()
def upgrade_test(self):
self.upgrade_scenario()
def upgrade_test_mixed(self):
"""Only upgrade part of the cluster, so we have mixed versions part way through."""
self.upgrade_scenario(mixed_version=True)
def upgrade_scenario(self, populate=True, create_schema=True, mixed_version=False, after_upgrade_call=()):
# Record the rows we write as we go:
self.row_values = set()
cluster = self.cluster
if populate:
# Start with 3 node cluster
debug('Creating cluster (%s)' % self.test_versions[0])
cluster.populate(3)
[node.start(use_jna=True) for node in cluster.nodelist()]
else:
debug("Skipping cluster creation (should already be built)")
# add nodes to self for convenience
for i, node in enumerate(cluster.nodelist(), 1):
node_name = 'node'+str(i)
setattr(self, node_name, node)
if create_schema:
self._create_schema()
else:
debug("Skipping schema creation (should already be built)")
time.sleep(5) #sigh...
self._log_current_ver(self.test_versions[0])
# upgrade through versions
for tag in self.test_versions[1:]:
if mixed_version:
for num, node in enumerate(self.cluster.nodelist()):
# do a write and check for each new node as upgraded
self._write_values()
self._increment_counters()
self.upgrade_to_version(tag, mixed_version=True, nodes=(node,))
self._check_values()
self._check_counters()
debug('Successfully upgraded %d of %d nodes to %s' %
(num+1, len(self.cluster.nodelist()), tag))
else:
self._write_values()
self._increment_counters()
self.upgrade_to_version(tag)
self._check_values()
self._check_counters()
# run custom post-upgrade callables
for call in after_upgrade_call:
call()
debug('All nodes successfully upgraded to %s' % tag)
self._log_current_ver(tag)
cluster.stop()
def upgrade_to_version(self, tag, mixed_version=False, nodes=None):
"""Upgrade Nodes - if *mixed_version* is True, only upgrade those nodes
that are specified by *nodes*, otherwise ignore *nodes* specified
and upgrade all nodes.
"""
debug('Upgrading to ' + tag)
if not mixed_version:
nodes = self.cluster.nodelist()
for node in nodes:
debug('Shutting down node: ' + node.name)
node.drain()
node.watch_log_for("DRAINED")
node.stop(wait_other_notice=False)
# Update Cassandra Directory
for node in nodes:
node.set_cassandra_dir(cassandra_version=tag)
debug("Set new cassandra dir for %s: %s" % (node.name, node.get_cassandra_dir()))
self.cluster.set_cassandra_dir(cassandra_version=tag)
# Restart nodes on new version
for node in nodes:
debug('Starting %s on new version (%s)' % (node.name, tag))
# Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
node.set_log_level("INFO")
node.start(wait_other_notice=True)
node.nodetool('upgradesstables upgrade cf countertable')
def _log_current_ver(self, current_tag):
"""
Logs where we currently are in the upgrade path, surrounding the current branch/tag, like ***sometag***
"""
vers = self.test_versions
curr_index = vers.index(current_tag)
debug(
"Current upgrade path: {}".format(
vers[:curr_index] + ['***'+current_tag+'***'] + vers[curr_index+1:]))
def _create_schema(self):
cursor = self.patient_cql_connection(self.node2).cursor()
if self.cluster.version() >= '1.2':
#DDL for C* 1.2+
cursor.execute("""CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy',
'replication_factor':2};
""")
else:
# DDL for C* 1.1
cursor.execute("""CREATE KEYSPACE upgrade WITH strategy_class = 'SimpleStrategy'
AND strategy_options:replication_factor = 2;
""")
cursor.execute('use upgrade')
cursor.execute('CREATE TABLE cf ( k int PRIMARY KEY , v text )')
cursor.execute('CREATE INDEX vals ON cf (v)')
if self.cluster.version() >= '1.2':
cursor.execute("""
CREATE TABLE countertable (k text PRIMARY KEY, c counter);""")
else:
cursor.execute("""
CREATE TABLE countertable (k text PRIMARY KEY, c counter)
WITH default_validation=CounterColumnType;""")
def _write_values(self, num=100):
cursor = self.patient_cql_connection(self.node2).cursor()
cursor.execute("use upgrade")
for i in xrange(num):
x = len(self.row_values) + 1
cursor.execute("UPDATE cf SET v='%d' WHERE k=%d" % (x,x))
self.row_values.add(x)
def _check_values(self, consistency_level='ALL'):
for node in self.cluster.nodelist():
cursor = self.patient_cql_connection(node).cursor()
cursor.execute("use upgrade")
for x in self.row_values:
cursor.execute("SELECT k,v FROM cf WHERE k=%d" % x, consistency_level=consistency_level)
k,v = cursor.fetchone()
self.assertEqual(x, k)
self.assertEqual(str(x), v)
def _increment_counters(self, seconds=15):
debug("incrementing counter for {time} seconds".format(time=seconds))
cursor = self.patient_cql_connection(self.node2).cursor()
cursor.execute("use upgrade;")
update_counter_query = ("UPDATE countertable SET c = c + 1 WHERE k='{key}'")
uuids = [uuid.uuid4() for i in range(100)]
self.expected_counts = defaultdict(int)
expiry=time.time()+seconds
while time.time() < expiry:
counter_key = random.choice(uuids)
try:
cursor.execute( update_counter_query.format(key=counter_key) )
except OperationalError:
pass
else:
self.expected_counts[counter_key] += 1
# make sure 100 succeeded
assert sum(self.expected_counts.values()) > 100
def _check_counters(self, consistency_level='ALL'):
debug("Checking counter values...")
cursor = self.patient_cql_connection(self.node2).cursor()
cursor.execute("use upgrade;")
for counter_key, value in self.expected_counts.items():
cursor.execute("SELECT c from countertable where k='{key}';".format(key=counter_key), consistency_level=consistency_level)
res = cursor.fetchone()[0]
assert res == value, "Counter not at expected value."
class TestRandomPartitionerUpgrade(TestUpgradeThroughVersions):
"""
Upgrades a 3-node RandomPartitioner cluster through versions specified in test_versions.
"""
def __init__(self, *args, **kwargs):
# Ignore these log patterns:
self.ignore_log_patterns = [
# This one occurs if we do a non-rolling upgrade, the node
# it's trying to send the migration to hasn't started yet,
# and when it does, it gets replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
]
# Force cluster options that are common among versions:
kwargs['cluster_options'] = {'partitioner':'org.apache.cassandra.dht.RandomPartitioner'}
Tester.__init__(self, *args, **kwargs)
@property
def test_versions(self):
return ['git:'+v for v in UPGRADE_PATH]
class PointToPointUpgradeBase(TestUpgradeThroughVersions):
"""
Base class for testing a single upgrade (ver1->ver2).
We are dynamically creating subclasses of this for testing point upgrades, so this is a convenient
place to add functionality/tests for those subclasses to run.
__test__ is False for this class. Subclasses need to revert to True to run tests!
"""
__test__ = False
def setUp(self):
# Forcing cluster version on purpose
os.environ['CASSANDRA_VERSION'] = self.test_versions[0]
super(TestUpgradeThroughVersions, self).setUp()
# if this is a shuffle test, we want to specifically disable vnodes initially
# so that we can enable them later and do shuffle
if self.id().split('.')[-1] in ('shuffle_test', 'shuffle_multidc_test'):
debug("setting custom cluster config for shuffle_test")
if self.cluster.version() >= "1.2":
self.cluster.set_configuration_options(values={'num_tokens': None})
debug("Versions to test (%s): %s" % (type(self), str([v for v in self.test_versions])))
def _bootstrap_new_node(self):
# Check we can bootstrap a new node on the upgraded cluster:
debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000+len(self.cluster.nodes)))
nnode.start(use_jna=True, wait_other_notice=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def _bootstrap_new_node_multidc(self):
# Check we can bootstrap a new node on the upgraded cluster:
debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000+len(self.cluster.nodes)), data_center='dc2')
nnode.start(use_jna=True, wait_other_notice=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def _migrate_to_vnodes(self):
if not DISABLE_VNODES and self.cluster.version() >= '1.2':
for node in self.cluster.nodelist():
debug('Shutting down node: ' + node.name)
node.drain()
node.watch_log_for("DRAINED")
node.stop(wait_other_notice=False)
debug("moving cluster to vnodes")
self.cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 10})
# just a hacky way to get the topology set again, since it seems to get lost
self.cluster.set_cassandra_dir(cassandra_dir=self.cluster.get_cassandra_dir())
# Restart nodes on new version
for node in self.cluster.nodelist():
# Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
node.set_log_level("INFO")
node.start(wait_other_notice=True)
debug("Running shuffle")
mark = self.node1.mark_log()
self.node1.shuffle("create")
self.node1.shuffle("enable")
self.node1.watch_log_for("Pausing until token count stabilizes", from_mark=mark, timeout=60)
else:
debug("Not migrating to vnodes because they are disabled or cluster is not above v1.2")
@unittest.skipIf(DISABLE_VNODES, "vnodes disabled for this test run")
def shuffle_test(self):
# go from non-vnodes to vnodes, and run shuffle to distribute the data.
self.upgrade_scenario(
after_upgrade_call=(self._migrate_to_vnodes, self._check_values, self._check_counters))
def bootstrap_test(self):
# try and add a new node
self.upgrade_scenario(after_upgrade_call=(self._bootstrap_new_node,))
@unittest.skipIf(DISABLE_VNODES, "vnodes disabled for this test run")
def shuffle_multidc_test(self):
# go from non-vnodes to vnodes, and run shuffle to distribute the data.
# multi dc, 2 nodes in each dc
self.cluster.populate([2,2])
[node.start(use_jna=True) for node in self.cluster.nodelist()]
self._multidc_schema_create()
self.upgrade_scenario(populate=False, create_schema=False,
after_upgrade_call=(self._migrate_to_vnodes, self._check_values, self._check_counters))
def bootstrap_multidc_test(self):
# try and add a new node
# multi dc, 2 nodes in each dc
self.cluster.populate([2,2])
[node.start(use_jna=True) for node in self.cluster.nodelist()]
self._multidc_schema_create()
self.upgrade_scenario(populate=False, create_schema=False, after_upgrade_call=(self._bootstrap_new_node_multidc,))
def _multidc_schema_create(self):
cursor = self.patient_cql_connection(self.cluster.nodelist()[0]).cursor()
if self.cluster.version() >= '1.2':
#DDL for C* 1.2+
cursor.execute("""CREATE KEYSPACE upgrade WITH replication = {'class':'NetworkTopologyStrategy',
'dc1':1, 'dc2':1};
""")
else:
# DDL for C* 1.1
cursor.execute("""CREATE KEYSPACE upgrade WITH strategy_class = 'NetworkTopologyStrategy'
AND strategy_options:'dc1':1
AND strategy_options:'dc2':1;
""")
cursor.execute('use upgrade')
cursor.execute('CREATE TABLE cf ( k int PRIMARY KEY , v text )')
cursor.execute('CREATE INDEX vals ON cf (v)')
if self.cluster.version() >= '1.2':
cursor.execute("""
CREATE TABLE countertable (k text PRIMARY KEY, c counter);""")
else:
cursor.execute("""
CREATE TABLE countertable (k text PRIMARY KEY, c counter)
WITH default_validation=CounterColumnType;""")
# create test classes for upgrading from latest tag on branch to the head of that same branch
for from_ver in UPGRADE_PATH:
# we only want to do single upgrade tests for 1.2+
# and trunk is the final version, so there's no test where trunk is upgraded to something else
if get_version_from_tag(from_ver) >= '1.2' and from_ver != 'trunk':
cls_name = ('TestUpgrade_from_'+from_ver+'_latest_tag_to_'+from_ver+'_HEAD').replace('-', '_').replace('.', '_')
debug('Creating test upgrade class: {}'.format(cls_name))
vars()[cls_name] = type(
cls_name,
(PointToPointUpgradeBase,),
{'test_versions': [latest_tag_matching(from_ver), 'git:'+from_ver,], '__test__':True})
# build a list of tuples like so:
# [(A, B), (B, C) ... ]
# each pair in the list represents an upgrade test (A, B)
# where we will upgrade from the latest *tag* matching A, to the HEAD of branch B
POINT_UPGRADES = []
points = [v for v in UPGRADE_PATH if get_version_from_tag(v) >= '1.2']
for i, _ in enumerate(points):
verslice = tuple(points[i:i+2])
if len(verslice) == 2: # exclude dangling version at end
POINT_UPGRADES.append( tuple(points[i:i+2]) )
# create test classes for upgrading from latest tag on one branch, to head of the next branch (see comment above)
for (from_ver, to_branch) in POINT_UPGRADES:
cls_name = ('TestUpgrade_from_'+from_ver+'_latest_tag_to_'+to_branch+'_HEAD').replace('-', '_').replace('.', '_')
debug('Creating test upgrade class: {}'.format(cls_name))
vars()[cls_name] = type(
cls_name,
(PointToPointUpgradeBase,),
{'test_versions': [latest_tag_matching(from_ver), 'git:'+to_branch,], '__test__':True})
# create test classes for upgrading from HEAD of one branch to HEAD of next.
for (from_branch, to_branch) in POINT_UPGRADES:
cls_name = ('TestUpgrade_from_'+from_branch+'_HEAD_to_'+to_branch+'_HEAD').replace('-', '_').replace('.', '_')
debug('Creating test upgrade class: {}'.format(cls_name))
vars()[cls_name] = type(
cls_name,
(PointToPointUpgradeBase,),
{'test_versions': ['git:'+from_branch, 'git:'+to_branch,], '__test__':True})
# create test classes for upgrading from HEAD of one branch, to latest tag of next branch
for (from_branch, to_branch) in POINT_UPGRADES:
cls_name = ('TestUpgrade_from_'+from_branch+'_HEAD_to_'+to_branch+'_latest_tag').replace('-', '_').replace('.', '_')
debug('Creating test upgrade class: {}'.format(cls_name))
# in some cases we might not find a tag (like when the to_branch is trunk)
# so these will be skipped.
if latest_tag_matching(to_branch) is None:
continue
vars()[cls_name] = type(
cls_name,
(PointToPointUpgradeBase,),
{'test_versions': ['git:'+from_branch, latest_tag_matching(to_branch),], '__test__':True})
| apache-2.0 | -1,917,096,939,052,648,700 | 40.860887 | 134 | 0.592063 | false |
ayepezv/GAD_ERP | addons/website/models/ir_ui_view.py | 2 | 8995 | # -*- coding: ascii -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from itertools import groupby
from lxml import etree
from odoo import api, fields, models
from odoo import tools
from odoo.addons.website.models import website
from odoo.http import request
_logger = logging.getLogger(__name__)
class View(models.Model):
_name = "ir.ui.view"
_inherit = ["ir.ui.view", "website.seo.metadata"]
page = fields.Boolean("Whether this view is a web page template (complete)", default=False)
customize_show = fields.Boolean("Show As Optional Inherit", default=False)
website_id = fields.Many2one('website', ondelete='cascade', string="Website")
@api.multi
def unlink(self):
result = super(View, self).unlink()
self.clear_caches()
return result
@api.multi
def _sort_suitability_key(self):
""" Key function to sort views by descending suitability
Suitability of a view is defined as follow:
* if the view and request website_id are matched
* then if the view has no set website
"""
self.ensure_one()
context_website_id = self.env.context.get('website_id', 1)
website_id = self.website_id.id or 0
different_website = context_website_id != website_id
return (different_website, website_id)
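    # Illustrative note (not in the original source): with a request website_id
    # of 1, the key sorts ascending as
    #     (False, 1)  view bound to website 1       (most suitable)
    #     (True, 0)   generic view, no website set
    #     (True, 2)   view bound to another website
    # so filter_duplicate() below keeps the website-specific view when one
    # exists and falls back to the generic one otherwise.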
def filter_duplicate(self):
""" Filter current recordset only keeping the most suitable view per distinct key """
filtered = self.env['ir.ui.view']
for dummy, group in groupby(self, key=lambda record: record.key):
filtered += sorted(group, key=lambda record: record._sort_suitability_key())[0]
return filtered
@api.model
def _view_obj(self, view_id):
if isinstance(view_id, basestring):
if 'website_id' in self._context:
domain = [('key', '=', view_id), '|', ('website_id', '=', False), ('website_id', '=', self._context.get('website_id'))]
order = 'website_id'
else:
domain = [('key', '=', view_id)]
order = self._order
views = self.search(domain, order=order)
if views:
return views.filter_duplicate()
else:
return self.env.ref(view_id)
elif isinstance(view_id, (int, long)):
return self.browse(view_id)
# assume it's already a view object (WTF?)
return view_id
# Returns all views (called and inherited) related to a view
# Used by translation mechanism, SEO and optional templates
@api.model
def _views_get(self, view_id, options=True, bundles=False, root=True):
""" For a given view ``view_id``, should return:
* the view itself
* all views inheriting from it, enabled or not
- but not the optional children of a non-enabled child
* all views called from it (via t-call)
:returns recordset of ir.ui.view
"""
try:
view = self._view_obj(view_id)
except ValueError:
_logger.warning("Could not find view object with view_id '%s'", view_id)
return []
while root and view.inherit_id:
view = view.inherit_id
views_to_return = view
node = etree.fromstring(view.arch)
xpath = "//t[@t-call]"
if bundles:
xpath += "| //t[@t-call-assets]"
for child in node.xpath(xpath):
try:
called_view = self._view_obj(child.get('t-call', child.get('t-call-assets')))
except ValueError:
continue
if called_view not in views_to_return:
views_to_return += self._views_get(called_view, options=options, bundles=bundles)
extensions = view.inherit_children_ids
if not options:
# only active children
extensions = view.inherit_children_ids.filtered(lambda view: view.active)
# Keep options in a deterministic order regardless of their applicability
for extension in extensions.sorted(key=lambda v: v.id):
# only return optional grandchildren if this child is enabled
for ext_view in self._views_get(extension, options=extension.active, root=False):
if ext_view not in views_to_return:
views_to_return += ext_view
return views_to_return
@api.model
@tools.ormcache_context('self._uid', 'xml_id', keys=('website_id',))
def get_view_id(self, xml_id):
if 'website_id' in self._context and not isinstance(xml_id, (int, long)):
domain = [('key', '=', xml_id), '|', ('website_id', '=', self._context['website_id']), ('website_id', '=', False)]
view = self.search(domain, order='website_id', limit=1)
if not view:
_logger.warning("Could not find view object with xml_id '%s'", xml_id)
raise ValueError('View %r in website %r not found' % (xml_id, self._context['website_id']))
return view.id
return super(View, self).get_view_id(xml_id)
@api.multi
def render(self, values=None, engine='ir.qweb'):
""" Render the template. If website is enabled on request, then extend rendering context with website values. """
new_context = dict(self._context)
if request and getattr(request, 'website_enabled', False):
qcontext = self._prepare_qcontext()
# add some values
if values:
qcontext.update(values)
# in edit mode ir.ui.view will tag nodes
if not qcontext.get('translatable') and not qcontext.get('rendering_bundle'):
if qcontext.get('editable'):
new_context = dict(self._context, inherit_branding=True)
elif request.env.user.has_group('base.group_website_publisher'):
new_context = dict(self._context, inherit_branding_auto=True)
if 'main_object' not in qcontext:
qcontext['main_object'] = self
values = qcontext
return super(View, self.with_context(new_context)).render(values, engine=engine)
@api.model
def _prepare_qcontext(self):
""" Returns the qcontext : rendering context with website specific value (required
to render website layout template)
"""
company = request.website.company_id.sudo()
editable = request.website.is_publisher()
translatable = editable and self._context.get('lang') != request.website.default_lang_code
editable = not translatable and editable
qcontext = dict(
self._context.copy(),
website=request.website,
url_for=website.url_for,
slug=website.slug,
res_company=company,
user_id=self.env["res.users"].browse(self.env.user.id),
default_lang_code=request.website.default_lang_code,
languages=request.website.get_languages(),
translatable=translatable,
editable=editable,
menu_data=self.env['ir.ui.menu'].load_menus_root() if request.website.is_user() else None,
)
return qcontext
@api.model
def customize_template_get(self, key, full=False, bundles=False):
""" Get inherit view's informations of the template ``key``. By default, only
returns ``customize_show`` templates (which can be active or not), if
``full=True`` returns inherit view's informations of the template ``key``.
``bundles=True`` returns also the asset bundles
"""
imd = self.env['ir.model.data']
view_theme_id = imd.xmlid_to_res_id('website.theme')
user = self.env.user
user_groups = set(user.groups_id)
views = self.with_context(active_test=False)._views_get(key, bundles=bundles)
done = set()
result = []
for view in views:
if not user_groups.issuperset(view.groups_id):
continue
if full or (view.customize_show and view.inherit_id.id != view_theme_id):
if view.inherit_id not in done:
result.append({
'name': view.inherit_id.name,
'id': view.id,
'key': view.key,
'inherit_id': view.inherit_id.id,
'header': True,
'active': False
})
done.add(view.inherit_id)
result.append({
'name': view.name,
'id': view.id,
'key': view.key,
'inherit_id': view.inherit_id.id,
'header': False,
'active': view.active,
})
return result
| gpl-3.0 | 412,984,447,094,891,260 | 39.886364 | 135 | 0.572318 | false |